repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
JsNoNo/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """Boundary of the L1 unit ball in the positive-x half: |w1| = 1 - |w0|."""
    values = np.asarray(xs, dtype=float)
    # sqrt((1 - sqrt(x^2))^2) simplifies to |1 - |x||; computed vectorized.
    return np.abs(1.0 - np.abs(values))
def l2(xs):
    """Boundary of the L2 unit ball: w1 = sqrt(1 - w0**2) (NaN for |w0| > 1)."""
    values = np.asarray(xs, dtype=float)
    return np.sqrt(1.0 - values ** 2.0)
def el(xs, z):
    # Elastic-net contour: for each first coordinate x, return the second
    # coordinate of the unit-level curve of the elastic-net penalty with
    # mixing parameter z. The closed-form expression below presumably comes
    # from solving the penalty equation for w1 -- TODO confirm derivation.
    # NOTE: z = 0.5 makes the (2 - 4 * z) denominator zero, hence the caller
    # uses alpha = 0.501.
    return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
                      (4 * z ** 2
                       - 8 * x * z ** 2
                       + 8 * x ** 2 * z ** 2
                       - 16 * x ** 2 * z ** 3
                       + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
                      - 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
    """Draw black x- and y-axis lines spanning [-ext, ext] through the origin."""
    for xvals, yvals in (([-ext, ext], [0, 0]), ([0, 0], [-ext, ext])):
        plt.plot(xvals, yvals, "k-")
# Sample the first quadrant; the three mirrored calls per penalty below
# exploit the symmetry of each contour in both axes.
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero inside el()

cross(1.2)

plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")

plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")

plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")

plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()

plt.axis("equal")
plt.show()
| bsd-3-clause |
simon-pepin/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    """Return `size` samples of a noisy, roughly increasing sequence.

    Each sample is uniform integer noise in [-50, 50) added to a scaled
    logarithm 50 * log(1 + i), a classic isotonic-regression test signal.

    BUG FIX: the original body referenced the global ``n`` instead of the
    ``size`` parameter, so the function only worked when a module-level ``n``
    happened to exist (NameError otherwise).
    """
    return (np.random.randint(-50, 50, size=size)
            + 50. * np.log(1 + np.arange(size)))
def generate_logistic_dataset(size):
    """Return `size` Bernoulli draws whose success probability follows a
    logistic curve over sorted standard-normal inputs."""
    sorted_normals = np.sort(np.random.normal(size=size))
    probabilities = 1.0 / (1.0 + np.exp(-sorted_normals))
    return np.random.random(size=size) < probabilities
# Map of CLI dataset name -> generator function (used by --dataset choices).
DATASET_GENERATORS = {
    'perturbed_logarithm': generate_perturbed_logarithm_dataset,
    'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
    """
    Runs a single iteration of isotonic regression on the input data,
    and reports the total time taken (in seconds).
    """
    # Collect garbage first so a GC pause does not pollute the timing.
    gc.collect()

    tstart = datetime.now()
    isotonic_regression(Y)
    delta = datetime.now() - tstart
    # NOTE(review): sklearn.utils.bench.total_seconds mirrors
    # timedelta.total_seconds(); confirm it still exists in the installed
    # scikit-learn version (it was removed in later releases).
    return total_seconds(delta)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Isotonic Regression benchmark tool")
    parser.add_argument('--iterations', type=int, required=True,
                        help="Number of iterations to average timings over "
                             "for each problem size")
    parser.add_argument('--log_min_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the minimum problem size")
    parser.add_argument('--log_max_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the maximum problem size")
    parser.add_argument('--show_plot', action='store_true',
                        help="Plot timing output with matplotlib")
    parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
                        required=True)

    args = parser.parse_args()

    timings = []
    # NOTE(review): range() excludes the upper bound, so
    # log_max_problem_size itself is never benchmarked -- confirm intended.
    for exponent in range(args.log_min_problem_size,
                          args.log_max_problem_size):
        n = 10 ** exponent
        Y = DATASET_GENERATORS[args.dataset](n)
        # Average the per-iteration wall-clock time over --iterations runs.
        time_per_iteration = \
            [bench_isotonic_regression(Y) for i in range(args.iterations)]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)
        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))

    if args.show_plot:
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel('Number of observations')
        plt.ylabel('Time (s)')
        plt.axis('tight')
        plt.loglog()
        plt.show()
| bsd-3-clause |
# coding: utf-8
# Render the EMAG2 magnetic-anomaly grid as a shaded, colour-mapped global
# image and save it as a TIFF.
import struct, math, gzip, os
import numpy
from osgeo import gdal
from gdalconst import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import LinearSegmentedColormap
import histeq, shading

# BUG FIX: the original file contained "%matplotlib inline", which is an
# IPython magic command and a SyntaxError in a plain .py script; it is only
# meaningful in a notebook (and redundant here since the Agg backend is used).

#fig = plt.figure(figsize=(128, 64),dpi=144,frameon=False)
fig = plt.figure(figsize=(12, 6),dpi=144,frameon=False)

# Global cylindrical (lat/lon) projection covering the whole Earth.
m = Basemap(
    projection='cyl',
    llcrnrlat=-90,
    urcrnrlat=90,
    llcrnrlon=-180,
    urcrnrlon=180,
    resolution=None)
m.drawmapboundary(linewidth=0)

dataset = gdal.Open('/mnt/workspace/EMAG2/EMAG2_2m_nearest.nc', GA_ReadOnly )
band = dataset.GetRasterBand(1)
# Read the whole band, resampled down to 7600 x 3800 pixels.
r = band.ReadAsArray( 0, 0, band.XSize, band.YSize, 7600, 3800)
#r = band.ReadAsArray( 0, 0, band.XSize, band.YSize, band.XSize, band.YSize)
#v_min = numpy.nanmin(r)
#v_max = numpy.nanmax(r)

# Diverging colour table: each RGB triple corresponds to the anomaly value
# (presumably in nT -- confirm against the EMAG2 documentation) at the same
# position in `steps`.
colors = [
    [30,30, 30], # -200
    [33,40,230 ],
    [62,104,230],
    [90,168,230],
    [150,200,230],
    [255,255,255],
    [255,255,255],
    [230,230,157],
    [230,230,30],
    [230,162,60],
    [230,40,91],
    [168,30,168],# 200
]
steps = [
    -200,
    -60,
    -48,
    -36,
    -20,
    -4,
    4,
    20,
    36,
    48,
    60,
    200,
]
# Build a matplotlib colormap: normalize each step to [0, 1] and rescale the
# RGB components from 0-255 to 0-1.
color_list = []
for i in range(len(steps)):
    color_list.append((float(steps[i]-steps[0])/(steps[-1]-steps[0]), [x/255.0 for x in colors[i]]))
my_cmap = LinearSegmentedColormap.from_list('my_cmap', color_list, N=1024)

# Replace NaNs with 0 (their positions are remembered for grey-out below),
# then clip the data to the [-200, 200] colormap range.
where_are_nan =numpy.isnan(r)
r[where_are_nan] =0
where_less_than_minus_200 = (r<-200)
r[where_less_than_minus_200] = -200
where_greater_than_200 = (r>200)
r[where_greater_than_200] = 200

# Apply hill-shading, then paint the former-NaN cells mid-grey.
rgb = shading.shade(r,shading.intensity(r), cmap=my_cmap)
rgb[where_are_nan] = [128.0/255,128.0/255,128.0/255]
#masked_data = numpy.ma.masked_where(numpy.isnan(rgb),rgb)
#print masked_data
#m.imshow(masked_data,cmap=cmap,interpolation='sinc')

# Flip vertically: row 0 of the raster is the northernmost scanline, while
# imshow draws row 0 at the bottom.
rgb = numpy.flipud(rgb)
m.imshow(rgb,interpolation='sinc')
#plot.show()
fig.savefig('/mnt/workspace/EMAG2/EMAG2.tiff',bbox_inches='tight',pad_inches=0,dpi=144,transparent=True,frameon=False)
ljschumacher/tierpsy-tracker | tierpsy/analysis/wcon_export/exportWCON.py | 1 | 9522 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 20:55:19 2016
@author: ajaver
"""
import json
import os
from collections import OrderedDict
import zipfile
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.misc import print_flush
from tierpsy.analysis.feat_create.obtainFeaturesHelper import WormStats
from tierpsy.helper.params import read_unit_conversions, read_ventral_side, read_fps
def getWCONMetaData(fname, READ_FEATURES=False, provenance_step='FEAT_CREATE'):
    """Build the WCON "metadata" dictionary from a tierpsy HDF5 file.

    Reads /experiment_info (if present) and the provenance record for
    `provenance_step`, and attaches the tracker software version(s) under
    the "software" key. Returns an OrderedDict with well-known WCON fields
    first.
    """
    def _order_metadata(metadata_dict):
        # Put the well-known WCON fields first; any extra fields follow in
        # alphabetical order.
        ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',
                          'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',
                          'habituation', 'who', 'protocol', 'lab', 'software']

        extra_fields = metadata_dict.keys() - set(ordered_fields)
        ordered_fields += sorted(extra_fields)

        ordered_metadata = OrderedDict()
        for field in ordered_fields:
            if field in metadata_dict:
                ordered_metadata[field] = metadata_dict[field]
        return ordered_metadata

    with tables.File(fname, 'r') as fid:
        if not '/experiment_info' in fid:
            experiment_info = {}
        else:
            experiment_info = fid.get_node('/experiment_info').read()
            experiment_info = json.loads(experiment_info.decode('utf-8'))

        provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()
        provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))
        commit_hash = provenance_tracking['commit_hash']
        # Older files stored the version under the project's former name.
        if 'tierpsy' in commit_hash:
            tierpsy_version = commit_hash['tierpsy']
        else:
            tierpsy_version = commit_hash['MWTracker']

        MWTracker_ver = {"name":"tierpsy (https://github.com/ver228/tierpsy-tracker)",
                         "version": tierpsy_version,
                         "featureID":"@OMG"}

        if not READ_FEATURES:
            experiment_info["software"] = MWTracker_ver
        else:
            #add open_worm_analysis_toolbox info and save as a list of "softwares"
            open_worm_ver = {"name":"open_worm_analysis_toolbox (https://github.com/openworm/open-worm-analysis-toolbox)",
                             "version":commit_hash['open_worm_analysis_toolbox'],
                             "featureID":""}
            experiment_info["software"] = [MWTracker_ver, open_worm_ver]

    return _order_metadata(experiment_info)
def __reformatForJson(A):
    """Round an array for compact JSON output.

    Scalars pass through unchanged. Arrays are rounded to a precision chosen
    from the smallest non-zero magnitude (keeping roughly 3 significant
    figures, with a floor of 2 decimals); NaNs become None (JSON null).
    """
    if isinstance(A, (int, float)):
        return A

    # Pick precision from finite, non-zero entries only (log10(0) is -inf).
    good = ~np.isnan(A) & (A != 0)
    dd = A[good]
    if dd.size > 0:
        dd = np.abs(np.floor(np.log10(np.abs(dd)))-2)
        precision = max(2, int(np.min(dd)))
        A = np.round(A.astype(np.float64), precision)

    # NaN is not valid JSON; np.where with None yields an object array.
    A = np.where(np.isnan(A), None, A)

    #wcon specification require to return a single number if it is only one element list
    if A.size == 1:
        return A[0]
    else:
        return A.tolist()
def __addOMGFeat(fid, worm_feat_time, worm_id):
    """Collect per-worm OpenWorm features (time series + event features)
    into an OrderedDict keyed by feature name."""
    worm_features = OrderedDict()
    #add time series features
    # NOTE(review): .iteritems() implies worm_feat_time is a pandas DataFrame
    # from an older pandas; .items() is the modern spelling -- confirm the
    # pinned pandas version before upgrading.
    for col_name, col_dat in worm_feat_time.iteritems():
        if not col_name in ['worm_index', 'timestamp']:
            worm_features[col_name] = col_dat.values

    worm_path = '/features_events/worm_%i' % worm_id
    worm_node = fid.get_node(worm_path)
    #add event features
    for feature_name in worm_node._v_children:
        feature_path = worm_path + '/' + feature_name
        worm_features[feature_name] = fid.get_node(feature_path)[:]
    return worm_features
def _get_ventral_side(features_file):
    """Return the WCON contour-orientation code for the ventral side.

    'CW' when the ventral side is known (ventral and dorsal contours are
    merged so the ventral contour runs clockwise), '?' otherwise.
    """
    side = read_ventral_side(features_file)
    side_is_known = bool(side) and side != 'unknown'
    return 'CW' if side_is_known else '?'
def _getData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):
    """Build the WCON "data" list: one dict per worm with timestamps,
    skeleton and contour coordinates, and (optionally) OpenWorm features.

    Returns an empty dict when the file has no /features_timeseries table.
    When IS_FOR_WCON, numeric arrays are rounded/NaN-cleaned for JSON.
    """
    if IS_FOR_WCON:
        lab_prefix = '@OMG '
    else:
        lab_prefix = ''

    with pd.HDFStore(features_file, 'r') as fid:
        if not '/features_timeseries' in fid:
            return {} #empty file nothing to do here

        features_timeseries = fid['/features_timeseries']
        feat_time_group_by_worm = features_timeseries.groupby('worm_index');

    ventral_side = _get_ventral_side(features_file)

    with tables.File(features_file, 'r') as fid:
        #fps used to adjust timestamp to real time
        fps = read_fps(features_file)

        #get pointers to some useful data
        skeletons = fid.get_node('/coordinates/skeletons')
        dorsal_contours = fid.get_node('/coordinates/dorsal_contours')
        ventral_contours = fid.get_node('/coordinates/ventral_contours')

        #let's append the data of each individual worm as a element in a list
        all_worms_feats = []

        #group by iterator will return sorted worm indexes
        for worm_id, worm_feat_time in feat_time_group_by_worm:
            worm_id = int(worm_id)

            #read worm skeletons data
            worm_skel = skeletons[worm_feat_time.index]
            worm_dor_cnt = dorsal_contours[worm_feat_time.index]
            worm_ven_cnt = ventral_contours[worm_feat_time.index]

            #start ordered dictionary with the basic features
            worm_basic = OrderedDict()
            worm_basic['id'] = str(worm_id)
            worm_basic['head'] = 'L'
            worm_basic['ventral'] = ventral_side
            worm_basic['ptail'] = worm_ven_cnt.shape[1]-1 #index starting with 0

            worm_basic['t'] = worm_feat_time['timestamp'].values/fps #convert from frames to seconds
            worm_basic['x'] = worm_skel[:, :, 0]
            worm_basic['y'] = worm_skel[:, :, 1]

            # Merge ventral + reversed dorsal contour into one closed outline.
            contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))
            worm_basic['px'] = contour[:, :, 0]
            worm_basic['py'] = contour[:, :, 1]

            if READ_FEATURES:
                worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)
                for feat in worm_features:
                    worm_basic[lab_prefix + feat] = worm_features[feat]

            if IS_FOR_WCON:
                for x in worm_basic:
                    if not x in ['id', 'head', 'ventral', 'ptail']:
                        worm_basic[x] = __reformatForJson(worm_basic[x])

            #append features
            all_worms_feats.append(worm_basic)

    return all_worms_feats
def _getUnits(features_file, READ_FEATURES=False):
    """Build the WCON "units" dictionary from the file's unit conversions
    (time in frames or seconds, coordinates in pixels or micrometers)."""
    fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)
    xy_units = microns_per_pixel_out[1]
    time_units = fps_out[2]

    units = OrderedDict()
    units["size"] = "mm" #size of the plate
    units['t'] = time_units #frames or seconds
    for field in ['x', 'y', 'px', 'py']:
        units[field] = xy_units #(pixels or micrometers)

    if READ_FEATURES:
        #TODO how to change microns to pixels when required
        ws = WormStats()
        # NOTE(review): .iteritems() suggests ws.features_info['units'] is a
        # pandas object (or Python 2 dict); a plain Python 3 dict would raise
        # AttributeError here -- confirm the actual type.
        for field, unit in ws.features_info['units'].iteritems():
            units['@OMG ' + field] = unit

    return units
def exportWCONdict(features_file, READ_FEATURES=False):
    """Assemble the complete WCON dictionary (metadata, units, data) for a
    tierpsy features HDF5 file."""
    metadata = getWCONMetaData(features_file, READ_FEATURES)
    data = _getData(features_file, READ_FEATURES)
    units = _getUnits(features_file, READ_FEATURES)

    return OrderedDict([
        ('metadata', metadata),
        ('units', units),
        ('data', data),
    ])
def getWCOName(features_file):
    """Return the zipped-WCON output path for a '*_features.hdf5' file."""
    suffix = '_features.hdf5'
    return features_file.replace(suffix, '.wcon.zip')
def exportWCON(features_file, READ_FEATURES=False):
    """Export a tierpsy features HDF5 file to a zip-compressed WCON file.

    The archive contains a single .wcon JSON member named after the zip.
    """
    base_name = os.path.basename(features_file).replace('_features.hdf5', '')
    print_flush("{} Exporting data to WCON...".format(base_name))

    wcon_dict = exportWCONdict(features_file, READ_FEATURES)

    wcon_file = getWCOName(features_file)
    #with gzip.open(wcon_file, 'wt') as fid:
    #    json.dump(wcon_dict, fid, allow_nan=False)

    with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        zip_name = os.path.basename(wcon_file).replace('.zip', '')
        # Compact separators keep the (potentially huge) JSON small.
        wcon_txt = json.dumps(wcon_dict, allow_nan=False, separators=(',', ':'))
        zf.writestr(zip_name, wcon_txt)

    # BUG FIX: corrected the misspelled status message ("Finised to export").
    print_flush("{} Finished exporting to WCON.".format(base_name))
if __name__ == '__main__':
    # Ad-hoc manual test on a local single-worm recording.
    features_file = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/single_worm/global_sample_v3/883 RC301 on food R_2011_03_07__11_10_27___8___1_features.hdf5'
    #exportWCON(features_file)

    wcon_file = getWCOName(features_file)
    wcon_dict = exportWCONdict(features_file)
    # indent=4 here (unlike exportWCON) makes the output human-readable.
    wcon_txt = json.dumps(wcon_dict, allow_nan=False, indent=4)
    #%%
    with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        zip_name = os.path.basename(wcon_file).replace('.zip', '')
        zf.writestr(zip_name, wcon_txt)

    #%%
#    import wcon
#    wc = wcon.WCONWorms()
#    wc = wc.load_from_file(JSON_path, validate_against_schema = False)
| mit |
dsm054/pandas | pandas/tests/indexes/timedeltas/test_formats.py | 9 | 3573 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
from pandas import TimedeltaIndex
class TestTimedeltaIndexRendering(object):
    """Rendering tests for TimedeltaIndex: repr/str, Series repr, _summary."""

    @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
    def test_representation(self, method):
        # Five cases: empty, 1/2/3 elements with freq='D', and a freq-less
        # index with sub-day resolution.
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])

        exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""

        exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
                "freq='D')")

        exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
                "dtype='timedelta64[ns]', freq='D')")

        exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
                "dtype='timedelta64[ns]', freq='D')")

        exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
                "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")

        # Wide display so the repr never wraps.
        with pd.option_context('display.width', 300):
            for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                     [exp1, exp2, exp3, exp4, exp5]):
                result = getattr(idx, method)()
                assert result == expected

    def test_representation_to_series(self):
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])

        exp1 = """Series([], dtype: timedelta64[ns])"""

        exp2 = ("0 1 days\n"
                "dtype: timedelta64[ns]")

        exp3 = ("0 1 days\n"
                "1 2 days\n"
                "dtype: timedelta64[ns]")

        exp4 = ("0 1 days\n"
                "1 2 days\n"
                "2 3 days\n"
                "dtype: timedelta64[ns]")

        exp5 = ("0 1 days 00:00:01\n"
                "1 2 days 00:00:00\n"
                "2 3 days 00:00:00\n"
                "dtype: timedelta64[ns]")

        with pd.option_context('display.width', 300):
            for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                     [exp1, exp2, exp3, exp4, exp5]):
                result = repr(pd.Series(idx))
                assert result == expected

    def test_summary(self):
        # GH#9116
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])

        exp1 = ("TimedeltaIndex: 0 entries\n"
                "Freq: D")

        exp2 = ("TimedeltaIndex: 1 entries, 1 days to 1 days\n"
                "Freq: D")

        exp3 = ("TimedeltaIndex: 2 entries, 1 days to 2 days\n"
                "Freq: D")

        exp4 = ("TimedeltaIndex: 3 entries, 1 days to 3 days\n"
                "Freq: D")

        exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
                "00:00:00")

        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            result = idx._summary()
            assert result == expected
| bsd-3-clause |
McIntyre-Lab/papers | nanni_maize_2021/scripts/evaluate_htseq_counts.py | 1 | 5064 | #!/usr/bin/env python
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def getOptions():
    """Parse and return the command line arguments."""
    arg_parser = argparse.ArgumentParser(
        description="Evaluate expression from flag on off file")
    # Input data
    arg_parser.add_argument("-f", "--flag", dest="inFlag", required=True,
                            help="Input full flag on/off TSV file of novel loci")
    # Output data
    arg_parser.add_argument("-p", "--prefix", dest="outPrefix", required=True,
                            help="Output file prefix")
    return arg_parser.parse_args()
def main():
    """Summarize detection flags per genotype and plot log2 mean expression.

    Reads the module-level `args` (set in the __main__ guard); writes PNG/PDF
    box plots and a CSV of B73-only novel loci using args.outPrefix.
    """
    # Get input flag file
    flagDF = pd.read_csv(args.inFlag,sep="\t")
    # Flag genotypes when a least one treatment (Amb or Ele) is detected
    for genotype in ['B73','C123','Hp301','Mo17','NC338']:
        flagDF['flag_'+genotype] = np.where(flagDF['flag_'+genotype+'_Amb']+flagDF['flag_'+genotype+'_Ele']>0,1,0)
    flagDF['sum_genotype_flag'] = flagDF[[c for c in flagDF.columns if ("flag_" in c)&("Amb" not in c)&("Ele" not in c)]].sum(axis=1)

    # Get number of genes detected in each group of samples
    countDF = flagDF.groupby([c for c in flagDF.columns if ("flag_" in c)&("Amb" not in c)&("Ele" not in c)])['gene_id'].count().reset_index().rename(columns={'gene_id':'num_gene'}).sort_values('num_gene',ascending=False)
    print("Number of genes detected in at least one treatment of each genotype:\n{}".format(countDF.to_string(index=False)))

    # Plot log2-transformed mean expression values (the original comment said
    # log10, but np.log2 is what is used throughout).
    logDF = flagDF.set_index('gene_id')[[c for c in flagDF.columns if "mean" in c]].copy()
    for genotype in ['B73','C123','Hp301','Mo17','NC338']:
        # -inf arises from log2(0); replace with NaN so box plots ignore it.
        logDF['log_mean_'+genotype+"_Amb"] = np.log2(logDF['mean_'+genotype+"_Amb"])
        logDF['log_mean_'+genotype+"_Amb"] = np.where(logDF['log_mean_'+genotype+"_Amb"]==(-np.inf),np.nan,logDF['log_mean_'+genotype+"_Amb"])
        logDF['log_mean_'+genotype+"_Ele"] = np.log2(logDF['mean_'+genotype+"_Ele"])
        logDF['log_mean_'+genotype+"_Ele"] = np.where(logDF['log_mean_'+genotype+"_Ele"]==(-np.inf),np.nan,logDF['log_mean_'+genotype+"_Ele"])
    cols = [c for c in logDF.columns if "log" in c]
    xlabels = list(map(lambda x: "_".join(x.split("_")[2:]),cols))
    logPlot = logDF[cols].plot.box(figsize=(12,12),rot=45)
    logPlot.set_xticklabels(xlabels)
    logPlot.set_ylabel("Log2(mean # reads)")
    # NOTE(review): plt.show() does not take an axes argument -- presumably
    # harmless here, but confirm against the pinned matplotlib version.
    plt.show(logPlot)
    plt.tight_layout()
    plt.savefig("{}_log2meanExpression_by_geno_trt.png".format(args.outPrefix),dpi=600,format="png")
    plt.savefig("{}_log2meanExpression_by_geno_trt.pdf".format(args.outPrefix),dpi=600,format="pdf")

    # Same plot, but with Amb + Ele pooled per genotype.
    for genotype in ['B73','C123','Hp301','Mo17','NC338']:
        logDF['log_mean_'+genotype] = np.log2(logDF['mean_'+genotype+"_Amb"]+logDF['mean_'+genotype+"_Ele"])
        logDF['log_mean_'+genotype] = np.where(logDF['log_mean_'+genotype]==(-np.inf),np.nan,logDF['log_mean_'+genotype])
    cols = [c for c in logDF.columns if ("log" in c)&("Amb" not in c)&("Ele" not in c)]
    xlabels = list(map(lambda x: "_".join(x.split("_")[2:]),cols))
    logPlotGeno = logDF[cols].plot.box(figsize=(12,12),rot=45)
    logPlotGeno.set_xticklabels(xlabels)
    logPlotGeno.set_ylabel("Log2(mean # reads)")
    plt.show(logPlotGeno)
    plt.tight_layout()
    plt.savefig("{}_log2meanExpression_by_geno_sum.png".format(args.outPrefix),dpi=600,format="png")
    plt.savefig("{}_log2meanExpression_by_geno_sum.pdf".format(args.outPrefix),dpi=600,format="pdf")

#    meanDF = flagDF.groupby([c for c in flagDF.columns if ("flag_" in c)&("Amb" not in c)&("Ele" not in c)]).agg({
#            'gene_id':'count',
#            'mean_B73_Amb':'describe',
#            'mean_B73_Ele':'describe',
#            'mean_C123_Amb':'describe',
#            'mean_C123_Ele':'describe',
#            'mean_Hp301_Amb':'describe',
#            'mean_Hp301_Ele':'describe',
#            'mean_Mo17_Amb':'describe',
#            'mean_Mo17_Ele':'describe',
#            'mean_NC338_Amb':'describe',
#            'mean_NC338_Ele':'describe'}).reset_index()
#    noNC = flagDF[(flagDF['flag_NC338']==0)&(flagDF['sum_genotype_flag']==4)].copy()
#    noNC = noNC.set_index('gene_id')
#    noNC[[c for c in noNC.columns if "mean" in c]].plot.box(figsize=(12,12))
#    onlyNC = flagDF[(flagDF['flag_NC338']==1)&(flagDF['sum_genotype_flag']==1)].copy()
#    onlyNC = onlyNC.set_index('gene_id')
#    onlyNC[[c for c in onlyNC.columns if "mean" in c]].plot.box(figsize=(12,12))
#    noB73 = flagDF[(flagDF['flag_B73']==0)&(flagDF['sum_genotype_flag']==4)].copy()
#    noB73 = noB73.set_index('gene_id')
#    noB73[[c for c in noB73.columns if "mean" in c]].plot.box(figsize=(12,12))

    # Genes detected only in B73: write to CSV and box-plot their means.
    onlyB73 = flagDF[(flagDF['flag_B73']==1)&(flagDF['sum_genotype_flag']==1)].copy()
    onlyB73.to_csv(args.outPrefix+"_B73_only_novel_loci.csv",index=False)
    onlyB73 = onlyB73.set_index('gene_id')
    onlyB73[[c for c in onlyB73.columns if "mean" in c]].plot.box(figsize=(12,12))
if __name__ == '__main__':
    # Parse command line arguments once; main() reads the module-level `args`.
    # (The original `global args` statement was removed: `global` is a no-op
    # at module scope, where every assignment is already global.)
    args = getOptions()
    main()
| lgpl-3.0 |
weegreenblobbie/nsound | src/examples/bebot-disp.py | 1 | 12315 | """
$Id: bebot-disp.py 912 2015-07-26 00:50:29Z weegreenblobbie $
Nsound is a C++ library and Python module for audio synthesis featuring
dynamic digital filters. Nsound lets you easily shape waveforms and write
to disk or plot them. Nsound aims to be as powerful as Csound but easy to
use.
Copyright (c) 2004 to Present Nick Hilton
weegreenblobbie_at_yahoo_com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
import argparse
import sys
import Queue as queue
import multiprocessing
import datetime
now = datetime.datetime.now
import matplotlib
#~matplotlib.use('TkAgg') # not antigrain smoothing for faster render times
from matplotlib import pylab
import pygame
import Nsound as ns
import numpy as np
#------------------------------------------------------------------------------
# Globals

SR = 48000.0       # audio sample rate in Hz
CHANNELS = 2       # stereo playback
N_BUFFERS = 3      # number of realtime playback buffers

LO_T = 0.02        # low-latency buffer time (s), used with JACK
HI_T = 0.05        # Higher latency for non-JACK

WINDOW_SIZE = 400  # pygame window is WINDOW_SIZE x WINDOW_SIZE pixels

DEBUG_RECORD = False  # when True, worker() records each phrase to a .wav

#-------------------------------------------
# oscilliscope display settings

DISP_WIDTH = 0.05  # seconds of audio shown per oscilliscope frame

# spectrogram settings
SPEC_WINDOW = 0.100 # seconds
SPEC_MAX_FREQ = 16000.0 # Hz
def limit(x, y):
    """Clamp (x, y) to valid window coordinates [0, WINDOW_SIZE - 1]."""
    x = min(max(x, 0), WINDOW_SIZE - 1)
    y = min(max(y, 0), WINDOW_SIZE - 1)
    return x, y
def worker(q, disp_q, t):
    """Realtime synthesis process.

    Receives mouse positions (or "STOP"/"QUIT") on q, maps window x/y to
    frequency / sync / decay / filter-cutoff tables, synthesizes 10 ms audio
    chunks, plays them, and forwards each chunk to the display process via
    disp_q. `t` is the playback buffer time in seconds.
    """
    #--------------------------------------------------------------------------
    # Initialize

    # worker state machine
    enum = 0
    WAITING = enum ; enum += 1
    PLAYING = enum ; enum += 1
    RELEASING = enum; enum += 1

    try:
        playback = ns.AudioPlaybackRt(SR, CHANNELS, N_BUFFERS, t)
    except:
        # Report failure to the main process and bail out.
        q.put(False)
        sys.stderr.write("FAILURE: failed to create audio playback object!\n")
        sys.stderr.flush()
        return

    gen = ns.Sine(SR)
    gen.setRealtime(True)

    lpf = ns.FilterLowPassIIR(SR, 6, 80, 0.00)
    lpf.setRealtime(True)

    gent = ns.Triangle(SR, 0.02, 0.02)
    gent.setRealtime(True)

    gend = ns.GeneratorDecay(SR)
    gend.setRealtime(True)
    # Hard-sync the sine oscillator to the decay generator.
    gend.addSlaveSync(gen)

    # Tell the main thread we succeeded in initializing our
    # nsound objects.
    q.put(True)

    # Busy-wait until the main process has consumed the ready flag.
    while q.full():
        pass

    #--------------------------------------------------------------------------
    # lookup tables: window x maps to frequency, window y maps to the
    # remaining three parameters.

    freq_min = 40
    freq_max = 650

    ks_min = 200
    ks_max = 8000

    alpha_min = 6
    alpha_max = 36

    f_cut_min = 1000
    f_cut_max = 7000

    # Make tables
    g = ns.Generator(1)

    freq_table = g.drawLine(WINDOW_SIZE, freq_min, freq_max)
    ks_table = g.drawLine(WINDOW_SIZE, ks_min, ks_max)
    alpha_table = g.drawLine(WINDOW_SIZE, alpha_min, alpha_max)
    f_cut_table = g.drawLine(WINDOW_SIZE, f_cut_min, f_cut_max)

    state = WAITING

    # (x0, y0) is the previous position, (x1, y1) the latest one; parameters
    # are ramped between them over each chunk.
    x0, y0 = None, None
    x1, y1 = None, None

    reset_first = False

    if DEBUG_RECORD:
        aout = ns.AudioStream(SR, 1)
        aid = 0

    #--------------------------------------------------------------------------
    # Sound processing loop

    dur = 0.01
    process_pos = True
    while True:

        # Non-blocking read of the latest command/position.
        try:
            pos = q.get(False)
            process_pos = True
        except queue.Empty:
            process_pos = False

        if process_pos:

            if pos == "QUIT":
                playback.stop()
                return

            elif pos == "STOP":
                # Mouse released: reset all generators and go quiet.
                gen.reset()
                gend.reset()
                gent.reset()
                lpf.reset()
                playback.stop()
                state = WAITING
                disp_q.put("RESET")

                if DEBUG_RECORD:
                    fn = "bebot-%02d.wav" % aid
                    aout >> fn
                    print "Wrote %s" % fn
                    aout = ns.AudioStream(SR, 1)
                    aid += 1

            elif state == WAITING:
                x1, y1 = pos
                # pygame y grows downward; flip so up means "higher".
                y1 = WINDOW_SIZE - y1
                x1, y1 = limit(x1, y1)
                if x0 is None:
                    x0, y0 = x1, y1
                state = PLAYING

            elif state == PLAYING:
                x1, y1 = pos
                y1 = WINDOW_SIZE - y1
                x1, y1 = limit(x1, y1)

        if state != PLAYING:
            continue

        # Ramp each parameter from the previous position to the new one.
        f0 = freq_table[x0]
        f1 = freq_table[x1]

        k0 = ks_table[y0]
        k1 = ks_table[y1]

        a0 = alpha_table[y0]
        a1 = alpha_table[y1]

        fc0 = f_cut_table[y0]
        fc1 = f_cut_table[y1]

        freqs = gen.drawLine(dur, f0, f1)
        ks = gen.drawLine(dur, k0, k1)
        alpha = gen.drawLine(dur, a0, a1)
        f_cut = gen.drawLine(dur, fc0, fc1)

        # Decay-synced sine, ring-modulated, offset, shaped by a triangle
        # wave and low-pass filtered.
        temp = gend.generate2(dur, freqs, alpha)
        temp *= gen.generate(dur, ks)
        temp += 1.0
        temp *= gent.generate(dur, freqs)
        temp /= 2.0
        temp = 0.333 * lpf.filter(temp, f_cut)

        disp_q.put(temp)

        temp >> playback

        if DEBUG_RECORD:
            aout << temp

        x0, y0 = x1, y1
def display(q):
    '''
    The worker thread sends audio data this display thread for real-time
    oscilliscope display.
    '''
    pylab.ion()

    fig = pylab.figure(figsize=(12,6))
    ax1 = fig.add_subplot(211)
    ax1.grid(True)
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Amplitude')
    ax2 = fig.add_subplot(212)
    ax2.grid(True)
    ax2.set_xlabel('Frequency (Hz)')
    ax2.set_ylabel('Amplitude')

    #--------------------------------------------------------------------------
    # Create oscilliscope data and plot axes
    xdata = np.arange(int(DISP_WIDTH * SR + 0.5)) / float(SR)
    n_samples = len(xdata)
    ydata = ns.Buffer.zeros(n_samples)
    ax1.axis([xdata[0], xdata[-1], -0.5, 0.5])
    line1, = ax1.plot(xdata, ydata, rasterized = True)

    # Create spectrogram data and plot axes
    spec_args = (SR, SPEC_WINDOW, 0.010, ns.NUTTALL)
    spec = ns.Spectrogram(ydata, *spec_args)
    spec_xaxis = np.array(spec.getFrequencyAxis())
    # Drop the DC bin ([1:]) from the magnitude spectrum.
    spec_yzeros = np.array(spec.computeMagnitude(ydata)[1:])
    line2, = ax2.plot(spec_xaxis, spec_yzeros, rasterized = True)
    ax2.set_xlim(20, SPEC_MAX_FREQ)
    ax2.set_xscale('log')
    ax2.set_ylim(-90, 60)

    fig.canvas.draw()

    # cache the backgrounds (for fast blitting instead of full redraws)
    bg1 = fig.canvas.copy_from_bbox(ax1.bbox)
    bg2 = fig.canvas.copy_from_bbox(ax2.bbox)

    # Rolling buffer holding the most recent 2 * n_samples audio samples.
    cbuf = ns.CircularBuffer(2*n_samples)

    d0 = now()
    dcount = 0
    while True:

        try:
            d = q.get(False)
            have_d = True
        except queue.Empty:
            have_d = False

        if not have_d: continue

        if d == "QUIT":
            print "dcount = ", dcount
            return

        elif d == "RESET":
            # Blank both plots and clear the rolling audio buffer.
            data = np.zeros(n_samples)
            line1.set_ydata(data)
            line2.set_ydata(spec_yzeros)
            ax1.draw_artist(ax1.patch)
            ax1.draw_artist(line1)
            ax2.draw_artist(ax2.patch)
            ax2.draw_artist(line2)
            fig.canvas.draw()
            fig.canvas.flush_events()
            cbuf.write(ns.Buffer.zeros(2*n_samples))
            continue

        cbuf.write(d)

        d1 = now()
        dt = d1 - d0

        # Throttle redraws to ~30 frames per second.
        if dt.total_seconds() >= (1.0/30):

            dcount += 1

            data = cbuf.read()

            #------------------------------------------------------------------
            # oscilliscope

            # Search the middle part of the data for the peak value
            n = len(data)
            i0 = n / 4
            i1 = i0 + n_samples/2
            middle = data[i0 : i1]
            imax = i0 + middle.argmax()

            # keep peak at 25 % into the window
            i0 = imax - n_samples/4
            i1 = i0 + n_samples
            ydata = data[i0 : i1]

            #------------------------------------------------------------------
            # spectrogram

            spec_ydata = spec.computeMagnitude(data).getdB()[1:]

            #------------------------------------------------------------------
            # Redraw plot

            line1.set_ydata(ydata)
            line2.set_ydata(spec_ydata)

            # restore background
            fig.canvas.restore_region(bg1)
            fig.canvas.restore_region(bg2)

            ax1.draw_artist(ax1.patch)
            ax1.draw_artist(line1)
            ax2.draw_artist(ax2.patch)
            ax2.draw_artist(line2)

            fig.canvas.blit(ax1.bbox)
            fig.canvas.blit(ax2.bbox)

            # cache the backgrounds
            bg1 = fig.canvas.copy_from_bbox(ax1.bbox)
            bg2 = fig.canvas.copy_from_bbox(ax2.bbox)

            d0 = now()
def main():
    """Parse CLI options, spawn the synth and display processes, and run the
    pygame event loop that forwards mouse positions to the worker."""
    parser = argparse.ArgumentParser(description = 'bebot')

    parser.add_argument(
        '-j',
        '--jack',
        dest = 'use_jack',
        default = False,
        action = 'store_true',
        help = """Use the JACK portaudio Host API, if the JACK server isn't
            running, you will get undefined behavior""")

    args = parser.parse_args()

    # Low latency
    if args.use_jack:
        print "Will try to use JACK (known to work on Linux)"
        ns.AudioPlaybackRt.use_jack(True)
        t = LO_T

    # High latency
    else:
        print "Using High Latency"
        t = HI_T

    print "Hello Bebot!"
    print "Press ESC to quit."

    q = multiprocessing.Queue(maxsize = 1)
    disp_q = multiprocessing.Queue()

    p = multiprocessing.Process(target = worker, args = (q, disp_q, t))
    p.start()

    disp_p = multiprocessing.Process(target = display, args = (disp_q,))
    disp_p.start()

    # read from queue that the process initialized and is ready.
    good = q.get()

    if not good:
        sys.exit(1)

    # Init only the display, otherwise when using JACK Audio Connection Kit,
    # pygame won't exit cleanly.
    pygame.display.init()

    # Open window
    window = pygame.display.set_mode((WINDOW_SIZE, WINDOW_SIZE))
    pygame.display.set_caption("Bebot! %s" % ns.__package__)

    # Turn off key repeats
    pygame.key.set_repeat()

    # Simple two-state machine mirroring the worker's WAITING/PLAYING.
    enum = 0
    IDLE = enum ; enum += 1
    PLAYING = enum ; enum += 1

    state = IDLE

    main_loop = True
    while main_loop:

        event = pygame.event.wait()

        #print "event = ", event

        if state == PLAYING and event.type == pygame.MOUSEMOTION:
            q.put(event.pos)

        elif event.type == pygame.MOUSEBUTTONDOWN:
            q.put(event.pos)
            state = PLAYING

        elif event.type == pygame.MOUSEBUTTONUP:
            q.put("STOP")
            state = IDLE

        elif event.type == pygame.KEYDOWN:

            # Break out of loop
            if event.key == pygame.K_ESCAPE:
                print "Quitting main loop"
                sys.stdout.flush()
                main_loop = False
                break

        elif event.type == pygame.QUIT:
            print "Quitting main loop"
            sys.stdout.flush()
            main_loop = False
            break

    # Shut down the child processes in order, then pygame.
    q.put("QUIT")
    p.join()

    disp_q.put("QUIT")
    disp_p.join()

    pygame.display.quit()

    print "Goodbye!"


if __name__ == "__main__":
    main()
| gpl-2.0 |
teonlamont/mne-python | mne/io/proj.py | 2 | 33757 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from copy import deepcopy
from itertools import count
from math import sqrt
import numpy as np
from scipy import linalg
from .tree import dir_tree_find
from .tag import find_tag
from .constants import FIFF
from .pick import pick_types
from .write import (write_int, write_float, write_string, write_name_list,
write_float_matrix, end_block, start_block)
from ..utils import logger, verbose, warn
from ..externals.six import string_types
class Projection(dict):
"""Projection vector.
A basic class to provide a meaningful print for projection vectors.
"""
def __repr__(self): # noqa: D105
s = "%s" % self['desc']
s += ", active : %s" % self['active']
s += ", n_channels : %s" % self['data']['ncol']
return "<Projection | %s>" % s
# Can't use copy_ function here b/c of circular import
def plot_topomap(self, layout=None, cmap=None, sensors=True,
colorbar=False, res=64, size=1, show=True,
outlines='head', contours=6, image_interp='bilinear',
axes=None, info=None):
"""Plot topographic maps of SSP projections.
Parameters
----------
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode (only works if ``colorbar=True``) the colors are
adjustable by clicking and dragging the colorbar with left and right
mouse button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range. Up
and down arrows can be used to change the colormap. If None (default),
'Reds' is used for all positive data, otherwise defaults to 'RdBu_r'.
If 'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. Defaults to 6.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
info : instance of Info | None
The measurement information to use to determine the layout.
If not None, ``layout`` must be None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
Notes
-----
.. versionadded:: 0.15.0
""" # noqa: E501
from ..viz.topomap import plot_projs_topomap
return plot_projs_topomap([self], layout, cmap, sensors, colorbar,
res, size, show, outlines,
contours, image_interp, axes, info)
class ProjMixin(object):
"""Mixin class for Raw, Evoked, Epochs.
Notes
-----
This mixin adds a proj attribute as a property to data containers.
It is True if at least one proj is present and all of them are active.
The projs might not be applied yet if data are not preloaded. In
this case it's the _projector attribute that does the job.
If a private _data attribute is present then the projs applied
to it are the ones marked as active.
A proj parameter passed in constructor of raw or epochs calls
apply_proj and hence after the .proj attribute is True.
As soon as you've applied the projs it will stay active in the
remaining pipeline.
The suggested pipeline is proj=True in epochs (it's cheaper than for raw).
When you use delayed SSP in Epochs, projs are applied when you call
get_data() method. They are not applied to the evoked._data unless you call
apply_proj(). The reason is that you want to reject with projs although
it's not stored in proj mode.
"""
@property
def proj(self):
"""Whether or not projections are active."""
return (len(self.info['projs']) > 0 and
all(p['active'] for p in self.info['projs']))
@verbose
def add_proj(self, projs, remove_existing=False, verbose=None):
"""Add SSP projection vectors.
Parameters
----------
projs : list
List with projection vectors.
remove_existing : bool
Remove the projection vectors currently in the file.
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more).
Returns
-------
self : instance of Raw | Epochs | Evoked
The data container.
"""
if isinstance(projs, Projection):
projs = [projs]
if (not isinstance(projs, list) and
not all(isinstance(p, Projection) for p in projs)):
raise ValueError('Only projs can be added. You supplied '
'something else.')
# mark proj as inactive, as they have not been applied
projs = deactivate_proj(projs, copy=True, verbose=self.verbose)
if remove_existing:
# we cannot remove the proj if they are active
if any(p['active'] for p in self.info['projs']):
raise ValueError('Cannot remove projectors that have '
'already been applied')
self.info['projs'] = projs
else:
self.info['projs'].extend(projs)
# We don't want to add projectors that are activated again.
self.info['projs'] = _uniquify_projs(self.info['projs'],
check_active=False, sort=False)
return self
def apply_proj(self):
"""Apply the signal space projection (SSP) operators to the data.
Notes
-----
Once the projectors have been applied, they can no longer be
removed. It is usually not recommended to apply the projectors at
too early stages, as they are applied automatically later on
(e.g. when computing inverse solutions).
Hint: using the copy method individual projection vectors
can be tested without affecting the original data.
With evoked data, consider the following example::
projs_a = mne.read_proj('proj_a.fif')
projs_b = mne.read_proj('proj_b.fif')
# add the first, copy, apply and see ...
evoked.add_proj(a).copy().apply_proj().plot()
# add the second, copy, apply and see ...
evoked.add_proj(b).copy().apply_proj().plot()
# drop the first and see again
evoked.copy().del_proj(0).apply_proj().plot()
evoked.apply_proj() # finally keep both
Returns
-------
self : instance of Raw | Epochs | Evoked
The instance.
"""
from ..epochs import BaseEpochs
from ..evoked import Evoked
from .base import BaseRaw
if self.info['projs'] is None or len(self.info['projs']) == 0:
logger.info('No projector specified for this dataset. '
'Please consider the method self.add_proj.')
return self
# Exit delayed mode if you apply proj
if isinstance(self, BaseEpochs) and self._do_delayed_proj:
logger.info('Leaving delayed SSP mode.')
self._do_delayed_proj = False
if all(p['active'] for p in self.info['projs']):
logger.info('Projections have already been applied. '
'Setting proj attribute to True.')
return self
_projector, info = setup_proj(deepcopy(self.info), add_eeg_ref=False,
activate=True, verbose=self.verbose)
# let's not raise a RuntimeError here, otherwise interactive plotting
if _projector is None: # won't be fun.
logger.info('The projections don\'t apply to these data.'
' Doing nothing.')
return self
self._projector, self.info = _projector, info
if isinstance(self, (BaseRaw, Evoked)):
if self.preload:
self._data = np.dot(self._projector, self._data)
else: # BaseEpochs
if self.preload:
for ii, e in enumerate(self._data):
self._data[ii] = self._project_epoch(e)
else:
self.load_data() # will automatically apply
logger.info('SSP projectors applied...')
return self
def del_proj(self, idx='all'):
"""Remove SSP projection vector.
Note: The projection vector can only be removed if it is inactive
(has not been applied to the data).
Parameters
----------
idx : int | list of int | str
Index of the projector to remove. Can also be "all" (default)
to remove all projectors.
Returns
-------
self : instance of Raw | Epochs | Evoked
"""
if isinstance(idx, string_types) and idx == 'all':
idx = list(range(len(self.info['projs'])))
idx = np.atleast_1d(np.array(idx, int)).ravel()
if any(self.info['projs'][ii]['active'] for ii in idx):
raise ValueError('Cannot remove projectors that have already '
'been applied')
keep = np.ones(len(self.info['projs']))
keep[idx] = False # works with negative indexing and does checks
self.info['projs'] = [p for p, k in zip(self.info['projs'], keep) if k]
return self
def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):
"""Plot SSP vector.
Parameters
----------
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List
The channel type to plot. For 'grad', the gradiometers are collec-
ted in pairs and the RMS for each pair is plotted. If None
(default), it will return all channel types present. If a list of
ch_types is provided, it will return multiple figures.
layout : None | Layout | List of Layouts
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations. Or a list of Layout if projections
are from different sensor types.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
if self.info['projs'] is not None or len(self.info['projs']) != 0:
from ..viz.topomap import plot_projs_topomap
from ..channels.layout import find_layout
if layout is None:
layout = []
if ch_type is None:
ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]
elif isinstance(ch_type, string_types):
ch_type = [ch_type]
for ch in ch_type:
if ch in self:
layout.append(find_layout(self.info, ch, exclude=[]))
else:
warn('Channel type %s is not found in info.' % ch)
fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)
else:
raise ValueError("Info is missing projs. Nothing to plot.")
return fig
def _proj_equal(a, b, check_active=True):
"""Test if two projectors are equal."""
equal = ((a['active'] == b['active'] or not check_active) and
a['kind'] == b['kind'] and
a['desc'] == b['desc'] and
a['data']['col_names'] == b['data']['col_names'] and
a['data']['row_names'] == b['data']['row_names'] and
a['data']['ncol'] == b['data']['ncol'] and
a['data']['nrow'] == b['data']['nrow'] and
np.all(a['data']['data'] == b['data']['data']))
return equal
@verbose
def _read_proj(fid, node, verbose=None):
"""Read spatial projections from a FIF file.
Parameters
----------
fid : file
The file descriptor of the open file.
node : tree node
The node of the tree where to look.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projs : list of Projection
The list of projections.
"""
projs = list()
# Locate the projection data
nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)
if len(nodes) == 0:
return projs
tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)
if tag is not None:
global_nchan = int(tag.data)
items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)
for item in items:
# Find all desired tags in one item
tag = find_tag(fid, item, FIFF.FIFF_NCHAN)
if tag is not None:
nchan = int(tag.data)
else:
nchan = global_nchan
tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)
if tag is not None:
desc = tag.data
else:
tag = find_tag(fid, item, FIFF.FIFF_NAME)
if tag is not None:
desc = tag.data
else:
raise ValueError('Projection item description missing')
# XXX : is this useful ?
# tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
# if tag is not None:
# namelist = tag.data
# else:
# raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
if tag is not None:
kind = int(tag.data)
else:
raise ValueError('Projection item kind missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
if tag is not None:
nvec = int(tag.data)
else:
raise ValueError('Number of projection vectors not specified')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
if tag is not None:
names = tag.data.split(':')
else:
raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
if tag is not None:
data = tag.data
else:
raise ValueError('Projection item data missing')
tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
if tag is not None:
active = bool(tag.data)
else:
active = False
tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR)
if tag is not None:
explained_var = tag.data
else:
explained_var = None
# handle the case when data is transposed for some reason
if data.shape[0] == len(names) and data.shape[1] == nvec:
data = data.T
if data.shape[1] != len(names):
raise ValueError('Number of channel names does not match the '
'size of data matrix')
# Use exactly the same fields in data as in a named matrix
one = Projection(kind=kind, active=active, desc=desc,
data=dict(nrow=nvec, ncol=nchan, row_names=None,
col_names=names, data=data),
explained_var=explained_var)
projs.append(one)
if len(projs) > 0:
logger.info(' Read a total of %d projection items:' % len(projs))
for k in range(len(projs)):
if projs[k]['active']:
misc = 'active'
else:
misc = ' idle'
logger.info(' %s (%d x %d) %s'
% (projs[k]['desc'], projs[k]['data']['nrow'],
projs[k]['data']['ncol'], misc))
return projs
###############################################################################
# Write
def _write_proj(fid, projs):
"""Write a projection operator to a file.
Parameters
----------
fid : file
The file descriptor of the open file.
projs : dict
The projection operator.
"""
if len(projs) == 0:
return
start_block(fid, FIFF.FIFFB_PROJ)
for proj in projs:
start_block(fid, FIFF.FIFFB_PROJ_ITEM)
write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
proj['data']['col_names'])
write_string(fid, FIFF.FIFF_NAME, proj['desc'])
write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)
write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])
write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])
write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,
proj['data']['data'])
if proj['explained_var'] is not None:
write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
proj['explained_var'])
end_block(fid, FIFF.FIFFB_PROJ_ITEM)
end_block(fid, FIFF.FIFFB_PROJ)
###############################################################################
# Utils
def _check_projs(projs, copy=True):
"""Check that projs is a list of Projection."""
if not isinstance(projs, (list, tuple)):
raise TypeError('projs must be a list or tuple, got %s'
% (type(projs),))
for pi, p in enumerate(projs):
if not isinstance(p, Projection):
raise TypeError('All entries in projs list must be Projection '
'instances, but projs[%d] is type %s'
% (pi, type(p)))
return deepcopy(projs) if copy else projs
def make_projector(projs, ch_names, bads=(), include_active=True):
"""Create an SSP operator from SSP projection vectors.
Parameters
----------
projs : list
List of projection vectors.
ch_names : list of str
List of channels to include in the projection matrix.
bads : list of str
Some bad channels to exclude. If bad channels were marked
in the raw file when projs were calculated using mne-python,
they should not need to be included here as they will
have been automatically omitted from the projectors.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
U : array
The orthogonal basis of the projection vectors (optional).
"""
return _make_projector(projs, ch_names, bads, include_active)
def _make_projector(projs, ch_names, bads=(), include_active=True,
inplace=False):
"""Subselect projs based on ch_names and bads.
Use inplace=True mode to modify ``projs`` inplace so that no
warning will be raised next time projectors are constructed with
the given inputs. If inplace=True, no meaningful data are returned.
"""
nchan = len(ch_names)
if nchan == 0:
raise ValueError('No channel names specified')
default_return = (np.eye(nchan, nchan), 0, [])
# Check trivial cases first
if projs is None:
return default_return
nvec = 0
nproj = 0
for p in projs:
if not p['active'] or include_active:
nproj += 1
nvec += p['data']['nrow']
if nproj == 0:
return default_return
# Pick the appropriate entries
vecs = np.zeros((nchan, nvec))
nvec = 0
nonzero = 0
bads = set(bads)
for k, p in enumerate(projs):
if not p['active'] or include_active:
if (len(p['data']['col_names']) !=
len(np.unique(p['data']['col_names']))):
raise ValueError('Channel name list in projection item %d'
' contains duplicate items' % k)
# Get the two selection vectors to pick correct elements from
# the projection vectors omitting bad channels
sel = []
vecsel = []
p_set = set(p['data']['col_names']) # faster membership access
for c, name in enumerate(ch_names):
if name not in bads and name in p_set:
sel.append(c)
vecsel.append(p['data']['col_names'].index(name))
# If there is something to pick, pickit
nrow = p['data']['nrow']
this_vecs = vecs[:, nvec:nvec + nrow]
if len(sel) > 0:
this_vecs[sel] = p['data']['data'][:, vecsel].T
# Rescale for better detection of small singular values
for v in range(p['data']['nrow']):
psize = sqrt(np.sum(this_vecs[:, v] * this_vecs[:, v]))
if psize > 0:
orig_n = p['data']['data'].any(axis=0).sum()
# Average ref still works if channels are removed
if len(vecsel) < 0.9 * orig_n and not inplace and \
(p['kind'] != FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF or
len(vecsel) == 1):
warn('Projection vector "%s" has magnitude %0.2f '
'(should be unity), applying projector with '
'%s/%s of the original channels available may '
'be dangerous, consider recomputing and adding '
'projection vectors for channels that are '
'eventually used. If this is intentional, '
'consider using info.normalize_proj()'
% (p['desc'], psize, len(vecsel), orig_n))
this_vecs[:, v] /= psize
nonzero += 1
# If doing "inplace" mode, "fix" the projectors to only operate
# on this subset of channels.
if inplace:
p['data']['data'] = this_vecs[sel].T
p['data']['col_names'] = [p['data']['col_names'][ii]
for ii in vecsel]
nvec += p['data']['nrow']
# Check whether all of the vectors are exactly zero
if nonzero == 0 or inplace:
return default_return
# Reorthogonalize the vectors
U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
# Throw away the linearly dependent guys
nproj = np.sum((S / S[0]) > 1e-2)
U = U[:, :nproj]
# Here is the celebrated result
proj = np.eye(nchan, nchan) - np.dot(U, U.T)
if nproj >= nchan: # e.g., 3 channels and 3 projectors
raise RuntimeError('Application of %d projectors for %d channels '
'will yield no components.' % (nproj, nchan))
return proj, nproj, U
def _normalize_proj(info):
    """Normalize proj after subselection to avoid warnings.

    This is really only useful for tests, and might not be needed
    eventually if we change or improve our handling of projectors
    with picks.
    """
    # Here we do info.get b/c info can actually be a noise cov
    # (covariance objects keep their channel list under 'names' rather
    # than 'ch_names').
    # inplace=True makes _make_projector trim each projector's data and
    # col_names down to the channels that survive the subselection, so the
    # magnitude warning is not raised on the next construction; the return
    # value is meaningless in this mode and is deliberately discarded.
    _make_projector(info['projs'], info.get('ch_names', info.get('names')),
                    info['bads'], include_active=True, inplace=True)
def make_projector_info(info, include_active=True):
    """Build the SSP operator for the good channels in ``info``.

    Thin wrapper around :func:`make_projector` that pulls the projectors,
    channel names and bad channels out of the measurement info.

    Parameters
    ----------
    info : dict
        Measurement info.
    include_active : bool
        Also include projectors that are already active.

    Returns
    -------
    proj : array of shape [n_channels, n_channels]
        The projection operator to apply to the data.
    nproj : int
        How many items in the projector.
    """
    proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
                                    bads=info['bads'],
                                    include_active=include_active)
    return proj, nproj
@verbose
def activate_proj(projs, copy=True, verbose=None):
    """Mark all projection vectors as active.

    Useful before passing them to make_projector.

    Parameters
    ----------
    projs : list
        The projectors.
    copy : bool
        Modify projs in place or operate on a copy.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    projs : list
        The projectors, each with ``['active']`` set to True.
    """
    out = deepcopy(projs) if copy else projs
    for item in out:
        item['active'] = True
    logger.info('%d projection items activated' % len(out))
    return out
@verbose
def deactivate_proj(projs, copy=True, verbose=None):
    """Mark all projection vectors as inactive.

    Useful before saving raw data without projectors applied.

    Parameters
    ----------
    projs : list
        The projectors.
    copy : bool
        Modify projs in place or operate on a copy.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    projs : list
        The projectors, each with ``['active']`` set to False.
    """
    out = deepcopy(projs) if copy else projs
    for item in out:
        item['active'] = False
    logger.info('%d projection items deactivated' % len(out))
    return out
@verbose
def make_eeg_average_ref_proj(info, activate=True, verbose=None):
"""Create an EEG average reference SSP projection vector.
Parameters
----------
info : dict
Measurement info.
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
eeg_proj: instance of Projection
The SSP/PCA projector.
"""
if info.get('custom_ref_applied', False):
raise RuntimeError('A custom reference has been applied to the '
'data earlier. Please use the '
'mne.io.set_eeg_reference function to move from '
'one EEG reference to another.')
logger.info("Adding average EEG reference projection.")
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
ch_names = info['ch_names']
eeg_names = [ch_names[k] for k in eeg_sel]
n_eeg = len(eeg_sel)
if n_eeg == 0:
raise ValueError('Cannot create EEG average reference projector '
'(no EEG data found)')
vec = np.ones((1, n_eeg))
vec /= n_eeg
explained_var = None
eeg_proj_data = dict(col_names=eeg_names, row_names=None,
data=vec, nrow=1, ncol=n_eeg)
eeg_proj = Projection(active=activate, data=eeg_proj_data,
desc='Average EEG reference',
kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF,
explained_var=explained_var)
return eeg_proj
def _has_eeg_average_ref_proj(projs, check_active=False):
"""Determine if a list of projectors has an average EEG ref.
Optionally, set check_active=True to additionally check if the CAR
has already been applied.
"""
for proj in projs:
if (proj['desc'] == 'Average EEG reference' or
proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):
if not check_active or proj['active']:
return True
return False
def _needs_eeg_average_ref_proj(info):
    """Determine if the EEG needs an average EEG reference.

    This returns True if no custom reference has been applied and no average
    reference projection is present in the list of projections.
    """
    eeg_picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
                           exclude='bads')
    if len(eeg_picks) == 0:
        return False  # no EEG channels, nothing to reference
    if info['custom_ref_applied']:
        return False
    return not _has_eeg_average_ref_proj(info['projs'])
@verbose
def setup_proj(info, add_eeg_ref=True, activate=True, verbose=None):
"""Set up projection for Raw and Epochs.
Parameters
----------
info : dict
The measurement info.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
projector : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
info : dict
The modified measurement info (Warning: info is modified inplace).
"""
# Add EEG ref reference proj if necessary
if add_eeg_ref and _needs_eeg_average_ref_proj(info):
eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
info['projs'].append(eeg_proj)
# Create the projector
projector, nproj = make_projector_info(info)
if nproj == 0:
if verbose:
logger.info('The projection vectors do not apply to these '
'channels')
projector = None
else:
logger.info('Created an SSP operator (subspace dimension = %d)'
% nproj)
# The projection items have been activated
if activate:
info['projs'] = activate_proj(info['projs'], copy=False)
return projector, info
def _uniquify_projs(projs, check_active=True, sort=True):
"""Make unique projs."""
final_projs = []
for proj in projs: # flatten
if not any(_proj_equal(p, proj, check_active) for p in final_projs):
final_projs.append(proj)
my_count = count(len(final_projs))
def sorter(x):
"""Sort in a nice way."""
digits = [s for s in x['desc'] if s.isdigit()]
if digits:
sort_idx = int(digits[-1])
else:
sort_idx = next(my_count)
return (sort_idx, x['desc'])
return sorted(final_projs, key=sorter) if sort else final_projs
| bsd-3-clause |
FernanOrtega/DAT210x | Module4/assignment4.py | 1 | 3509 | import pandas as pd
import numpy as np
import scipy.io
import random, math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def Plot2D(T, title, x, y, num_to_plot=40):
  """Scatter components x vs. y of T, overlaying random image thumbnails.

  Relies on the module-level globals ``df`` (one flattened image per row),
  ``num_images`` and ``num_pixels`` defined later in this script.

  T : ndarray
    Transformed data, one row per image.
  x, y : int
    Column (component) indices to draw on the x- and y-axis.
  num_to_plot : int
    Number of randomly sampled thumbnails to overlay on the scatter.
  """
  # This method picks a bunch of random samples (images in your case)
  # to plot onto the chart:
  fig = plt.figure()
  ax = fig.add_subplot(111)
  ax.set_title(title)
  ax.set_xlabel('Component: {0}'.format(x))
  ax.set_ylabel('Component: {0}'.format(y))
  # Thumbnail extent: 8% of the data range along each axis
  x_size = (max(T[:,x]) - min(T[:,x])) * 0.08
  y_size = (max(T[:,y]) - min(T[:,y])) * 0.08
  for i in range(num_to_plot):
    img_num = int(random.random() * num_images)
    x0, y0 = T[img_num,x]-x_size/2., T[img_num,y]-y_size/2.
    x1, y1 = T[img_num,x]+x_size/2., T[img_num,y]+y_size/2.
    # NOTE(review): pandas removed Series.reshape (0.19+); this may need
    # .values.reshape(...) on newer pandas -- confirm the pinned version.
    img = df.iloc[img_num,:].reshape(num_pixels, num_pixels)
    ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest', zorder=100000, extent=(x0, x1, y0, y1))
  # It also plots the full scatter:
  ax.scatter(T[:,x],T[:,y], marker='.',alpha=0.7)
def Plot3D(T, title, x, y, z, num_to_plot=40):
# This method picks a bunch of random samples (images in your case)
# to plot onto the chart:
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.set_title(title)
ax.set_xlabel('Component: {0}'.format(x))
ax.set_ylabel('Component: {0}'.format(y))
ax.set_zlabel('Component: {0}'.format(z))
x_size = (max(T[:,x]) - min(T[:,x])) * 0.08
y_size = (max(T[:,y]) - min(T[:,y])) * 0.08
for i in range(num_to_plot):
img_num = int(random.random() * num_images)
x0, y0 = T[img_num,x]-x_size/2., T[img_num,y]-y_size/2.
x1, y1 = T[img_num,x]+x_size/2., T[img_num,y]+y_size/2.
img = df.iloc[img_num,:].reshape(num_pixels, num_pixels)
ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest', zorder=100000, extent=(x0, x1, y0, y1))
# It also plots the full scatter:
ax.scatter(T[:,x],T[:,y],T[:,z], c='r', marker='.',alpha=0.7)
# A .MAT file is a .MATLAB file. The faces dataset could have came
# in through .png images, but we'll show you how to do that in
# anither lab. For now, you'll see how to import .mats:
mat = scipy.io.loadmat('Datasets/face_data.mat')
df = pd.DataFrame(mat['images']).T
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
# Rotate the pictures, so we don't have to crane our necks:
for i in range(num_images):
df.loc[i,:] = df.loc[i,:].reshape(num_pixels, num_pixels).T.reshape(-1)
#
# TODO: Implement PCA here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D.
#
# The format is: Plot2D(T, title, x, y, num_to_plot=40):
# T is your transformed data, NDArray.
# title is your chart title
# x is the principal component you want displayed on the x-axis, Can be 0 or 1
# y is the principal component you want displayed on the y-axis, Can be 1 or 2
#
from sklearn.decomposition import PCA
pca = PCA(n_components = 3, svd_solver='full')
T = pca.fit_transform(df)
Plot2D(T, 'PCA', 1, 2)
#
# TODO: Implement Isomap here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D using
# the first two components.
#
from sklearn import manifold
iso = manifold.Isomap(n_neighbors=3, n_components=3)
iso.fit(df)
manifold = iso.transform(df)
Plot2D(manifold, 'Isomap', 1, 2)
#
# TODO: If you're up for a challenge, draw your dataframes in 3D
# Even if you're not, just do it anyway.
#
#Plot3D(T, 'PCA', 0, 1, 2)
#Plot3D(manifold, 'Isomap', 0, 1, 2)
plt.show()
| mit |
procoder317/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
    """Standardize x to zero mean and unit variance, modifying it in place.

    Parameters
    -----------
    x: ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis: int, optional
        Axis along which the mean and variance are calculated.
    """
    # moveaxis returns a *view*, so the in-place updates below hit x itself.
    view = np.moveaxis(x, axis, 0)
    view -= view.mean(axis=0)
    view /= view.std(axis=0)
def test_gs():
    # Test gram schmidt orthonormalization
    # generate a random orthogonal matrix
    rng = np.random.RandomState(0)
    W, _, _ = np.linalg.svd(rng.randn(10, 10))  # square => W is orthogonal
    w = rng.randn(10)
    # Decorrelating against all 10 basis vectors must annihilate w; the
    # assert below shows _gs_decorrelation modifies w in place here.
    _gs_decorrelation(w, W, 10)
    assert_less((w ** 2).sum(), 1.e-10)
    # Decorrelating against only the first 5 vectors: the result u should
    # have (numerically) zero projection onto those 5 directions.
    w = rng.randn(10)
    u = _gs_decorrelation(w, W, 5)
    tmp = np.dot(u, W.T)
    assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources: a square wave and heavy-tailed (Student-t) noise.
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(2, 1000)
    center_and_norm(m)
    # function as fun arg
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)
    # Exercise every combination of algorithm, contrast function and
    # whitening mode.
    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            # Passing a raw callable (instead of a name or a fun returning
            # (gx, g'x)) must be rejected.
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # Fix the (arbitrary) sign of each recovered source.
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))
        # Check that we have estimated the original sources
        # (looser tolerance when noise was added to the mixtures).
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
        # Test FastICA class: the estimator API must match the function API.
        _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
        ica = FastICA(fun=nl, algorithm=algo, random_state=0)
        sources = ica.fit_transform(m.T)
        assert_equal(ica.components_.shape, (2, 2))
        assert_equal(sources.shape, (1000, 2))
        assert_array_almost_equal(sources_fun, sources)
        assert_array_almost_equal(sources, ica.transform(m.T))
        assert_equal(ica.mixing_.shape, (2, 2))
        # Invalid ``fun`` arguments must raise at fit time.
        for fn in [np.tanh, "exp(-.5(x^2))"]:
            ica = FastICA(fun=fn, algorithm=algo, random_state=0)
            assert_raises(ValueError, ica.fit, m.T)
    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    m = [[0, 1], [1, 0]]
    # test for issue #697: with whiten=False, n_components cannot be honored
    # and should be ignored with a UserWarning rather than failing.
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    assert_warns(UserWarning, ica.fit, m)
    # The mixing matrix must still be exposed after fitting without whitening.
    assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix: 6 observed channels from 2 sources, i.e. a non-square
    # (overdetermined) mixing problem requiring dimension reduction.
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)
    center_and_norm(m)
    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T
    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # Fix the (arbitrary) sign of each recovered source.
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    # Test FastICA.fit_transform: it must be equivalent to fit followed by
    # transform when the random_state is fixed.
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 10))
    for whiten, n_components in [[True, 5], [False, None]]:
        n_components_ = (n_components if n_components is not None else
                         X.shape[1])
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        Xt = ica.fit_transform(X)
        assert_equal(ica.components_.shape, (n_components_, 10))
        assert_equal(Xt.shape, (100, n_components_))
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        ica.fit(X)
        assert_equal(ica.components_.shape, (n_components_, 10))
        Xt2 = ica.transform(X)
        # Same seed, same data: both code paths must agree.
        assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
    # Test FastICA.inverse_transform
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # Expected ``mixing_`` shape for each (whiten, n_components) pair.
    # Without whitening n_components is ignored, so both False entries map
    # to the full (n_features, n2) shape.
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            n_components_ = (n_components if n_components is not None else
                             X.shape[1])
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            with warnings.catch_warnings(record=True):
                # catch "n_components ignored" warning
                Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components_)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            # inverse_transform maps back to the original feature space.
            assert_equal(X.shape, X2.shape)
            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardShear/Normal_Load/Sigma_n_1e9/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
# Large base font so axis labels stay legible in the saved PDF figures.
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################

# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)

# Read the time and displacement
times = finput["time"][:]
# Row 9 holds the normal stress, row 6 the normal strain; negate so
# compression plots as positive.
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];

# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")

# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
# Keyword must be lower-case "linewidth": the capitalized "Linewidth" used
# previously raises AttributeError on recent matplotlib versions.
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
# plt.hold(True) was removed in matplotlib 3.0; overplotting on the current
# axes is the default behavior, so no replacement call is needed.
###############################################################
## Numerical Solution
###############################################################

# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)

# Read the time and displacement
times = finput["time"][:]
# Same element-output rows as the analytical case: 9 = stress, 6 = strain.
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];

# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")

# Plot the figure. Add labels and titles.
# Keyword must be lower-case "linewidth": the capitalized "Linewidth" used
# previously raises AttributeError on recent matplotlib versions.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")

#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
NikNitro/Python-iBeacon-Scan | sympy/plotting/plot_implicit.py | 83 | 14400 | """Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval arithmetic.
It is also possible to specify to use the fall back algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
    """ Representation for Implicit plot """

    # Flag consulted by the plotting backends to pick the implicit code path.
    is_implicit = True

    def __init__(self, expr, var_start_end_x, var_start_end_y,
            has_equality, use_interval_math, depth, nb_of_points,
            line_color):
        super(ImplicitSeries, self).__init__()
        self.expr = sympify(expr)
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = float(var_start_end_x[1])
        self.end_x = float(var_start_end_x[2])
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = float(var_start_end_y[1])
        self.end_y = float(var_start_end_y[2])
        self.get_points = self.get_raster
        self.has_equality = has_equality  # If the expression has equality, i.e.
                                          #Eq, Greaterthan, LessThan.
        self.nb_of_points = nb_of_points
        self.use_interval_math = use_interval_math
        # At least 4 subdivision passes are always performed; the user's
        # ``depth`` only adds extra refinement levels on top of that base.
        self.depth = 4 + depth
        self.line_color = line_color

    def __str__(self):
        return ('Implicit equation: %s for '
                '%s over %s and %s over %s') % (
                    str(self.expr),
                    str(self.var_x),
                    str((self.start_x, self.end_x)),
                    str(self.var_y),
                    str((self.start_y, self.end_y)))

    def get_raster(self):
        func = experimental_lambdify((self.var_x, self.var_y), self.expr,
                                    use_interval=True)
        xinterval = interval(self.start_x, self.end_x)
        yinterval = interval(self.start_y, self.end_y)
        try:
            # Probe once to find out whether the lambdified expression
            # supports interval arithmetic at all.
            temp = func(xinterval, yinterval)
        except AttributeError:
            # Interval arithmetic failed: fall back to uniform meshing.
            if self.use_interval_math:
                warnings.warn("Adaptive meshing could not be applied to the"
                            " expression. Using uniform meshing.")
            self.use_interval_math = False

        if self.use_interval_math:
            return self._get_raster_interval(func)
        else:
            return self._get_meshes_grid()

    def _get_raster_interval(self, func):
        """ Uses interval math to adaptively mesh and obtain the plot"""
        k = self.depth
        interval_list = []
        #Create initial 32 divisions
        np = import_module('numpy')
        xsample = np.linspace(self.start_x, self.end_x, 33)
        ysample = np.linspace(self.start_y, self.end_y, 33)

        #Add a small jitter so that there are no false positives for equality.
        # Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
        #which will draw a rectangle.
        jitterx = (np.random.rand(
            len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
        jittery = (np.random.rand(
            len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
        xsample += jitterx
        ysample += jittery

        # Pair consecutive sample points into intervals covering the domain.
        xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
                                                     xsample[1:])]
        yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
                                                     ysample[1:])]
        interval_list = [[x, y] for x in xinter for y in yinter]
        plot_list = []

        #recursive call refinepixels which subdivides the intervals which are
        #neither True nor False according to the expression.
        def refine_pixels(interval_list):
            """ Evaluates the intervals and subdivides the interval if the
            expression is partially satisfied."""
            temp_interval_list = []
            plot_list = []
            for intervals in interval_list:

                #Convert the array indices to x and y values
                intervalx = intervals[0]
                intervaly = intervals[1]
                func_eval = func(intervalx, intervaly)
                #The expression is valid in the interval. Change the contour
                #array values to 1.
                if func_eval[1] is False or func_eval[0] is False:
                    pass
                elif func_eval == (True, True):
                    plot_list.append([intervalx, intervaly])
                elif func_eval[1] is None or func_eval[0] is None:
                    #Subdivide the undecided cell into four quadrants.
                    avgx = intervalx.mid
                    avgy = intervaly.mid
                    a = interval(intervalx.start, avgx)
                    b = interval(avgx, intervalx.end)
                    c = interval(intervaly.start, avgy)
                    d = interval(avgy, intervaly.end)
                    temp_interval_list.append([a, c])
                    temp_interval_list.append([a, d])
                    temp_interval_list.append([b, c])
                    temp_interval_list.append([b, d])
            return temp_interval_list, plot_list

        while k >= 0 and len(interval_list):
            interval_list, plot_list_temp = refine_pixels(interval_list)
            plot_list.extend(plot_list_temp)
            k = k - 1
        #Check whether the expression represents an equality
        #If it represents an equality, then none of the intervals
        #would have satisfied the expression due to floating point
        #differences. Add all the undecided values to the plot.
        if self.has_equality:
            for intervals in interval_list:
                intervalx = intervals[0]
                intervaly = intervals[1]
                func_eval = func(intervalx, intervaly)
                if func_eval[1] and func_eval[0] is not False:
                    plot_list.append([intervalx, intervaly])
        return plot_list, 'fill'

    def _get_meshes_grid(self):
        """Generates the mesh for generating a contour.

        In the case of equality, ``contour`` function of matplotlib can
        be used. In other cases, matplotlib's ``contourf`` is used.
        """
        equal = False
        if isinstance(self.expr, Equality):
            expr = self.expr.lhs - self.expr.rhs
            equal = True
        elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
            expr = self.expr.lhs - self.expr.rhs
        elif isinstance(self.expr, (LessThan, StrictLessThan)):
            expr = self.expr.rhs - self.expr.lhs
        else:
            raise NotImplementedError("The expression is not supported for "
                                    "plotting in uniform meshed plot.")
        np = import_module('numpy')
        xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
        yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
        x_grid, y_grid = np.meshgrid(xarray, yarray)

        func = vectorized_lambdify((self.var_x, self.var_y), expr)
        z_grid = func(x_grid, y_grid)
        # Collapse the field to its sign so contouring finds the zero level
        # set (equality) or the sign-change boundary (inequalities).
        z_grid[np.ma.where(z_grid < 0)] = -1
        z_grid[np.ma.where(z_grid > 0)] = 1
        if equal:
            return xarray, yarray, z_grid, 'contour'
        else:
            return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
    """A plot function to plot implicit equations / inequalities.

    Arguments
    =========

    - ``expr`` : The equation / inequality that is to be plotted.
    - ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
      and range as ``(symbol, xmin, xmax)``
    - ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
      and range as ``(symbol, ymin, ymax)``

    If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
    expression will be assigned in the order they are sorted.

    The following keyword arguments can also be used:

    - ``adaptive``. Boolean. The default value is set to True. It has to be
        set to False if you want to use a mesh grid.

    - ``depth`` integer. The depth of recursion for adaptive mesh grid.
        Default value is 0. Takes value in the range (0, 4).

    - ``points`` integer. The number of points if adaptive mesh grid is not
        used. Default value is 300.

    - ``title`` string .The title for the plot.

    - ``xlabel`` string. The label for the x-axis

    - ``ylabel`` string. The label for the y-axis

    Aesthetics options:

    - ``line_color``: float or string. Specifies the color for the plot.
        See ``Plot`` to see how to set color for the plots.

    plot_implicit, by default, uses interval arithmetic to plot functions. If
    the expression cannot be plotted using interval arithmetic, it defaults to
    a generating a contour using a mesh grid of fixed number of points. By
    setting adaptive to False, you can force plot_implicit to use the mesh
    grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic, fails to plot with small line width.

    Examples
    ========

    Plot expressions:

    >>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
    >>> x, y = symbols('x y')

    Without any ranges for the symbols in the expression

    >>> p1 = plot_implicit(Eq(x**2 + y**2, 5))

    With the range for the symbols

    >>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
    ...         (x, -3, 3), (y, -3, 3))

    With depth of recursion as argument.

    >>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
    ...         (x, -4, 4), (y, -4, 4), depth = 2)

    Using mesh grid and not using adaptive meshing.

    >>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
    ...         (x, -5, 5), (y, -2, 2), adaptive=False)

    Using mesh grid with number of points as input.

    >>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
    ...         (x, -5, 5), (y, -2, 2),
    ...         adaptive=False, points=400)

    Plotting regions.

    >>> p6 = plot_implicit(y > x**2)

    Plotting Using boolean conjunctions.

    >>> p7 = plot_implicit(And(y > x, y > -x))

    When plotting an expression with a single variable (y - 1, for example),
    specify the x or the y variable explicitly:

    >>> p8 = plot_implicit(y - 1, y_var=y)
    >>> p9 = plot_implicit(x - 1, x_var=x)
    """
    has_equality = False  # Represents whether the expression contains an Equality,
                          #GreaterThan or LessThan

    def arg_expand(bool_expr):
        """
        Recursively expands the arguments of an Boolean Function
        """
        for arg in bool_expr.args:
            if isinstance(arg, BooleanFunction):
                arg_expand(arg)
            elif isinstance(arg, Relational):
                arg_list.append(arg)

    arg_list = []
    if isinstance(expr, BooleanFunction):
        arg_expand(expr)

        #Check whether there is an equality in the expression provided.
        if any(isinstance(e, (Equality, GreaterThan, LessThan))
               for e in arg_list):
            has_equality = True

    elif not isinstance(expr, Relational):
        # A bare expression is interpreted as "expr == 0".
        expr = Eq(expr, 0)
        has_equality = True
    elif isinstance(expr, (Equality, GreaterThan, LessThan)):
        has_equality = True

    xyvar = [i for i in (x_var, y_var) if i is not None]
    free_symbols = expr.free_symbols
    range_symbols = Tuple(*flatten(xyvar)).free_symbols
    undeclared = free_symbols - range_symbols
    if len(free_symbols & range_symbols) > 2:
        raise NotImplementedError("Implicit plotting is not implemented for "
                                  "more than 2 variables")

    #Create default ranges if the range is not provided.
    default_range = Tuple(-5, 5)

    def _range_tuple(s):
        if isinstance(s, Symbol):
            return Tuple(s) + default_range
        if len(s) == 3:
            return Tuple(*s)
        raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)

    if len(xyvar) == 0:
        xyvar = list(_sort_gens(free_symbols))
    var_start_end_x = _range_tuple(xyvar[0])
    x = var_start_end_x[0]
    if len(xyvar) != 2:
        if x in undeclared or not undeclared:
            # No usable second symbol: introduce a dummy for the y-axis.
            xyvar.append(Dummy('f(%s)' % x.name))
        else:
            xyvar.append(undeclared.pop())
    var_start_end_y = _range_tuple(xyvar[1])

    use_interval = kwargs.pop('adaptive', True)
    nb_of_points = kwargs.pop('points', 300)
    depth = kwargs.pop('depth', 0)
    line_color = kwargs.pop('line_color', "blue")
    #Check whether the depth is greater than 4 or less than 0.
    if depth > 4:
        depth = 4
    elif depth < 0:
        depth = 0

    series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
                                     has_equality, use_interval, depth,
                                     nb_of_points, line_color)
    show = kwargs.pop('show', True)

    #set the x and y limits
    kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
    kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
    # set the x and y labels
    kwargs.setdefault('xlabel', var_start_end_x[0].name)
    kwargs.setdefault('ylabel', var_start_end_y[0].name)
    p = Plot(series_argument, **kwargs)
    if show:
        p.show()
    return p
| gpl-3.0 |
koverholt/bayes-fire | Example_Cases/Evac_Stairs/Scripts/run_evac_alphabeta_model.py | 1 | 6411 | #!/usr/bin/env python
"""
PyMC Bayesian Inference on Evacuation Data
Model 1: preevac_mu vs theta[0] + theta[1]*occupants + theta[2]*type
Model 2: exit_mu vs theta[0] + theta[1]*occupants + theta[2]*exit_distance + theta[3]*type
Model 3: traveltime_mu vs theta[0] + theta[1]*exit_distance + theta[2]*type
"""
import matplotlib
matplotlib.use("Agg")
import pylab as pl
import evac_flow_exit_mu_single_graphics as graphics
import pymc as mc
import evac_alphabeta_paper_model as models
import data_evac
# ============
# = Settings =
# ============

# MCMC chain length, burn-in, and thinning shared by every candidate model.
mcmc_iterations = 1000000
burn_iterations = 800000
thinning_parameter = 200

# Output naming: artifacts land in dir_name as project_name + case_name + suffix.
case_name = 'final_models'
dir_name = '../Figures/Mu_Paper_Models/'
project_name = 'alpha_model_'
# ===========================================================================================
# = Models 1-4: beta vs alpha                                                               =
# ===========================================================================================

# All four candidate models go through the identical
# build -> MAP fit -> MCMC sample -> diagnostic-plot pipeline, so run them
# through one loop instead of four copy-pasted sections.
model_builders = [
    ('_alphabeta_1', models.model1),
    ('_alphabeta_2', models.model2),
    ('_alphabeta_3', models.model3),
    ('_alphabeta_4', models.model4),
]

fitted = []
for model_name, build_model in model_builders:
    # Generate model
    model_vars = build_model()

    # Fit model with MAP estimates (good starting values for the sampler).
    # Named map_fit rather than `map` to avoid shadowing the builtin.
    map_fit = mc.MAP(model_vars)
    map_fit.fit(method='fmin_powell', verbose=2)

    # Import model variables and set database options
    m = mc.MCMC(model_vars, db='sqlite',
                dbname=dir_name + project_name + case_name + model_name + '.sqlite')

    # Use adaptive Metropolis-Hastings step method
    m.use_step_method(mc.AdaptiveMetropolis, [m.theta])

    # Configure and run MCMC simulation
    m.sample(iter=mcmc_iterations, burn=burn_iterations, thin=thinning_parameter)

    # Plot resulting distributions and convergence diagnostics
    mc.Matplot.plot(m, format='pdf',
                    path=dir_name + project_name + case_name + model_name,
                    common_scale=False)

    fitted.append((model_name, m))

# =================
# = Print results =
# =================

# Display results (print() calls work on both Python 2 and 3).
for index, (model_name, m) in enumerate(fitted, 1):
    print("Results for Model %d %s" % (index, model_name))
    m.theta.summary()

# Write results to file
for model_name, m in fitted:
    m.write_csv(dir_name + project_name + case_name + model_name + '.csv')

# Find DIC (deviance information criterion) for model comparison.
for index, (model_name, m) in enumerate(fitted, 1):
    print('DIC (Model %d) = %f' % (index, m.dic))
stylianos-kampakis/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D

#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll

from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)

print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
print("Done. Reconstruction error: %g" % err)

#----------------------------------------------------------------------
# Plot result

fig = plt.figure()

try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(211, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
# Narrowed from a bare `except:`, which would also swallow
# KeyboardInterrupt/SystemExit.
except Exception:
    ax = fig.add_subplot(211)
    ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
maciekcc/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
  """Train a structural ensemble model on `csv_file_name` and forecast."""
  # Cycle between 5 latent values over a period of 100. This leads to a very
  # smooth periodic component (and a small model), which is a good fit for our
  # example data. Modeling high-frequency periodic variations will require a
  # higher cycle_num_latent_values.
  structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=100, num_features=1, cycle_num_latent_values=5)
  return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
  """Train an autoregressive model on `csv_file_name` and forecast."""
  # An autoregressive model, with periodicity handled as a time-based
  # regression. Note that this requires windows of size 16 (input_window_size +
  # output_window_size) for training.
  ar = tf.contrib.timeseries.ARRegressor(
      periodicities=100, input_window_size=10, output_window_size=6,
      num_features=1,
      # Use the (default) normal likelihood loss to adaptively fit the
      # variance. SQUARED_LOSS overestimates variance when there are trends in
      # the series.
      loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
  return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
  """A simple example of training and predicting.

  Args:
    estimator: A TFTS estimator (e.g. StructuralEnsembleRegressor or
        ARRegressor) to fit and forecast with.
    csv_file_name: Path to a "time,value" CSV file with no header.
    training_steps: Number of training iterations to run.

  Returns:
    A (times, observed, all_times, mean, upper_limit, lower_limit) tuple,
    where the limits are one standard deviation around the predicted mean.
  """
  # Read data in the default "time,value" CSV format with no header
  reader = tf.contrib.timeseries.CSVReader(csv_file_name)
  # Set up windowing and batching for training
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=16, window_size=16)
  # Fit model parameters to data
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  # Evaluate on the full dataset sequentially, collecting in-sample predictions
  # for a qualitative evaluation. Note that this loads the whole dataset into
  # memory. For quantitative evaluation, use RandomWindowChunker.
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=200)))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, 0]
  # Concatenate in-sample and forecast results into single series.
  mean = np.squeeze(np.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  variance = np.squeeze(np.concatenate(
      [evaluation["covariance"][0], predictions["covariance"]], axis=0))
  all_times = np.concatenate([times, predictions["times"]], axis=0)
  # One-standard-deviation band around the predicted mean.
  upper_limit = mean + np.sqrt(variance)
  lower_limit = mean - np.sqrt(variance)
  return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
              upper_limit, lower_limit):
  """Plot a time series in a new figure.

  Args:
    name: Title for the figure.
    training_times: Time indices of the observed training series.
    observed: Observed values aligned with `training_times`.
    all_times: Time indices covering both evaluation and forecast.
    mean: Predicted mean aligned with `all_times`.
    upper_limit: Upper one-standard-deviation bound on the forecast.
    lower_limit: Lower one-standard-deviation bound on the forecast.
  """
  pyplot.figure()
  pyplot.plot(training_times, observed, "b", label="training series")
  pyplot.plot(all_times, mean, "r", label="forecast")
  pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
  pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
  # alpha must be numeric: passing the string "0.2" (as before) raises a
  # TypeError on current matplotlib versions.
  pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
  # Mark where training data ends and the pure forecast begins.
  pyplot.axvline(training_times[-1], color="k", linestyle="--")
  pyplot.xlabel("time")
  pyplot.ylabel("observations")
  pyplot.legend(loc=0)
  pyplot.title(name)
def main(unused_argv):
  """Train both example models on FLAGS.input_filename and show the plots."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  # Each *_train_and_predict helper returns the tuple make_plot expects:
  # (training times, observed, all times, mean, upper bound, lower bound).
  make_plot("Structural ensemble",
            *structural_ensemble_train_and_predict(FLAGS.input_filename))
  make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
  pyplot.show()
if __name__ == "__main__":
  # Parse only the flag this example defines; unparsed arguments are
  # forwarded to tf.app.run so TensorFlow's own flags keep working.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "--input_filename",
      type=str,
      required=True,
      help="Input csv file.")
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
alolou/adr | src/maxent_we.py | 1 | 1944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from gensim.models import Word2Vec
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import scale
# from TextUtility import TextUtility
def makeFeatureVec(words, model, num_features):
    """Return the mean embedding of the in-vocabulary words.

    Words missing from the model vocabulary are ignored; when no word is
    found, a float32 zero vector of length ``num_features`` is returned.
    """
    vocab = set(model.index2word)
    total = np.zeros((num_features,), dtype="float32")
    hits = 0
    for token in words:
        if token not in vocab:
            continue
        total = np.add(total, model[token])
        hits += 1
    return total / hits if hits else total
def getAvgFeatureVecs(texts, model, num_features):
    """Stack per-text average embeddings into a (len(texts), num_features)
    float32 matrix, one row per input text."""
    feature_matrix = np.zeros((len(texts), num_features), dtype="float32")
    for row, text in enumerate(texts):
        feature_matrix[row] = makeFeatureVec(text, model, num_features)
    return feature_matrix
def getClean(data):
    """Tokenize every document in ``data["text"]`` into a cleaned word list.

    NOTE(review): the ``from TextUtility import TextUtility`` import at the
    top of this module is commented out, so calling this function currently
    raises NameError -- restore the import before use.
    """
    clean_data = []
    for text in data["text"]:
        clean_data.append(TextUtility.text_to_wordlist(text, True))
    return clean_data
def run_we(train, test, f_we, n_dim, clf=LogisticRegression(class_weight="auto")):
    """Train ``clf`` on averaged word embeddings and classify ``test``.

    Args:
        train: DataFrame with "text" and "label" columns used for fitting.
        test: DataFrame with a "text" column to classify.
        f_we: Path to word embeddings in word2vec text format.
        n_dim: Dimensionality of the embedding vectors.
        clf: scikit-learn classifier. NOTE: ``class_weight="auto"`` is
            deprecated in newer scikit-learn ("balanced" is the modern
            spelling); kept as-is for backward compatibility.

    Returns:
        Tuple ``(y_pred, y_prob)`` of predicted labels and a per-class
        probability matrix.
    """
    try:
        model = Word2Vec.load_word2vec_format(f_we, binary=False)
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # are not swallowed while reporting a bad embeddings file.
        print ("Error in loading word embeddings")
        exit(1)
    X_train = scale(getAvgFeatureVecs(getClean(train), model, n_dim))
    X_test = scale(getAvgFeatureVecs(getClean(test), model, n_dim))
    y_train = train['label']
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    try:
        y_prob = clf.predict_proba(X_test)
    except (AttributeError, NotImplementedError):
        # Classifiers without predict_proba (e.g. SVMs fit with
        # probability=False): build a hard 0/1 "probability" matrix from
        # the predictions instead.
        clf.set_params(probability=True)
        y_prob_pos = clf.predict(X_test)
        y_prob_neg = np.ones(X_test.shape[0]) - y_prob_pos
        y_prob = np.column_stack((y_prob_neg, y_prob_pos))
    return y_pred, y_prob
| gpl-2.0 |
sanketloke/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 8 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Licence: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
# Shear the blobs with a linear map so class boundaries are not axis-aligned.
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
# Fit one model per multi-class strategy and draw its decision surface.
for multi_class in ('multinomial', 'ovr'):
    clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
                             multi_class=multi_class).fit(X, y)
    # print the training scores
    print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
    # create a mesh to plot in
    h = .02 # step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
    plt.axis('tight')
    # Plot also the training points
    colors = "bry"
    for i, color in zip(clf.classes_, colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
    # Plot the three one-against-all classifiers
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    coef = clf.coef_
    intercept = clf.intercept_
    # line(x0) solves w0*x0 + w1*x1 + b = 0 for x1, i.e. the class-c
    # decision hyperplane in the (x0, x1) plane.
    def plot_hyperplane(c, color):
        def line(x0):
            return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
        plt.plot([xmin, xmax], [line(xmin), line(xmax)],
                 ls="--", color=color)
    for i, color in zip(clf.classes_, colors):
        plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
Bleyddyn/malpi | dk/scripts/vae_generator.py | 1 | 4487 | from sklearn.utils import shuffle
import numpy as np
from donkeycar.parts.augment import augment_image
from donkeycar.parts.datastore import Tub
from donkeycar.utils import load_scaled_image_arr
import keras
def vae_generator(cfg, data, batch_size, isTrainSet=True, min_records_to_train=1000, aug=False, aux=None, pilot=False):
    """Yield (X, y) image batches forever for VAE training with Keras.

    Args:
        cfg: donkeycar config object (IMAGE_H/W/DEPTH, CACHE_IMAGES).
        data: dict of record dicts; each record carries 'train', 'img_data',
            'image_path' and 'json_data' entries.
        batch_size: number of images per yielded batch.
        isTrainSet: keep only records whose 'train' flag equals this value.
        min_records_to_train: unused here; kept for signature compatibility.
        aug: augment freshly loaded images when True.
        aux: optional key of auxiliary categorical data inside 'json_data'.
        pilot: also emit 'steering_output'/'throttle_output' targets.

    Yields:
        (X, y): X is a (batch_size, H, W, depth) array; y maps output names
        to targets, with 'main_output' set to X itself (autoencoder target).
    """
    num_records = len(data)
    while True:
        batch_data = []
        # Reshuffle the record order at the start of every pass.
        keys = list(data.keys())
        keys = shuffle(keys)
        for key in keys:
            if not key in data:
                continue
            _record = data[key]
            if _record['train'] != isTrainSet:
                continue
            batch_data.append(_record)
            if len(batch_data) == batch_size:
                inputs_img = []
                aux_out = []
                steering = []
                throttle = []
                for record in batch_data:
                    img_arr = None
                    #get image data if we don't already have it
                    if record['img_data'] is None:
                        img_arr = load_scaled_image_arr(record['image_path'], cfg)
                        if img_arr is None:
                            # NOTE(review): this abandons the rest of the
                            # batch; the reshape below then sees fewer than
                            # batch_size images and raises -- confirm whether
                            # 'continue' (skip just this record) was intended.
                            break
                        if aug:
                            img_arr = augment_image(img_arr)
                        if cfg.CACHE_IMAGES:
                            record['img_data'] = img_arr
                    else:
                        img_arr = record['img_data']
                    if img_arr is None:
                        continue
                    inputs_img.append(img_arr)
                    if aux is not None:
                        if aux in record['json_data']:
                            aux_out.append(record['json_data'][aux])
                        else:
                            # NOTE(review): the image was already appended, so
                            # skipping here leaves aux_out shorter than
                            # inputs_img -- verify records always carry aux.
                            print( "Missing aux data in: {}".format( record ) )
                            continue
                    st, th = Tub.get_angle_throttle(record['json_data'])
                    steering.append(st)
                    throttle.append(th)
                X = np.array(inputs_img).reshape(batch_size, cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
                # The VAE reconstructs its own input.
                y = {'main_output': X}
                if pilot:
                    y['steering_output'] = np.array(steering)
                    y['throttle_output'] = np.array(throttle)
                if aux is not None:
                    # assumes 7 auxiliary classes -- TODO confirm against the
                    # data (the __main__ smoke test hard-codes 7 as well).
                    aux_out = keras.utils.to_categorical(aux_out, num_classes=7)
                    y['aux_output'] = aux_out
                yield X, y
                batch_data = []
if __name__ == "__main__":
    import argparse
    import donkeycar as dk
    from donkeycar.templates.train import collate_records, preprocessFileList
    from donkeycar.utils import gather_records
    parser = argparse.ArgumentParser(description='Test VAE data loader.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--aux', default=None, help='Name of the auxilliary data to use.')
    parser.add_argument('file', nargs='+', help='Text file with a list of tubs to train on.')
    args = parser.parse_args()
    # Load the donkeycar config, retrying with an explicit path if no
    # default config is found.
    try:
        cfg = dk.load_config()
    except FileNotFoundError:
        cfg = dk.load_config("config.py") # retry in the current directory
    tub_names = preprocessFileList( args.file )
    input_shape = (cfg.IMAGE_W, cfg.IMAGE_H, cfg.IMAGE_DEPTH)
    # Code for multiple inputs: http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/
    aux_out = 0
    if args.aux is not None:
        aux_out = 7 # need to get number of aux outputs from data
    opts = { 'cfg' : cfg}
    opts['categorical'] = False
    opts['continuous'] = False
    gen_records = {}
    records = gather_records(cfg, tub_names, verbose=True)
    collate_records(records, gen_records, opts)
    # Pull a single batch to smoke-test the generator, then stop.
    train_gen = vae_generator(cfg, gen_records, cfg.BATCH_SIZE, isTrainSet=True, aug=False, aux=args.aux, pilot=True)
    for X, y in train_gen:
        print( "X {} {}".format( type(X[0]), X[0].shape ) )
        img = y['main_output'][0]
        print( "main {} min/max/avg: {}/{}/{}".format( img.shape, np.min(img), np.max(img), np.mean(img) ) )
        if 'aux_output' in y:
            print( "aux {}".format( y['aux_output'].shape ) )
        if 'steering_output' in y:
            print( "Steering {}".format( y['steering_output'].shape ) )
        break
| mit |
jjx02230808/project0223 | sklearn/tests/test_multiclass.py | 5 | 21409 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
# Shuffle the iris samples once (fixed seed) so classes are interleaved
# before any slicing done by the tests below.
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
    # test that ovr and ovo work on regressors which don't have a decision_function
    ovr = OneVsRestClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)
    # Renamed from the misleading ``ovr``: this estimator is one-vs-one.
    ovo = OneVsOneClassifier(DecisionTreeRegressor())
    pred = ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: tests is the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there only 3 possible vote levels
# because they are only 3 distinct class pairs thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
jasonost/clinicaltrials | nlp/MeSHprediction_parallel.py | 1 | 5584 | import nltk, codecs, string, random, math, cPickle as pickle, re, multiprocessing
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np
from sklearn.metrics.pairwise import linear_kernel
# NOTE(review): ``from __future__`` imports must be the first statement in a
# module (after any docstring/comments); placed here, after the imports
# above, this line raises SyntaxError at import time -- move it to the very
# top of the file.
from __future__ import division
sent_tokenizer=nltk.data.load('tokenizers/punkt/english.pickle')
stopset = set(nltk.corpus.stopwords.words('english'))
corrections = {"Sarcoma, Ewing's": 'Sarcoma, Ewing',
'Beta-Thalassemia': 'beta-Thalassemia',
'Von Willebrand Disease, Type 3': 'von Willebrand Disease, Type 3',
'Von Willebrand Disease, Type 2': 'von Willebrand Disease, Type 2',
'Von Willebrand Disease, Type 1': 'von Willebrand Disease, Type 1',
'Felty''s Syndrome': 'Felty Syndrome',
'Von Hippel-Lindau Disease': 'von Hippel-Lindau Disease',
'Retrognathism': 'Retrognathia',
'Regurgitation, Gastric': 'Laryngopharyngeal Reflux',
'Persistent Hyperinsulinemia Hypoglycemia of Infancy': 'Congenital Hyperinsulinism',
'Von Willebrand Diseases': 'von Willebrand Diseases',
'Pontine Glioma': 'Brain Stem Neoplasms',
'Mental Retardation': 'Intellectual Disability',
'Overdose': 'Drug Overdose',
'Beta-Mannosidosis': 'beta-Mannosidosis',
'Alpha 1-Antitrypsin Deficiency': 'alpha 1-Antitrypsin Deficiency',
'Intervertebral Disk Displacement': 'Intervertebral Disc Displacement',
'Alpha-Thalassemia': 'alpha-Thalassemia',
'Mycobacterium Infections, Atypical': 'Mycobacterium Infections, Nontuberculous',
'Legg-Perthes Disease': 'Legg-Calve-Perthes Disease',
'Intervertebral Disk Degeneration': 'Intervertebral Disc Degeneration',
'Alpha-Mannosidosis': 'alpha-Mannosidosis',
'Gestational Trophoblastic Disease': 'Gestational Trophoblastic Neoplasms'
}
cond = {}
cond_r = {}
for row in codecs.open('../data/condition_browse.txt','r','utf-8').readlines():
row_id, trial_id, mesh_term = row.strip().split('|')
if mesh_term in corrections: mesh_term = corrections[mesh_term]
if mesh_term not in cond: cond[mesh_term] = []
cond[mesh_term].append(trial_id)
if trial_id not in cond_r: cond_r[trial_id] = []
cond_r[trial_id].append(mesh_term)
mesh_codes = {}
mesh_codes_r = {}
for row in codecs.open('../data/mesh_thesaurus.txt','r','utf-8').readlines():
row_id, mesh_id, mesh_term = row.strip().split('|')
mesh_codes[mesh_id] = mesh_term
if mesh_term not in mesh_codes_r: mesh_codes_r[mesh_term] = []
mesh_codes_r[mesh_term].append(mesh_id)
# limiting to conditions that appear in ten or more trials
top_cond = {c for c in cond if len(cond[c]) >= 10}
trials = {t for c in top_cond for t in cond[c]}
trial_desc = pickle.load(open('../data/trial_desc.pkl','rb'))
to_classify = [t for t in trial_desc if t not in trials]
cond_text = pickle.load(open('../data/mesh_level2_textcount_holdout.pkl','rb'))
total_text = pickle.load(open('../data/mesh_level2_alltextcount_holdout.pkl','rb'))
to_classify = pickle.load(open('../data/to_classify_holdout.pkl','rb'))
mesh_models = pickle.load(open('../data/mesh_models_series_holdout.pkl','rb'))
def process_text(text):
    """Lowercase and tokenize ``text``, sentence by sentence, dropping
    stopwords and tokens made entirely of punctuation."""
    tokens = []
    for sentence in sent_tokenizer.tokenize(text):
        for token in nltk.word_tokenize(sentence):
            lowered = token.lower()
            if lowered in stopset:
                continue
            if any(char not in string.punctuation for char in token):
                tokens.append(lowered)
    return tokens
# Bag-of-words token counts for every trial that still needs MeSH labels,
# built from all of the trial's non-empty description strings.
classify_text = {trial_id: Counter([word
                                    for desc in trial_desc[trial_id]
                                    if len(desc) > 0
                                    for word in process_text(desc)])
                 for trial_id in to_classify}
pickle.dump(classify_text,open('../data/classify_text.pkl','wb'))
# Vocabulary restricted to terms longer than two characters containing at
# least one non-digit, kept as parallel key/value tuples.
total_text_keys, total_text_values = zip(*[(k, v)
                                           for k, v in total_text.items()
                                           if len(k) > 2 and sum([1
                                                                  for char in k
                                                                  if char not in '1234567890']) > 0])
other_text_len = sum(total_text_values)
def guess_data(c):
    """Score one unlabeled trial ``c`` against every per-term MeSH model.

    Reads the module-level ``classify_text``, ``total_text`` and
    ``mesh_models`` dicts (inherited by the multiprocessing workers).

    Returns:
        dict mapping MeSH term -> positive-class probability, or None when
        the trial has no usable text.
    """
    # A token counts toward the text length only if it is longer than two
    # characters and contains at least one non-digit (same vocabulary
    # filter used when building total_text_keys).
    text_len = sum([v
                    for k, v in classify_text[c].items()
                    if len(k) > 2 and sum([1
                                           for char in k
                                           if char not in '1234567890']) > 0])
    if text_len > 0:
        # create set of tuples (term % of target descriptor text, term % of other MeSH descriptor text)
        vecs = [classify_text[c][t] / text_len
                for t in total_text.keys()
                if len(t) > 2 and sum([1
                                       for char in t
                                       if char not in '1234567890']) > 0]
        # predict logistic models
        # NOTE(review): ``vecs`` is a single 1-D sample; recent scikit-learn
        # releases require a 2-D array for predict_proba -- confirm the
        # pinned version accepts this.
        predictions = {}
        for term, model in mesh_models.items():
            predictions[term] = model.predict_proba(vecs)[0][1]
    else:
        predictions = None
    return predictions
# Fan the scoring out across 16 worker processes; workers inherit the large
# global dicts via fork instead of pickling them per task.
pool = multiprocessing.Pool(processes=16)
preds = pool.map(guess_data, classify_text.keys())
pool.close()
pool.join()
# Safe to zip keys with results: the dict is not mutated between the two
# iterations, so CPython yields keys in the same order both times.
guesses = dict(zip(classify_text.keys(),preds))
pickle.dump(guesses,open('../data/mesh_guesses_holdout.pkl','wb'))
ehashman/oh-mainline | vendor/packages/mechanize/test/test_performance.py | 22 | 2573 | import os
import time
import sys
import unittest
import mechanize
from mechanize._testcase import TestCase, TempDirMaker
from mechanize._rfc3986 import urljoin
KB = 1024
MB = 1024**2
GB = 1024**3
def time_it(operation):
    """Return the wall-clock duration, in seconds, of one ``operation()`` call."""
    started = time.time()
    operation()
    finished = time.time()
    return finished - started
def write_data(filename, nr_bytes):
    """Fill ``filename`` with ``nr_bytes`` bytes of repeating digit data.

    Data is written in 4096-byte blocks; a trailing remainder smaller than
    one block is not written (the benchmark sizes are always multiples).
    """
    block_size = 4096
    block = "01234567" * (block_size // 8)
    # ``with`` replaces the manual try/finally and guarantees the handle
    # is closed even if a write fails.
    with open(filename, "w") as fh:
        for _ in range(nr_bytes // block_size):
            fh.write(block)
def time_retrieve_local_file(temp_maker, size, retrieve_fn):
    """Write ``size`` bytes to a temp file and time retrieving it by URL.

    ``temp_maker`` must provide make_temp_dir() (mechanize TempDirMaker
    interface); ``retrieve_fn`` takes (url, destination_filename).
    """
    temp_dir = temp_maker.make_temp_dir()
    filename = os.path.join(temp_dir, "data")
    write_data(filename, size)
    def operation():
        # urljoin("file://", filename) yields a file: URL; presumably
        # make_temp_dir returns an absolute path -- verify on Windows.
        retrieve_fn(urljoin("file://", filename),
                    os.path.join(temp_dir, "retrieved"))
    return time_it(operation)
class PerformanceTests(TestCase):
    """Coarse performance regression checks for mechanize retrieval."""
    def test_retrieve_local_file(self):
        # Retrieving a 100 MB local file must finish within twice the time
        # implied by a 2 MB/s floor (fudge_factor keeps this from flaking).
        def retrieve(url, filename):
            br = mechanize.Browser()
            br.retrieve(url, filename)
        size = 100 * MB
        # size = 1 * KB
        desired_rate = 2*MB # per second
        desired_time = size / float(desired_rate)
        fudge_factor = 2.
        # ``self`` doubles as the temp_maker -- presumably mechanize's
        # TestCase provides make_temp_dir; confirm in _testcase.
        self.assert_less_than(
            time_retrieve_local_file(self, size, retrieve),
            desired_time * fudge_factor)
def show_plot(rows):
    """Display ``rows`` of (x, y) pairs as a line plot (needs matplotlib)."""
    import matplotlib.pyplot
    figure = matplotlib.pyplot.figure()
    axes = figure.add_subplot(111)
    axes.plot([row[0] for row in rows], [row[1] for row in rows])
    matplotlib.pyplot.show()
def power_2_range(start, stop):
    """Generate the doubling sequence start, 2*start, 4*start, ...

    Iteration ends before the first value that exceeds ``stop``; ``stop``
    itself is yielded when it falls on the sequence.
    """
    current = start
    while True:
        if current > stop:
            return
        yield current
        current = current * 2
def performance_plot():
    """Benchmark Browser.retrieve over doubling file sizes and plot the curve."""
    def retrieve(url, filename):
        br = mechanize.Browser()
        br.retrieve(url, filename)
    # Alternative retrieval back-ends, kept for comparison runs:
    # import urllib2
    # def retrieve(url, filename):
    #     urllib2.urlopen(url).read()
    # from mechanize import _useragent
    # ua = _useragent.UserAgent()
    # ua.set_seekable_responses(True)
    # ua.set_handle_equiv(False)
    # def retrieve(url, filename):
    # ua.retrieve(url, filename)
    # Each row is (size in MB, elapsed seconds); temp dirs are removed even
    # when a retrieval fails.
    rows = []
    for size in power_2_range(256 * KB, 256 * MB):
        temp_maker = TempDirMaker()
        try:
            elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
        finally:
            temp_maker.tear_down()
        rows.append((size//float(MB), elapsed))
    show_plot(rows)
if __name__ == "__main__":
    # --plot runs the interactive timing plot; the default runs the
    # unittest performance assertions
    args = sys.argv[1:]
    if "--plot" in args:
        performance_plot()
    else:
        unittest.main()
| agpl-3.0 |
puavo-org/puavo-os | parts/wlan/mapper/setup.py | 1 | 1692 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Opinsys Oy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from distutils.core import setup
import os.path
setup(name='puavowlanmapper',
version='0.1',
description='Simple APT Repository Tool.',
author='Tuomas Räsänen',
author_email='tuomasjjrasanen@tjjr.fi',
url='http://github.com/opinsys/puavo-wlan',
scripts=['puavo-wlanmapper'],
packages=['puavowlanmapper'],
license='GPLv2+',
platforms=['Linux'],
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications :: Qt",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: POSIX :: Linux",
"Topic :: System :: Networking :: Monitoring",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
requires=['matplotlib', 'numpy', 'scipy'],
provides=['puavowlanmapper'],
) | gpl-2.0 |
jbernhard/qm2017 | qm/plots.py | 1 | 33231 | """ plots / visualizations / figures """
import colorsys
import itertools
import logging
from pathlib import Path
import subprocess
import tempfile
import warnings
import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import lines
from matplotlib import patches
from matplotlib import ticker
from scipy.interpolate import PchipInterpolator
from sklearn.decomposition import PCA
from sklearn.gaussian_process import GaussianProcessRegressor as GPR
from sklearn.gaussian_process import kernels
from sklearn.mixture import GaussianMixture
from . import workdir, systems, parse_system, expt, model, mcmc
from .design import Design
def darken(rgba, amount=.5):
    """Return *rgba* with its HLS lightness scaled by *amount*.

    Accepts an RGB or RGBA tuple; an alpha channel, when present, is
    passed through unchanged.
    """
    hue, lightness, saturation = colorsys.rgb_to_hls(*rgba[:3])
    darker = colorsys.hls_to_rgb(hue, lightness*amount, saturation)
    if len(rgba) > 3:
        return darker + (rgba[3],)
    return darker
fontsmall, fontnormal, fontlarge = 5, 6, 7
offblack = '#262626'
aspect = 1/1.618
resolution = 72.27
textwidth = 307.28987/resolution
textheight = 261.39864/resolution
fullwidth = 350/resolution
fullheight = 270/resolution
plt.rcdefaults()
plt.rcParams.update({
'font.family': 'sans-serif',
'font.sans-serif': ['Lato'],
'mathtext.fontset': 'custom',
'mathtext.default': 'it',
'mathtext.rm': 'sans',
'mathtext.it': 'sans:italic:medium',
'mathtext.cal': 'sans',
'font.size': fontnormal,
'legend.fontsize': fontnormal,
'axes.labelsize': fontnormal,
'axes.titlesize': fontlarge,
'xtick.labelsize': fontsmall,
'ytick.labelsize': fontsmall,
'font.weight': 400,
'axes.labelweight': 400,
'axes.titleweight': 400,
'lines.linewidth': .5,
'lines.markersize': 3,
'lines.markeredgewidth': 0,
'patch.linewidth': .5,
'axes.linewidth': .4,
'xtick.major.width': .4,
'ytick.major.width': .4,
'xtick.minor.width': .4,
'ytick.minor.width': .4,
'xtick.major.size': 1.2,
'ytick.major.size': 1.2,
'xtick.minor.size': .8,
'ytick.minor.size': .8,
'xtick.major.pad': 1.5,
'ytick.major.pad': 1.5,
'axes.labelpad': 3,
'text.color': offblack,
'axes.edgecolor': offblack,
'axes.labelcolor': offblack,
'xtick.color': offblack,
'ytick.color': offblack,
'legend.numpoints': 1,
'legend.scatterpoints': 1,
'legend.frameon': False,
'image.interpolation': 'none',
'pdf.fonttype': 42
})
plotdir = workdir / 'plots'
plotdir.mkdir(exist_ok=True)
plot_functions = {}
def plot(f):
    """
    Plot function decorator.

    Calls the wrapped function, despines and tight-layouts the resulting
    figure (unless the figure opts out), saves it as <function name>.pdf
    in the plot directory, and registers the wrapper in `plot_functions`
    so plots can be generated by name from the command line.
    """
    def wrapper(*args, **kwargs):
        logging.info('generating plot: %s', f.__name__)
        f(*args, **kwargs)
        fig = plt.gcf()
        # a plot function may set fig.despine = False to keep all spines
        if getattr(fig, 'despine', True):
            despine(*fig.axes)
        if not fig.get_tight_layout():
            set_tight(fig)
        plotfile = plotdir / '{}.pdf'.format(f.__name__)
        fig.savefig(str(plotfile))
        logging.info('wrote %s', plotfile)
        plt.close(fig)
    plot_functions[f.__name__] = wrapper
    return wrapper
def despine(*axes):
    """
    Remove the top and right spines.

    With no arguments, operates on every axes of the current figure.
    Ticks are moved to the remaining bottom/left spines unless their
    position was already customized.
    """
    if not axes:
        axes = plt.gcf().axes
    for ax in axes:
        for spine in ['top', 'right']:
            ax.spines[spine].set_visible(False)
        for xy, pos in [('x', 'bottom'), ('y', 'left')]:
            axis = getattr(ax, xy + 'axis')
            # only relocate ticks whose position is still the default
            if axis.get_ticks_position() == 'default':
                axis.set_ticks_position(pos)
def set_tight(fig=None, **kwargs):
    """
    Enable tight_layout on *fig* (default: the current figure) using a
    smaller default pad than matplotlib's.
    """
    kwargs.setdefault('pad', .1)
    target = plt.gcf() if fig is None else fig
    target.set_tight_layout(kwargs)
def remove_ticks(*axes):
    """
    Hide all tick marks (tick labels are kept) on the given axes,
    defaulting to every axes of the current figure.
    """
    for ax in (axes or plt.gcf().axes):
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')
def auto_ticks(
        ax, xy=None, nbins=5, steps=(1, 2, 4, 5, 10),
        prune=None, minor=0
):
    """
    Convenient interface to matplotlib.ticker locators.

    ax: the axes to configure.
    xy: 'x', 'y', or None for both axes.
    nbins, steps, prune: forwarded to ticker.MaxNLocator.  `steps` now
        defaults to a tuple rather than a mutable default list.
    minor: if nonzero, number of minor intervals per major interval.
    """
    if xy == 'x':
        axes = ax.xaxis,
    elif xy == 'y':
        axes = ax.yaxis,
    else:
        axes = ax.xaxis, ax.yaxis
    for axis in axes:
        axis.set_major_locator(
            ticker.MaxNLocator(nbins=nbins, steps=steps, prune=prune)
        )
        if minor:
            axis.set_minor_locator(ticker.AutoMinorLocator(minor))
def _observables_plots():
charged_parts = [('dNch_deta', None, r'$N_\mathrm{ch}$', 'Greys')]
def id_parts(obs):
return [
(obs, 'pion', r'$\pi^\pm$', 'Blues'),
(obs, 'kaon', r'$K^\pm$', 'Greens'),
(obs, 'proton', r'$p\bar p$', 'Reds'),
]
flows = [
('vn', n, '$v_{}$'.format(n), c)
for n, c in enumerate(['GnBu', 'Purples', 'Oranges'], start=2)
]
return [
('Yields', r'$dN_\mathrm{ch}/d\eta,\ dN/dy$', (1., 2e4),
charged_parts + id_parts('dN_dy')),
('Mean $p_T$', r'$p_T$ [GeV]', (0, 2.), id_parts('mean_pT')),
('Flow cumulants', r'$v_n\{2\}$', (0, 0.15), flows),
]
def _observables(posterior=False):
"""
Model observables at all design points or drawn from the posterior with
experimental data points.
"""
plots = _observables_plots()
fig, axes = plt.subplots(
nrows=len(systems), ncols=len(plots),
figsize=(fullwidth, .55*fullwidth)
)
if posterior:
samples = mcmc.Chain().samples(100)
for (system, (title, ylabel, ylim, subplots)), ax in zip(
itertools.product(systems, plots), axes.flat
):
for obs, subobs, label, cmap in subplots:
factor = 5 if obs == 'dNch_deta' else 1
color = getattr(plt.cm, cmap)(.6)
x = model.data[system][obs][subobs]['x']
Y = (
samples[system][obs][subobs]
if posterior else
model.data[system][obs][subobs]['Y']
)
for y in Y * factor:
ax.plot(x, y, color=color, alpha=.08, lw=.3)
ax.text(
x[-1] + 2.5,
model.map_data[system][obs][subobs]['Y'][-1] * factor,
label,
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
y = dset['y'] * factor
yerr = np.sqrt(sum(
e**2 for e in dset['yerr'].values()
)) * factor
ax.errorbar(
x, y, yerr=yerr, fmt='o', ms=1.7,
capsize=0, color='.25', zorder=1000
)
if title == 'Yields':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
if ax.is_first_row():
ax.set_title(title)
elif ax.is_last_row():
ax.set_xlabel('Centrality %')
if ax.is_last_col():
proj, energy = parse_system(system)
ax.text(
1.07, .5, '{} {:.2f} TeV'.format('+'.join(proj), energy/1000),
transform=ax.transAxes, ha='left', va='center',
size=plt.rcParams['axes.titlesize'], rotation=-90
)
ax.set_ylabel(ylabel)
ax.set_ylim(ylim)
set_tight(fig, w_pad=1, rect=[0, 0, .97, 1])
@plot
def observables_design():
_observables(posterior=False)
@plot
def observables_posterior():
_observables(posterior=True)
@plot
def observables_map():
"""
Model observables and ratio to experiment at the maximum a posteriori
(MAP) estimate.
"""
plots = _observables_plots()
fig = plt.figure(figsize=(fullwidth, .85*fullheight))
gs = plt.GridSpec(3*len(systems), len(plots))
for (nsys, system), (nplot, (title, ylabel, ylim, subplots)) in \
itertools.product(enumerate(systems), enumerate(plots)):
nrow = 3*nsys
ax = fig.add_subplot(gs[nrow:nrow+2, nplot])
ratio_ax = fig.add_subplot(gs[nrow+2, nplot])
for obs, subobs, label, cmap in subplots:
factor = 5 if obs == 'dNch_deta' else 1
color = getattr(plt.cm, cmap)(.6)
x = model.map_data[system][obs][subobs]['x']
y = model.map_data[system][obs][subobs]['Y'] * factor
ax.plot(x, y, color=color, lw=.5)
ax.text(
x[-1] + 2.5,
model.map_data[system][obs][subobs]['Y'][-1] * factor,
label,
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
yexp = dset['y'] * factor
yerr = dset['yerr']
ax.errorbar(
x, yexp, yerr=yerr.get('stat'), fmt='o', ms=1.7,
capsize=0, color='.25', zorder=1000
)
yerrsys = yerr.get('sys', yerr.get('sum'))
ax.fill_between(
x, yexp - yerrsys, yexp + yerrsys,
color='.9', zorder=-10
)
ratio_ax.plot(x, y/yexp, color=color)
if title == 'Yields':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
if ax.is_first_row():
ax.set_title(title)
elif ratio_ax.is_last_row():
ratio_ax.set_xlabel('Centrality %')
if ax.is_last_col():
proj, energy = parse_system(system)
ax.text(
1.07, 0, '{} {:.2f} TeV'.format('+'.join(proj), energy/1000),
transform=ax.transAxes, ha='left', va='bottom',
size=plt.rcParams['axes.titlesize'], rotation=-90
)
ax.set_ylabel(ylabel)
ax.set_ylim({'mean_pT': (0, 1.75), 'vn': (0, .12)}.get(obs, ylim))
ratio_ax.axhline(1, lw=.5, color='0.5', zorder=-100)
ratio_ax.axhspan(0.9, 1.1, color='0.95', zorder=-200)
ratio_ax.text(
ratio_ax.get_xlim()[1], .9, '±10%',
color='.6', zorder=-50,
ha='right', va='bottom',
size=plt.rcParams['xtick.labelsize']
)
ratio_ax.set_ylim(0.8, 1.2)
ratio_ax.set_yticks(np.arange(80, 121, 20)/100)
ratio_ax.set_ylabel('Ratio')
set_tight(fig, w_pad=1, rect=[0, 0, .97, 1])
def format_ci(samples, ci=.9):
    """
    Median and `ci`-credible interval of *samples*, TeX-formatted as
    median with asymmetric +/- uncertainties.
    """
    lo, hi = mcmc.credible_interval(samples, ci=ci)
    median = np.median(samples)
    err_lo = median - lo
    err_hi = hi - median

    # decide precision for formatting numbers
    # this is NOT general but it works for the present data
    if abs(median) < .2 and err_lo < .02:
        digits = 3
    elif abs(median) < 1:
        digits = 2
    else:
        digits = 1

    fmt = '{{:#.{}f}}'.format(digits).format

    return '${}_{{-{}}}^{{+{}}}$'.format(
        fmt(median), fmt(err_lo), fmt(err_hi)
    )
def _posterior(params=None, ignore=None, scale=1, padr=.99, padt=.98):
"""
Triangle plot of posterior marginal and joint distributions.
"""
chain = mcmc.Chain()
if params is None and ignore is None:
params = set(chain.keys)
elif params is not None:
params = set(params)
elif ignore is not None:
params = set(chain.keys) - set(ignore)
keys, labels, ranges = map(list, zip(*(
i for i in zip(chain.keys, chain.labels, chain.range)
if i[0] in params
)))
ndim = len(params)
data = chain.load(*keys).T
cmap = plt.cm.Blues
line_color = cmap(.8)
fill_color = cmap(.5, alpha=.1)
fig, axes = plt.subplots(
nrows=ndim, ncols=ndim,
sharex='col', sharey='row',
figsize=2*(scale*fullheight,)
)
for ax, d, lim in zip(axes.diagonal(), data, ranges):
counts, edges = np.histogram(d, bins=50, range=lim)
x = (edges[1:] + edges[:-1]) / 2
y = .85 * (lim[1] - lim[0]) * counts / counts.max() + lim[0]
# smooth histogram with monotonic cubic interpolation
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 10*x.size)
y = interp(x)
ax.plot(x, y, lw=.5, color=line_color)
ax.fill_between(x, lim[0], y, color=fill_color, zorder=-10)
ax.set_xlim(lim)
ax.set_ylim(lim)
ticks = [lim[0], (lim[0] + lim[1])/2, lim[1]]
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.annotate(
format_ci(d),
(.62, .87 if plt.rcParams['font.family'] == ['serif'] else .92),
xycoords='axes fraction', ha='center', va='bottom', fontsize=4.5
)
for ny, nx in zip(*np.tril_indices_from(axes, k=-1)):
H, xedges, yedges = np.histogram2d(
data[nx], data[ny], bins=100,
range=(ranges[nx], ranges[ny])
)
H[H == 0] = None
axes[ny][nx].pcolorfast(xedges, yedges, H.T, cmap=cmap)
axes[nx][ny].set_axis_off()
for n, label in enumerate(labels):
for ax, xy in [(axes[-1, n], 'x'), (axes[n, 0], 'y')]:
getattr(ax, 'set_{}label'.format(xy))(
label.replace(r'\ [', '$\n$['), fontdict=dict(size=4)
)
ticklabels = getattr(ax, 'get_{}ticklabels'.format(xy))()
for t in ticklabels:
t.set_fontsize(3)
if (
scale / ndim < .13 and
xy == 'x' and
len(str(sum(ranges[n])/2)) > 4
):
t.set_rotation(30)
if xy == 'x':
ticklabels[0].set_horizontalalignment('left')
ticklabels[-1].set_horizontalalignment('right')
else:
ticklabels[0].set_verticalalignment('bottom')
ticklabels[-1].set_verticalalignment('top')
set_tight(fig, pad=.05, h_pad=.3, w_pad=.3, rect=[0., 0., padr, padt])
@plot
def posterior():
_posterior(
ignore={'norm {}'.format(s) for s in systems} | {'dmin3', 'etas_hrg'}
)
@plot
def posterior_withnorm():
_posterior(scale=1.2, ignore={'dmin3', 'etas_hrg'})
@plot
def posterior_shear():
_posterior(
scale=.35, padt=.96, padr=1.,
params={'etas_min', 'etas_slope', 'etas_curv'}
)
@plot
def posterior_bulk():
_posterior(
scale=.3, padt=.96, padr=1.,
params={'zetas_max', 'zetas_width'}
)
@plot
def posterior_p():
"""
Distribution of trento p parameter with annotations for other models.
"""
plt.figure(figsize=(.65*textwidth, .25*textwidth))
ax = plt.axes()
data = mcmc.Chain().load('trento_p').ravel()
counts, edges = np.histogram(data, bins=50)
x = (edges[1:] + edges[:-1]) / 2
y = counts / counts.max()
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 10*x.size)
y = interp(x)
ax.plot(x, y, color=plt.cm.Blues(0.8))
ax.fill_between(x, y, color=plt.cm.Blues(0.15), zorder=-10)
ax.set_xlabel('$p$')
for spine in ax.spines.values():
spine.set_visible(False)
for label, x, err in [
('KLN', -.67, .01),
('EKRT /\nIP-Glasma', 0, .1),
('Wounded\nnucleon', 1, None),
]:
args = ([x], [0], 'o') if err is None else ([x - err, x + err], [0, 0])
ax.plot(*args, lw=4, ms=4, color=offblack, alpha=.58, clip_on=False)
if label.startswith('EKRT'):
x -= .275
ax.text(x, .05, label, va='bottom', ha='center')
ax.text(.1, .8, format_ci(data))
ax.set_xticks(np.arange(-10, 11, 5)/10)
ax.set_xticks(np.arange(-75, 76, 50)/100, minor=True)
for t in ax.get_xticklabels():
t.set_y(-.03)
xm = 1.2
ax.set_xlim(-xm, xm)
ax.add_artist(
patches.FancyArrowPatch(
(-xm, 0), (xm, 0),
linewidth=.6,
arrowstyle=patches.ArrowStyle.CurveFilledAB(
head_length=3, head_width=1.5
),
facecolor=offblack, edgecolor=offblack,
clip_on=False, zorder=100
)
)
ax.set_yticks([])
ax.set_ylim(0, 1.01*y.max())
set_tight(pad=0)
region_style = dict(color='.93', zorder=-100)
Tc = .154
def _region_shear(mode='full', scale=.6):
"""
Estimate of the temperature dependence of shear viscosity eta/s.
"""
plt.figure(figsize=(scale*textwidth, scale*aspect*textwidth))
ax = plt.axes()
def etas(T, m=0, s=0, c=0):
return m + s*(T - Tc)*(T/Tc)**c
chain = mcmc.Chain()
rangedict = dict(zip(chain.keys, chain.range))
ekeys = ['etas_' + k for k in ['min', 'slope', 'curv']]
T = np.linspace(Tc, .3, 100)
prior = ax.fill_between(
T, etas(T, *(rangedict[k][1] for k in ekeys)),
**region_style
)
ax.set_xlim(xmin=.15)
ax.set_ylim(0, .6)
ax.set_xticks(np.arange(150, 301, 50)/1000)
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(2))
auto_ticks(ax, 'y', minor=2)
ax.set_xlabel('Temperature [GeV]')
ax.set_ylabel(r'$\eta/s$')
if mode == 'empty':
return
if mode == 'examples':
for args in [
(.05, 1.0, -1),
(.10, 1.7, 0),
(.15, 2.0, 1),
]:
ax.plot(T, etas(T, *args), color=plt.cm.Blues(.7))
return
eparams = chain.load(*ekeys).T
intervals = np.array([
mcmc.credible_interval(etas(t, *eparams))
for t in T
]).T
band = ax.fill_between(T, *intervals, color=plt.cm.Blues(.32))
ax.plot(T, np.full_like(T, 1/(4*np.pi)), color='.6')
ax.text(.299, .07, r'KSS bound $1/4\pi$', va='top', ha='right', color='.4')
median, = ax.plot(
T, etas(T, *map(np.median, eparams)),
color=plt.cm.Blues(.77)
)
ax.legend(*zip(*[
(prior, 'Prior range'),
(median, 'Posterior median'),
(band, '90% credible region'),
]), loc='upper left', bbox_to_anchor=(0, 1.03))
@plot
def region_shear():
_region_shear()
@plot
def region_shear_empty():
_region_shear('empty')
@plot
def region_shear_examples():
_region_shear('examples', scale=.5)
def _region_bulk(mode='full', scale=.6):
"""
Estimate of the temperature dependence of bulk viscosity zeta/s.
"""
plt.figure(figsize=(scale*textwidth, scale*aspect*textwidth))
ax = plt.axes()
def zetas(T, zetas_max=0, zetas_width=1):
return zetas_max / (1 + ((T - Tc)/zetas_width)**2)
chain = mcmc.Chain()
keys, ranges = map(list, zip(*(
i for i in zip(chain.keys, chain.range)
if i[0].startswith('zetas')
)))
T = Tc*np.linspace(.5, 1.5, 1000)
maxdict = {k: r[1] for k, r in zip(keys, ranges)}
ax.fill_between(
T, zetas(T, **maxdict),
label='Prior range',
**region_style
)
ax.set_xlim(T[0], T[-1])
ax.set_ylim(0, 1.05*maxdict['zetas_max'])
auto_ticks(ax, minor=2)
ax.set_xlabel('Temperature [GeV]')
ax.set_ylabel(r'$\zeta/s$')
if mode == 'empty':
return
if mode == 'examples':
for args in [
(.025, .01),
(.050, .03),
(.075, .05),
]:
ax.plot(T, zetas(T, *args), color=plt.cm.Blues(.7))
return
# use a Gaussian mixture model to classify zeta/s parameters
samples = chain.load(*keys, thin=10)
gmm = GaussianMixture(n_components=3, covariance_type='full').fit(samples)
labels = gmm.predict(samples)
for n in range(gmm.n_components):
params = dict(zip(
keys,
(mcmc.credible_interval(s)[1] for s in samples[labels == n].T)
))
if params['zetas_max'] > .05:
cmap = 'Blues'
elif params['zetas_width'] > .03:
cmap = 'Greens'
else:
cmap = 'Oranges'
curve = zetas(T, **params)
color = getattr(plt.cm, cmap)(.65)
ax.plot(T, curve, color=color, zorder=-10)
ax.fill_between(T, curve, color=color, alpha=.1, zorder=-20)
ax.legend(loc='upper left')
@plot
def region_bulk():
_region_bulk()
@plot
def region_bulk_empty():
_region_bulk('empty')
@plot
def region_bulk_examples():
_region_bulk('examples', scale=.5)
@plot
def flow_corr():
"""
Symmetric cumulants SC(m, n) at the MAP point compared to experiment.
"""
plots, width_ratios = zip(*[
(('sc_central', 1e-7), 2),
(('sc', 2.9e-6), 3),
])
def label(*mn):
return r'$\mathrm{{SC}}({}, {})$'.format(*mn)
fig, axes = plt.subplots(
figsize=(textwidth, .42*textwidth),
ncols=len(plots), gridspec_kw=dict(width_ratios=width_ratios)
)
cmapx_normal = .7
cmapx_pred = .5
dashes_pred = [3, 2]
for (obs, ylim), ax in zip(plots, axes):
for (mn, cmap), sys in itertools.product(
[
((4, 2), 'Blues'),
((3, 2), 'Oranges'),
],
systems
):
x = model.map_data[sys][obs][mn]['x']
y = model.map_data[sys][obs][mn]['Y']
pred = obs not in expt.extra_data[sys]
cmapx = cmapx_pred if pred else cmapx_normal
kwargs = {}
if pred:
kwargs.update(dashes=dashes_pred)
if ax.is_last_col():
if not pred:
kwargs.update(label=label(*mn))
else:
fmt = '{:.2f} TeV'
if pred:
fmt += ' (prediction)'
lbl = fmt.format(parse_system(sys)[1]/1000)
if not any(l.get_label() == lbl for l in ax.get_lines()):
ax.add_line(lines.Line2D(
[], [], color=plt.cm.Greys(cmapx),
label=lbl, **kwargs
))
ax.plot(
x, y, lw=.75,
color=getattr(plt.cm, cmap)(cmapx),
**kwargs
)
if pred:
continue
x = expt.extra_data[sys][obs][mn]['x']
y = expt.extra_data[sys][obs][mn]['y']
yerr = expt.extra_data[sys][obs][mn]['yerr']
ax.errorbar(
x, y, yerr=yerr['stat'],
fmt='o', ms=2, capsize=0, color='.25', zorder=100
)
ax.fill_between(
x, y - yerr['sys'], y + yerr['sys'],
color='.9', zorder=-10
)
ax.axhline(
0, color='.75', lw=plt.rcParams['xtick.major.width'],
zorder=-100
)
ax.set_xlabel('Centrality %')
ax.set_ylim(-ylim, ylim)
auto_ticks(ax, 'y', nbins=6, minor=2)
ax.ticklabel_format(scilimits=(-5, 5))
if ax.is_first_col():
ax.set_ylabel(label('m', 'n'))
ax.legend(loc='upper left')
ax.set_title(dict(
sc_central='Most central collisions',
sc='Minimum bias'
)[obs])
@plot
def flow_extra():
"""
vn{2} in central bins and v2{4}.
"""
plots, width_ratios = zip(*[
(('vn_central', 'Central two-particle cumulants', r'$v_n\{2\}$'), 2),
(('vn4', 'Four-particle cumulants', r'$v_2\{4\}$'), 3),
])
fig, axes = plt.subplots(
figsize=(textwidth, .42*textwidth),
ncols=len(plots), gridspec_kw=dict(width_ratios=width_ratios)
)
cmaps = {2: plt.cm.GnBu, 3: plt.cm.Purples}
for (obs, title, ylabel), ax in zip(plots, axes):
for sys, (cmapx, dashes, fmt) in zip(
systems, [
(.7, (None, None), 'o'),
(.6, (3, 2), 's'),
]
):
syslabel = '{:.2f} TeV'.format(parse_system(sys)[1]/1000)
for subobs, dset in model.map_data[sys][obs].items():
x = dset['x']
y = dset['Y']
ax.plot(
x, y,
color=cmaps[subobs](cmapx), dashes=dashes,
label='Model ' + syslabel
)
try:
dset = expt.extra_data[sys][obs][subobs]
except KeyError:
continue
x = dset['x']
y = dset['y']
yerr = dset['yerr']
ax.errorbar(
x, y, yerr=yerr['stat'],
fmt=fmt, ms=2.2, capsize=0, color='.25', zorder=100,
label='ALICE ' + syslabel
)
ax.fill_between(
x, y - yerr['sys'], y + yerr['sys'],
color='.9', zorder=-10
)
if obs == 'vn_central':
ax.text(
x[-1] + .15, y[-1], '$v_{}$'.format(subobs),
color=cmaps[subobs](.99), ha='left', va='center'
)
auto_ticks(ax, 'y', minor=2)
ax.set_xlim(0, dset['cent'][-1][1])
ax.set_xlabel('Centrality %')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.legend(loc='lower right')
@plot
def design():
"""
Projection of a LH design into two dimensions.
"""
fig = plt.figure(figsize=(.5*textwidth, .5*textwidth))
ratio = 5
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_j = fig.add_subplot(gs[1:, :-1])
ax_x = fig.add_subplot(gs[0, :-1], sharex=ax_j)
ax_y = fig.add_subplot(gs[1:, -1], sharey=ax_j)
d = Design(systems[0])
keys = ('etas_min', 'etas_slope')
indices = tuple(d.keys.index(k) for k in keys)
x, y = (d.array[:, i] for i in indices)
ax_j.plot(x, y, 'o', color=plt.cm.Blues(0.75), mec='white', mew=.3)
hist_kw = dict(bins=30, color=plt.cm.Blues(0.4), edgecolor='white', lw=.5)
ax_x.hist(x, **hist_kw)
ax_y.hist(y, orientation='horizontal', **hist_kw)
for ax in fig.axes:
ax.tick_params(top='off', right='off')
spines = ['top', 'right']
if ax is ax_x:
spines += ['left']
elif ax is ax_y:
spines += ['bottom']
for spine in spines:
ax.spines[spine].set_visible(False)
for ax_name in 'xaxis', 'yaxis':
getattr(ax, ax_name).set_ticks_position('none')
auto_ticks(ax_j)
for ax in ax_x, ax_y:
ax.tick_params(labelbottom='off', labelleft='off')
for i, xy in zip(indices, 'xy'):
for f, l in [('lim', d.range), ('label', d.labels)]:
getattr(ax_j, 'set_{}{}'.format(xy, f))(l[i])
@plot
def gp():
"""
Conditioning a Gaussian process.
"""
fig, axes = plt.subplots(
figsize=(.45*textwidth, .85*textheight),
nrows=2, sharex='col'
)
def dummy_optimizer(obj_func, initial_theta, bounds):
return initial_theta, 0.
gp = GPR(1.*kernels.RBF(.8), optimizer=dummy_optimizer)
def sample_y(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
return gp.sample_y(*args, **kwargs)
x = np.linspace(0, 5, 1000)
X = x[:, np.newaxis]
x_train = np.linspace(.5, 4.5, 4)
X_train = x_train[:, np.newaxis]
for title, ax in zip(['Random functions', 'Conditioned on data'], axes):
if title.startswith('Conditioned'):
y = sample_y(X_train, random_state=23158).squeeze()
y -= .5*(y.max() + y.min())
gp.fit(X_train, y)
training_data, = plt.plot(x_train, y, 'o', color='.3', zorder=50)
for s, c in zip(
sample_y(X, n_samples=4, random_state=34576).T,
['Blues', 'Greens', 'Oranges', 'Purples']
):
ax.plot(x, s, color=getattr(plt.cm, c)(.6))
mean, std = gp.predict(X, return_std=True)
std = ax.fill_between(x, mean - std, mean + std, color='.92')
mean, = ax.plot(x, mean, color='.42', dashes=(3.5, 1.5))
ax.set_ylim(-2, 2)
ax.set_ylabel('Output')
auto_ticks(ax)
ax.set_title(title, y=.9)
ax.set_xlabel('Input')
ax.legend(*zip(*[
(mean, 'Mean prediction'),
(std, 'Uncertainty'),
(training_data, 'Training data'),
]), loc='lower left')
set_tight(fig, h_pad=1)
@plot
def pca():
fig = plt.figure(figsize=(.45*textwidth, .45*textwidth))
ratio = 5
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_j = fig.add_subplot(gs[1:, :-1])
ax_x = fig.add_subplot(gs[0, :-1], sharex=ax_j)
ax_y = fig.add_subplot(gs[1:, -1], sharey=ax_j)
x, y = (
model.data['PbPb2760'][obs][subobs]['Y'][:, 3]
for obs, subobs in [('dN_dy', 'pion'), ('vn', 2)]
)
xlabel = r'$dN_{\pi^\pm}/dy$'
ylabel = r'$v_2\{2\}$'
xlim = 0, 1500
ylim = 0, 0.15
cmap = plt.cm.Blues
ax_j.plot(x, y, 'o', color=cmap(.75), mec='white', mew=.25, zorder=10)
for d, ax, orientation in [(x, ax_x, 'vertical'), (y, ax_y, 'horizontal')]:
ax.hist(
d, bins=20,
orientation=orientation, color=cmap(.4), edgecolor='white'
)
xy = np.column_stack([x, y])
xymean = xy.mean(axis=0)
xystd = xy.std(axis=0)
xy -= xymean
xy /= xystd
pca = PCA().fit(xy)
pc = (
7 * xystd *
pca.explained_variance_ratio_[:, np.newaxis] *
pca.components_
)
for w, p in zip(pca.explained_variance_ratio_, pc):
if np.all(p < 0):
p *= -1
ax_j.annotate(
'', xymean + p, xymean, zorder=20,
arrowprops=dict(
arrowstyle='->', shrinkA=0, shrinkB=0,
color=offblack, lw=.7
)
)
ax_j.text(
*(xymean + p + (.8, .002)*np.sign(p)), s='{:.0f}%'.format(100*w),
color=offblack, ha='center', va='top' if p[1] < 0 else 'bottom',
zorder=20
)
for ax in fig.axes:
ax.tick_params(top='off', right='off')
spines = ['top', 'right']
if ax is ax_x:
spines += ['left']
elif ax is ax_y:
spines += ['bottom']
for spine in spines:
ax.spines[spine].set_visible(False)
for ax_name in 'xaxis', 'yaxis':
getattr(ax, ax_name).set_ticks_position('none')
for ax in ax_x, ax_y:
ax.tick_params(labelbottom='off', labelleft='off')
auto_ticks(ax_j)
ax_j.set_xlim(xlim)
ax_j.set_ylim(ylim)
ax_j.set_xlabel(xlabel)
ax_j.set_ylabel(ylabel)
set_tight(pad=.1, h_pad=.3, w_pad=.3)
@plot
def trento_events():
"""
Random trento events.
"""
fig, axes = plt.subplots(
nrows=3, sharex='col',
figsize=(.28*textwidth, .85*textheight)
)
xymax = 8.
xyr = [-xymax, xymax]
with tempfile.NamedTemporaryFile(suffix='.hdf') as t:
subprocess.run((
'trento Pb Pb {} --quiet --b-max 12 '
'--grid-max {} --grid-step .1 '
'--random-seed 6347321 --output {}'
).format(axes.size, xymax, t.name).split())
with h5py.File(t.name, 'r') as f:
for dset, ax in zip(f.values(), axes):
ax.pcolorfast(xyr, xyr, np.array(dset), cmap=plt.cm.Blues)
ax.set_aspect('equal')
for xy in ['x', 'y']:
getattr(ax, 'set_{}ticks'.format(xy))([-5, 0, 5])
axes[-1].set_xlabel('$x$ [fm]')
axes[1].set_ylabel('$y$ [fm]')
set_tight(fig, h_pad=.5)
if __name__ == '__main__':
import argparse
choices = list(plot_functions)
def arg_to_plot(arg):
arg = Path(arg).stem
if arg not in choices:
raise argparse.ArgumentTypeError(arg)
return arg
parser = argparse.ArgumentParser(description='generate plots')
parser.add_argument(
'--serif', action='store_true',
help='use STIX serif font'
)
parser.add_argument(
'--black', action='store_true',
help='use true black for text and axes'
)
parser.add_argument(
'plots', nargs='*', type=arg_to_plot, metavar='PLOT',
help='{} (default: all)'.format(', '.join(choices).join('{}'))
)
args = parser.parse_args()
if args.serif:
plt.rcParams.update({
'font.family': 'serif',
'font.serif': ['STIXGeneral'],
'mathtext.fontset': 'stix',
})
if args.black:
plt.rcParams.update(dict.fromkeys([
'text.color',
'axes.edgecolor',
'axes.labelcolor',
'xtick.color',
'ytick.color',
], 'black'))
if args.plots:
for p in args.plots:
plot_functions[p]()
else:
for f in plot_functions.values():
f()
| mit |
466152112/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
    """Generate regression/classification data.

    case: 'regression' (Boston Housing) or 'classification'
        (20 Newsgroups, vectorized).
    sparse: if True, keep the X matrices as CSR sparse matrices.

    Returns a dict with keys 'X_train', 'X_test', 'y_train', 'y_test'
    from a shuffled 80/20 split.
    """
    bunch = None
    if case == 'regression':
        bunch = datasets.load_boston()
    elif case == 'classification':
        bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    X, y = shuffle(bunch.data, bunch.target)
    offset = int(X.shape[0] * 0.8)
    X_train, y_train = X[:offset], y[:offset]
    X_test, y_test = X[offset:], y[offset:]
    if sparse:
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    else:
        X_train = np.array(X_train)
        X_test = np.array(X_test)
    # targets are always dense arrays, regardless of `sparse`
    y_test = np.array(y_test)
    y_train = np.array(y_train)
    data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
            'y_test': y_test}
    return data
def benchmark_influence(conf):
    """
    Benchmark influence of :changing_param: on both MSE and latency.

    For each value in conf['changing_param_values']: fit the estimator,
    apply the post-fit hook, compute its complexity measure, time the
    prediction averaged over conf['n_samples'] runs, and score the
    predictions on the test set.

    Returns (prediction_powers, prediction_times, complexities),
    one entry per parameter value.
    """
    prediction_times = []
    prediction_powers = []
    complexities = []
    for param_value in conf['changing_param_values']:
        conf['tuned_params'][conf['changing_param']] = param_value
        estimator = conf['estimator'](**conf['tuned_params'])
        print("Benchmarking %s" % estimator)
        estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
        conf['postfit_hook'](estimator)
        complexity = conf['complexity_computer'](estimator)
        complexities.append(complexity)
        start_time = time.time()
        # average the prediction latency over several runs
        for _ in range(conf['n_samples']):
            y_pred = estimator.predict(conf['data']['X_test'])
        elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
        prediction_times.append(elapsed_time)
        pred_score = conf['prediction_performance_computer'](
            conf['data']['y_test'], y_pred)
        prediction_powers.append(pred_score)
        print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
            complexity, conf['prediction_performance_label'], pred_score,
            elapsed_time))
    return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
    """
    Plot influence of model complexity on both accuracy and latency.

    Accuracy (left axis) and latency (right axis) share the model
    complexity x axis via a parasite twin axes.  Blocks on plt.show().
    """
    plt.figure(figsize=(12, 6))
    host = host_subplot(111, axes_class=Axes)
    plt.subplots_adjust(right=0.75)
    par1 = host.twinx()
    host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
    y1_label = conf['prediction_performance_label']
    y2_label = "Time (s)"
    host.set_ylabel(y1_label)
    par1.set_ylabel(y2_label)
    p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
    p2, = par1.plot(complexities, prediction_times, 'r-',
                    label="latency")
    host.legend(loc='upper right')
    # color each axis label to match its curve
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
    plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
# One dataset per task type; the classification set is sparse because the
# SGD/elasticnet benchmark calls sparsify() on the fitted coefficients.
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
# Each configuration bundles: the estimator class, the hyper-parameter being
# swept, how to measure model complexity, and how to score predictions.
configurations = [
    {'estimator': SGDClassifier,
     'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
                      'modified_huber', 'fit_intercept': True},
     'changing_param': 'l1_ratio',
     'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non_zero coefficients',
     'complexity_computer': _count_nonzero_coefficients,
     'prediction_performance_computer': hamming_loss,
     'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
     'postfit_hook': lambda x: x.sparsify(),
     'data': classification_data,
     'n_samples': 30},
    {'estimator': NuSVR,
     'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
     'changing_param': 'nu',
     'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'n_support_vectors',
     'complexity_computer': lambda x: len(x.support_vectors_),
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
    {'estimator': GradientBoostingRegressor,
     'tuned_params': {'loss': 'ls'},
     'changing_param': 'n_estimators',
     'changing_param_values': [10, 50, 100, 200, 500],
     'complexity_label': 'n_trees',
     'complexity_computer': lambda x: x.n_estimators,
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
]
# Run every benchmark and plot its accuracy/latency trade-off.
for conf in configurations:
    prediction_performances, prediction_times, complexities = \
        benchmark_influence(conf)
    plot_influence(conf, prediction_performances, prediction_times,
                   complexities)
| bsd-3-clause |
poryfly/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
                                        n_features=n_features, n_informative=5,
                                        random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
# NOTE(review): n_features / 5 relies on Python 2 integer division; under
# Python 3 this produces a float and randn would raise — would need //.
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
# Pair each penalty with its own C grid and its own (sparse- vs dense-friendly)
# dataset: l1 on the sparse-informative data, l2 on the dense data.
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                       tol=1e-3),
             np.logspace(-2.3, -1.3, 10), X_1, y_1),
            (LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
                       tol=1e-4),
             np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each classifier
    plt.figure(fignum, figsize=(9, 10))
    for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
        param_grid = dict(C=cs)
        # To get nice curve, we need a large number of iterations to
        # reduce the variance
        grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
                            cv=ShuffleSplit(n=n_samples, train_size=train_size,
                                            n_iter=250, random_state=1))
        grid.fit(X, y)
        scores = [x[1] for x in grid.grid_scores_]
        # Plot the same CV scores twice: raw C, and C scaled by the number
        # of training samples.
        scales = [(1, 'No scaling'),
                  ((n_samples * train_size), '1/n_samples'),
                  ]
        for subplotnum, (scaler, name) in enumerate(scales):
            plt.subplot(2, 1, subplotnum + 1)
            plt.xlabel('C')
            plt.ylabel('CV Score')
            grid_cs = cs * float(scaler)  # scale the C's
            plt.semilogx(grid_cs, scores, label="fraction %.2f" %
                         train_size)
            plt.title('scaling=%s, penalty=%s, loss=%s' %
                      (name, clf.penalty, clf.loss))
    plt.legend(loc="best")
plt.show()
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 18 | 25692 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
# Module-level RNG used for the reproducible permutations below.
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
    """Smoke test: BaggingClassifier must fit and predict for every
    combination of sampling options and several base estimator types."""
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})
    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC()]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
    """Outputs of all prediction methods must be identical whether the
    bagger was trained on sparse (CSC/CSR) or dense data, and the base
    estimators must actually receive sparse input."""
    # Check classification for various parameter settings on sparse input.
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set"""
        def fit(self, X, y):
            super(CustomSVC, self).fit(X, y)
            # Remember the type of X so the test can assert sparsity.
            self.data_type_ = type(X)
            return self
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
        ]
    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
                # Trained on sparse format
                sparse_classifier = BaggingClassifier(
                    base_estimator=CustomSVC(decision_function_shape='ovr'),
                    random_state=1,
                    **params
                ).fit(X_train_sparse, y_train)
                sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
                # Trained on dense format
                dense_classifier = BaggingClassifier(
                    base_estimator=CustomSVC(decision_function_shape='ovr'),
                    random_state=1,
                    **params
                ).fit(X_train, y_train)
                dense_results = getattr(dense_classifier, f)(X_test)
                assert_array_equal(sparse_results, dense_results)
            # Every base estimator must have seen the sparse matrix type.
            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]
            assert all([t == sparse_type for t in types])
def test_regression():
    """Smoke test: BaggingRegressor must fit and predict for every
    combination of sampling options and several base estimator types."""
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})
    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR()]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
    """Predictions must be identical whether the bagging regressor was
    trained on sparse (CSC/CSR) or dense data, and the base estimators
    must actually receive sparse input."""
    # Check regression for various parameter settings on sparse input.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    class CustomSVR(SVR):
        """SVC variant that records the nature of the training set"""
        def fit(self, X, y):
            super(CustomSVR, self).fit(X, y)
            # Remember the type of X so the test can assert sparsity.
            self.data_type_ = type(X)
            return self
    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
        ]
    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # Trained on dense format
            dense_results = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)
            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
    assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    """Without bootstrap every tree fits the training set perfectly;
    with bootstrap the resampled trees must score worse on it."""
    # Test that bootstrapping samples generate non-perfect base estimators.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)
    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))
    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)
    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))
def test_bootstrap_features():
    """bootstrap_features=False keeps each estimator's feature set unique;
    with True some feature must be drawn more than once."""
    # Test that bootstrapping features may generate duplicate features.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=False,
                                random_state=rng).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert_equal(boston.data.shape[1], np.unique(features).shape[0])
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=True,
                                random_state=rng).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
    """predict_proba rows must sum to one and match exp(predict_log_proba),
    including the degenerate case where bootstrap samples miss classes."""
    # Predict probabilities.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    # Silence log(0)/0-division warnings from degenerate probabilities.
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                                     random_state=rng).fit(X_train, y_train)
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
        # Degenerate case, where some classes are missing
        ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
                                     random_state=rng,
                                     max_samples=5).fit(X_train, y_train)
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
    """OOB score must approximate the held-out test score (within 0.1), and
    a single-estimator bagger must warn that OOB is unreliable."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    for base_estimator in [DecisionTreeClassifier(), SVC()]:
        clf = BaggingClassifier(base_estimator=base_estimator,
                                n_estimators=100,
                                bootstrap=True,
                                oob_score=True,
                                random_state=rng).fit(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        assert_less(abs(test_score - clf.oob_score_), 0.1)
        # Test with few estimators
        assert_warns(UserWarning,
                     BaggingClassifier(base_estimator=base_estimator,
                                       n_estimators=1,
                                       bootstrap=True,
                                       oob_score=True,
                                       random_state=rng).fit,
                     X_train,
                     y_train)
def test_oob_score_regression():
    """OOB score must approximate the held-out test score (within 0.1), and
    a single-estimator bagger must warn that OOB is unreliable."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                           n_estimators=50,
                           bootstrap=True,
                           oob_score=True,
                           random_state=rng).fit(X_train, y_train)
    test_score = clf.score(X_test, y_test)
    assert_less(abs(test_score - clf.oob_score_), 0.1)
    # Test with few estimators
    assert_warns(UserWarning,
                 BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                  n_estimators=1,
                                  bootstrap=True,
                                  oob_score=True,
                                  random_state=rng).fit,
                 X_train,
                 y_train)
def test_single_estimator():
    """A one-estimator bagger with no sample/feature resampling must make
    the same predictions as the bare base estimator."""
    # Check singleton ensembles.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
                            n_estimators=1,
                            bootstrap=False,
                            bootstrap_features=False,
                            random_state=rng).fit(X_train, y_train)
    clf2 = KNeighborsRegressor().fit(X_train, y_train)
    assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
    """Out-of-range or non-numeric max_samples / max_features must raise
    ValueError; a base estimator without decision_function must not make
    the bagger expose one."""
    # Test that it gives proper exception on deficient input.
    X, y = iris.data, iris.target
    base = DecisionTreeClassifier()
    # Test max_samples
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples=1000).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_samples="foobar").fit, X, y)
    # Test max_features
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=-1).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=0.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=2.0).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features=5).fit, X, y)
    assert_raises(ValueError,
                  BaggingClassifier(base, max_features="foobar").fit, X, y)
    # Test support of decision_function
    assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
    """predict_proba and decision_function must be independent of n_jobs,
    both at fit time and at predict time."""
    # Check parallel classification.
    rng = check_random_state(0)
    # Classification
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    # predict_proba
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict_proba(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y2)
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)
    y3 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y3)
    # decision_function
    ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    ensemble.set_params(n_jobs=1)
    decisions1 = ensemble.decision_function(X_test)
    ensemble.set_params(n_jobs=2)
    decisions2 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions2)
    ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)
    decisions3 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
    """Regression predictions must be independent of n_jobs."""
    # Check parallel regression.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)
    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=1,
                                random_state=0).fit(X_train, y_train)
    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3)
def test_gridsearch():
    """Bagging ensembles must work inside GridSearchCV, including scoring
    that requires decision_function (roc_auc) and nested base_estimator__
    parameter names."""
    # Check that bagging ensembles can be grid-searched.
    # Transform iris into a binary classification task
    X, y = iris.data, iris.target
    # NOTE: y aliases iris.target, so this in-place relabeling is visible
    # to any test that runs afterwards.
    y[y == 2] = 1
    # Grid search with scoring based on decision_function
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__C': (1, 2)}
    GridSearchCV(BaggingClassifier(SVC()),
                 parameters,
                 scoring="roc_auc").fit(X, y)
def test_base_estimator():
    """base_estimator=None must default to a decision tree (classifier or
    regressor), and explicit base estimators must be used as given."""
    # Check base_estimator and its default values.
    rng = check_random_state(0)
    # Classification
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    ensemble = BaggingClassifier(None,
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
    ensemble = BaggingClassifier(Perceptron(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, Perceptron))
    # Regression
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    ensemble = BaggingRegressor(None,
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
    ensemble = BaggingRegressor(SVR(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
    """Bagging must accept a Pipeline base estimator together with
    feature subsampling."""
    estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
                                                DecisionTreeClassifier()),
                                  max_features=2)
    estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
    """Minimal estimator that always predicts the first class seen in fit.

    Its ``fit`` deliberately takes no ``sample_weight`` argument, which the
    following test relies on.
    """
    def fit(self, X, y):
        self.classes_ = np.unique(y)
        return self
    def predict(self, X):
        # Constant prediction: class at index 0, repeated for every sample.
        return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
    """Passing sample_weight to fit must raise ValueError when the base
    estimator's fit does not accept it."""
    estimator = BaggingClassifier(DummyZeroEstimator())
    rng = check_random_state(0)
    estimator.fit(iris.data, iris.target).predict(iris.data)
    assert_raises(ValueError, estimator.fit, iris.data, iris.target,
                  sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
    """Incremental warm-start fits (5 then 10 estimators) must yield an
    ensemble equivalent to a single cold fit of 10 estimators."""
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = BaggingClassifier(n_estimators=n_estimators,
                                       random_state=random_state,
                                       warm_start=True)
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)
    clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
                                  warm_start=False)
    clf_no_ws.fit(X, y)
    # Same per-tree seeds => statistically identical ensembles.
    assert_equal(set([tree.random_state for tree in clf_ws]),
                 set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    """Refitting with warm_start and a smaller n_estimators must raise."""
    # Test if warm start'ed second fit with smaller n_estimators raises error.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True)
    clf.fit(X, y)
    clf.set_params(n_estimators=4)
    assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
    """Refitting with warm_start and unchanged n_estimators must warn and
    leave the ensemble (hence its predictions) untouched."""
    # Test that nothing happens when fitting without increasing n_estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # modify X to nonsense values, this should not change anything
    X_train += 1.
    assert_warns_message(UserWarning,
                         "Warm-start fitting without increasing n_estimators does not",
                         clf.fit, X_train, y_train)
    assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
    """A 5+5 warm-started classifier must predict like a cold-fit
    10-estimator classifier with the same seed."""
    # warm started classifier with 5+5 estimators should be equivalent to
    # one classifier with 10 estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
                               random_state=3141)
    clf_ws.fit(X_train, y_train)
    clf_ws.set_params(n_estimators=10)
    clf_ws.fit(X_train, y_train)
    y1 = clf_ws.predict(X_test)
    clf = BaggingClassifier(n_estimators=10, warm_start=False,
                            random_state=3141)
    clf.fit(X_train, y_train)
    y2 = clf.predict(X_test)
    assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
    """Combining oob_score=True with warm_start=True must raise."""
    # Check using oob_score and warm_start simultaneously fails
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
    assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
    """Refitting with oob_score switched off must remove the stale
    oob_score_ attribute from the estimator."""
    X, y = make_hastie_10_2(n_samples=2000, random_state=1)
    clf = BaggingClassifier(n_estimators=50, oob_score=True)
    clf.fit(X, y)
    clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
    clf.fit(X, y)
    assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
ChenglongChen/Kaggle_HomeDepot | Code/Igor&Kostia/text_processing_wo_google.py | 1 | 63882 | # -*- coding: utf-8 -*-
"""
Initial text preprocessing.
Although text processing can be technically done within feature generation functions,
we found it to be very efficient to make all preprocessing first and only then move to
feature generation. It is because the same processed text is used as an input to
generate several different features.
This file is the same as text_processing.py, except this line is commented
df_all['search_term']=df_all['search_term'].map(lambda x: google_dict[x] if x in google_dict.keys() else x)
and the output is saved with '_wo_google' added to file names.
Competition: HomeDepot Search Relevance
Author: Igor Buinyi
Team: Turing test
"""
from config_IgorKostia import *
import numpy as np
import pandas as pd
from time import time
import re
import csv
import os
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
stoplist.append('till') # add 'till' to stoplist
# 'can' also might mean 'a container' like in 'trash can'
# so we create a separate stop list without 'can' to be used for query and product title
stoplist_wo_can=stoplist[:]
stoplist_wo_can.remove('can')
from homedepot_functions import *
from google_dict import *
t0 = time()
t1 = time()
############################################
####### PREPROCESSING ######################
############################################
### load train and test ###################
df_train = pd.read_csv(DATA_DIR+'/train.csv', encoding="ISO-8859-1")
df_test = pd.read_csv(DATA_DIR+'/test.csv', encoding="ISO-8859-1")
# Stack train and test so every text transformation is applied uniformly.
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
### load product attributes ###############
df_attr = pd.read_csv(DATA_DIR+'/attributes.csv', encoding="ISO-8859-1")
print 'loading time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### find unique brands from the attributes file
### for a few product_uids there are at least two different names in "MFG Brand Name"
### in such cases we keep only one of the names
df_all = pd.merge(df_all, df_attr[df_attr['name']=="MFG Brand Name"][['product_uid','value']], how='left', on='product_uid')
df_all['brand']=df_all['value'].fillna("").map(lambda x: x.encode('utf-8'))
df_all=df_all.drop('value',axis=1)
### Create a list of words with lowercase and uppercase letters
### Examples: 'InSinkErator', 'EpoxyShield'
### They are words from brand names or words from product title.
### The dict is used to correct product description which contains concatenated
### lines of text without separators :
### ---View lawn edgings and brick/ paver edgingsUtility stakes can be used for many purposes---
### Here we need to replace 'edgingsUtility' with 'edgings utility'.
### But we don't need to replace 'InSinkErator' with 'in sink erator'
add_space_stop_list=[]
uniq_brands=list(set(list(df_all['brand'])))
# Collect camelCase words (lower-upper-lower pattern) from brand names …
for i in range(0,len(uniq_brands)):
    uniq_brands[i]=simple_parser(uniq_brands[i])
    if re.search(r'[a-z][A-Z][a-z]',uniq_brands[i])!=None:
        for word in uniq_brands[i].split():
            if re.search(r'[a-z][A-Z][a-z]',word)!=None:
                add_space_stop_list.append(word.lower())
add_space_stop_list=list(set(add_space_stop_list))
print len(add_space_stop_list)," words from brands in add_space_stop_list"
uniq_titles=list(set(list(df_all['product_title'])))
# … and the same camelCase scan over product titles.
for i in range(0,len(uniq_titles)):
    uniq_titles[i]=simple_parser(uniq_titles[i])
    if re.search(r'[a-z][A-Z][a-z]',uniq_titles[i])!=None:
        for word in uniq_titles[i].split():
            if re.search(r'[a-z][A-Z][a-z]',word)!=None:
                add_space_stop_list.append(word.lower())
add_space_stop_list=list(set(add_space_stop_list))
print len(add_space_stop_list) ," total words from brands and product titles in add_space_stop_list\n"
#################################################################
##### First step of spell correction: using the Google dict
##### from the forum
# https://www.kaggle.com/steubk/home-depot-product-search-relevance/fixing-typos
## the following line is commented in this file
####df_all['search_term']=df_all['search_term'].map(lambda x: google_dict[x] if x in google_dict.keys() else x)
#################################################################
##### AUTOMATIC SPELL CHECKER ###################################
#################################################################
### A simple spell checker is implemented here
### First, we get unique words from search_term and product_title
### Then, we count how many times word occurs in search_term and product_title
### Finally, if the word is not present in product_title and not meaningful
### (i.e. wn.synsets(word) returns empty list), the word is likely
### to be misspelled, so we try to correct it using bigrams, words from matched
### products or all products. The best match is chosen using
### difflib.SequenceMatcher()
def is_word_in_string(word, s):
    """Return True iff `word` occurs as a whole whitespace-separated token of `s`."""
    return any(token == word for token in s.split())
def create_bigrams(s):
    """Build adjacent-word bigrams ("w1_w2") from `s`.

    Only whitespace tokens longer than 2 characters and containing no digit
    are kept. The result is a string of bigrams each prefixed with a single
    space (empty string when fewer than two tokens qualify).
    """
    tokens = [w for w in s.split()
              if not re.sub('[^0-9]', '', w) and len(w) > 2]
    return "".join(" %s_%s" % (a, b) for a, b in zip(tokens, tokens[1:]))
# Cache lowercased, simple-parsed versions of title and query for reuse.
df_all['product_title_simpleparsed']=df_all['product_title'].map(lambda x: simple_parser(x).lower())
df_all['search_term_simpleparsed']=df_all['search_term'].map(lambda x: simple_parser(x).lower())
str_title=" ".join(list(df_all['product_title'].map(lambda x: simple_parser(x).lower())))
str_query=" ".join(list(df_all['search_term'].map(lambda x: simple_parser(x).lower())))
# create bigrams
bigrams_str_title=" ".join(list(df_all['product_title'].map(lambda x: create_bigrams(simple_parser(x).lower()))))
bigrams_set=set(bigrams_str_title.split())
### count word frequencies for query and product title
my_dict={}
str1= str_title+" "+str_query
for word in list(set(list(str1.split()))):
    my_dict[word]={"title":0, "query":0, 'word':word}
for word in str_title.split():
    my_dict[word]["title"]+=1
for word in str_query.split():
    my_dict[word]["query"]+=1
### 1. Process words without digits
### Potential errors: words that appear only in query
### Correct words: 5 or more times in product_title
errors_dict={}
correct_dict={}
for word in my_dict.keys():
    # Only purely alphabetic words of length >= 3 are considered.
    if len(word)>=3 and len(re.sub('[^0-9]', '', word))==0:
        if my_dict[word]["title"]==0:
            # A word absent from titles is NOT an error if WordNet knows it,
            # or if its singular/plural counterpart does occur in titles.
            if len(wn.synsets(word))>0 \
            or (word.endswith('s') and (word[:-1] in my_dict.keys()) and my_dict[word[:-1]]["title"]>0)\
            or (word[-1]!='s' and (word+'s' in my_dict.keys()) and my_dict[word+'s']["title"]>0):
                1
            else:
                errors_dict[word]=my_dict[word]
        elif my_dict[word]["title"]>=5:
            correct_dict[word]=my_dict[word]
### for each error word try finding a good match in bigrams, matched products, all products
cnt=0
NN=len(errors_dict.keys())
t0=time()
for i in range(0,len(errors_dict.keys())):
    word=sorted(errors_dict.keys())[i]
    cnt+=1
    lst=[]
    lst_tuple=[]
    suggested=False
    suggested_word=""
    rt_max=0
    # if only one word in query, be more selective in choosing a correction
    min_query_len=min(df_all['search_term_simpleparsed'][df_all['search_term_simpleparsed'].map(lambda x: is_word_in_string(word,x))].map(lambda x: len(x.split())))
    delta=0.05*int(min_query_len<2)
    # Candidate vocabulary: alphabetic words from titles of products whose
    # query contains the misspelled word.
    words_from_matched_titles=[item for item in \
                " ".join(list(set(df_all['product_title_simpleparsed'][df_all['search_term_simpleparsed'].map(lambda x: is_word_in_string(word,x))]))).split() \
                if len(item)>2 and len(re.sub('[^0-9]', '', item))==0]
    words_from_matched_titles=list(set(words_from_matched_titles))
    words_from_matched_titles.sort()
    source=""
    # Stage 1: the word may be two title words glued together (a bigram).
    for bigram in bigrams_set:
        if bigram.replace("_","")==word:
            suggested=True
            suggested_word=bigram.replace("_"," ")
            source="from bigrams"
    # Stage 2: fuzzy match against words from matched product titles.
    if source=="":
        for correct_word in words_from_matched_titles:
            rt, rt_scaled = seq_matcher(word,correct_word)
            #print correct_word, rt,rt_scaled
            if rt>0.75+delta or (len(word)<6 and rt>0.68+delta):
                lst.append(correct_word)
                lst_tuple.append((correct_word,my_dict[correct_word]["title"]))
                if rt>rt_max:
                    rt_max=rt
                    suggested=True
                    source="from matched products"
                    suggested_word=correct_word
                # Ties are broken by similarity of the sorted letter bags;
                # an unresolved tie withdraws the suggestion.
                elif rt==rt_max and seq_matcher("".join(sorted(word)),"".join(sorted(correct_word)))[0]>seq_matcher("".join(sorted(word)),"".join(sorted(suggested_word)))[0]:
                    suggested_word=correct_word
                elif rt==rt_max:
                    suggested=False
                    source=""
    # Stage 3: fall back to frequent words from all product titles.
    if source=="" and len(lst)==0:
        source="from all products"
        for correct_word in correct_dict.keys():
            rt, rt_scaled = seq_matcher(word,correct_word)
            #print correct_word, rt,rt_scaled
            if correct_dict[correct_word]["title"]>10 and (rt>0.8+delta or (len(word)<6 and rt>0.73+delta)):
                #print correct_word, rt,rt_scaled
                lst.append(correct_word)
                lst_tuple.append((correct_word,correct_dict[correct_word]["title"]))
                if rt>rt_max:
                    rt_max=rt
                    suggested=True
                    suggested_word=correct_word
                elif rt==rt_max and seq_matcher("".join(sorted(word)),"".join(sorted(correct_word)))[0]>seq_matcher("".join(sorted(word)),"".join(sorted(suggested_word)))[0]:
                    suggested_word=correct_word
                elif rt==rt_max:
                    suggested=False
    # Record the outcome (possibly an empty suggestion) on the error entry.
    if suggested==True:
        errors_dict[word]["suggestion"]=suggested_word
        errors_dict[word]["others"]=lst_tuple
        errors_dict[word]["source"]=source
    else:
        errors_dict[word]["suggestion"]=""
        errors_dict[word]["others"]=lst_tuple
        errors_dict[word]["source"]=source
    #print(cnt, word, errors_dict[word]["query"], errors_dict[word]["suggestion"], source, errors_dict[word]["others"])
    #if (cnt % 20)==0:
    #    print cnt, " out of ", NN, "; ", round((time()-t0),1) ,' sec'
### 2. Add some words with digits
### If the word begins with a meaningful part [len(wn.synsets(srch.group(0)))>0],
### ends with a number and has vowels, suggest inserting a space before the
### digits (e.g. the substitution below turns 'door36' into 'door 36')
for word in my_dict.keys():
    # query-only words that mix letters and digits
    if my_dict[word]['query']>0 and my_dict[word]['title']==0 \
    and len(re.sub('[^0-9]', '', word))!=0 and len(re.sub('[^a-z]', '', word))!=0:
        # leading alphabetic prefix of 3+ letters immediately followed by a digit
        srch=re.search(r'(?<=^)[a-z][a-z][a-z]+(?=[0-9])',word)
        if srch!=None and len(wn.synsets(srch.group(0)))>0 \
        and len(re.sub('[^aeiou]', '', word))>0 and word[-1] in '0123456789':
            errors_dict[word]=my_dict[word]
            errors_dict[word]["source"]="added space before digit"
            errors_dict[word]["suggestion"]=re.sub(r'(?<=^)'+srch.group(0)+r'(?=[a-zA-Z0-9])',srch.group(0)+' ',word)
            #print word, re.sub(r'(?<=^)'+srch.group(0)+r'(?=[a-zA-Z0-9])',srch.group(0)+' ',word)
### save dictionary of generated corrections for later inspection/reuse
corrections_df=pd.DataFrame(errors_dict).transpose()
corrections_df.to_csv(PROCESSINGTEXT_DIR+"/automatically_generated_word_corrections_wo_google.csv")
print 'building spell checker time:',round((time()-t0)/60,1) ,'minutes\n'
##### END OF SPELL CHECKER ######################################
#################################################################
########################################
##### load words for spell checker
### spell_check_dict: misspelled word -> suggested correction
spell_check_dict={}
for word in errors_dict.keys():
    if errors_dict[word]['suggestion']!="":
        spell_check_dict[word]=errors_dict[word]['suggestion']
### the string below is a disabled alternative that reloads the same
### dictionary from the CSV written above instead of rebuilding it
"""
spell_check_dict={}
with open(PROCESSINGTEXT_DIR+"/automatically_generated_word_corrections_wo_google.csv") as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if row['suggestion']!="":
            spell_check_dict[row['word']]=row['suggestion']
"""
###############################################
### parse query and product title
### the query is parsed twice: with and without spell correction, so that the
### two versions can be compared below
df_all['search_term_parsed']=col_parser(df_all['search_term'],automatic_spell_check_dict=spell_check_dict,\
    add_space_stop_list=[]).map(lambda x: x.encode('utf-8'))
df_all['search_term_parsed_wospellcheck']=col_parser(df_all['search_term'],automatic_spell_check_dict={},\
    add_space_stop_list=[]).map(lambda x: x.encode('utf-8'))
print 'search_term parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### function to check whether queries parsed with and without spell correction are identical
def match_queries(q1,q2):
    """Return a similarity ratio (0.0-1.0) between two parsed queries.

    Each query is reduced to lowercase letters and spaces, every word is
    crudely de-pluralized by dropping a trailing 's', and the two normalized
    strings are compared with difflib.SequenceMatcher. A ratio of 1.0 means
    the queries are identical after normalization.
    """
    def _normalize(text):
        # keep only lowercase letters and spaces
        letters_only = re.sub('[^a-z\ ]', '', text)
        depluralized = []
        for token in letters_only.split():
            if token[-1] == 's':
                token = token[:-1]
            depluralized.append(token)
        return " ".join(depluralized)
    return difflib.SequenceMatcher(None, _normalize(q1), _normalize(q2)).ratio()
### is_query_misspelled holds the similarity ratio between the spell-checked
### and the raw parsed query: values below 1.0 indicate the spell checker
### changed something
df_all['is_query_misspelled']=df_all.apply(lambda x: \
    match_queries(x['search_term_parsed'],x['search_term_parsed_wospellcheck']),axis=1)
df_all=df_all.drop(['search_term_parsed_wospellcheck'],axis=1)
print 'create dummy "is_query_misspelled" time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_all['product_title_parsed']=col_parser(df_all['product_title'],add_space_stop_list=[],\
    remove_from_brackets=True).map(lambda x: x.encode('utf-8'))
print 'product_title parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
#################################################################
##### COUNT BRAND NAMES #########################################
#################################################################
### some brand names in "MFG Brand Name" of attributes.csv have a few words
### but it is much more likely for a person to search for brand 'BEHR'
### than 'BEHR PREMIUM PLUS ULTRA'. That is why we replace long brand names
### with shorter alternatives
### key: long brand name (as parsed); value: short brand name to use instead
replace_brand_dict={
'acurio latticeworks': 'acurio',
'american kennel club':'akc',
'amerimax home products': 'amerimax',
'barclay products':'barclay',
'behr marquee': 'behr',
'behr premium': 'behr',
'behr premium deckover': 'behr',
'behr premium plus': 'behr',
'behr premium plus ultra': 'behr',
'behr premium textured deckover': 'behr',
'behr pro': 'behr',
'bel air lighting': 'bel air',
'bootz industries':'bootz',
'campbell hausfeld':'campbell',
'columbia forest products': 'columbia',
'essick air products':'essick air',
'evergreen enterprises':'evergreen',
'feather river doors': 'feather river',
'gardner bender':'gardner',
'ge parts':'ge',
'ge reveal':'ge',
'gibraltar building products':'gibraltar',
'gibraltar mailboxes':'gibraltar',
'glacier bay':'glacier',
'great outdoors by minka lavery': 'great outdoors',
'hamilton beach': 'hamilton',
'hampton bay':'hampton',
'hampton bay quickship':'hampton',
'handy home products':'handy home',
'hickory hardware': 'hickory',
'home accents holiday': 'home accents',
'home decorators collection': 'home decorators',
'homewerks worldwide':'homewerks',
'klein tools': 'klein',
'lakewood cabinets':'lakewood',
'leatherman tool group':'leatherman',
'legrand adorne':'legrand',
'legrand wiremold':'legrand',
'lg hausys hi macs':'lg',
'lg hausys viatera':'lg',
'liberty foundry':'liberty',
'liberty garden':'liberty',
'lithonia lighting':'lithonia',
'loloi rugs':'loloi',
'maasdam powr lift':'maasdam',
'maasdam powr pull':'maasdam',
'martha stewart living': 'martha stewart',
'merola tile': 'merola',
'miracle gro':'miracle',
'miracle sealants':'miracle',
'mohawk home': 'mohawk',
'mtd genuine factory parts':'mtd',
'mueller streamline': 'mueller',
'newport coastal': 'newport',
'nourison overstock':'nourison',
'nourison rug boutique':'nourison',
'owens corning': 'owens',
'premier copper products':'premier',
'price pfister':'pfister',
'pride garden products':'pride garden',
'prime line products':'prime line',
'redi base':'redi',
'redi drain':'redi',
'redi flash':'redi',
'redi ledge':'redi',
'redi neo':'redi',
'redi niche':'redi',
'redi shade':'redi',
'redi trench':'redi',
'reese towpower':'reese',
'rheem performance': 'rheem',
'rheem ecosense': 'rheem',
'rheem performance plus': 'rheem',
'rheem protech': 'rheem',
'richelieu hardware':'richelieu',
'rubbermaid commercial products': 'rubbermaid',
'rust oleum american accents': 'rust oleum',
'rust oleum automotive': 'rust oleum',
'rust oleum concrete stain': 'rust oleum',
'rust oleum epoxyshield': 'rust oleum',
'rust oleum flexidip': 'rust oleum',
'rust oleum marine': 'rust oleum',
'rust oleum neverwet': 'rust oleum',
'rust oleum parks': 'rust oleum',
'rust oleum professional': 'rust oleum',
'rust oleum restore': 'rust oleum',
'rust oleum rocksolid': 'rust oleum',
'rust oleum specialty': 'rust oleum',
'rust oleum stops rust': 'rust oleum',
'rust oleum transformations': 'rust oleum',
'rust oleum universal': 'rust oleum',
'rust oleum painter touch 2': 'rust oleum',
'rust oleum industrial choice':'rust oleum',
'rust oleum okon':'rust oleum',
'rust oleum painter touch':'rust oleum',
'rust oleum painter touch 2':'rust oleum',
'rust oleum porch and floor':'rust oleum',
'salsbury industries':'salsbury',
'simpson strong tie': 'simpson',
'speedi boot': 'speedi',
'speedi collar': 'speedi',
'speedi grille': 'speedi',
'speedi products': 'speedi',
'speedi vent': 'speedi',
'pass and seymour': 'seymour',
'pavestone rumblestone': 'rumblestone',
'philips advance':'philips',
'philips fastener':'philips',
'philips ii plus':'philips',
'philips manufacturing company':'philips',
'safety first':'safety 1st',
'sea gull lighting': 'sea gull',
'scott':'scotts',
'scotts earthgro':'scotts',
'south shore furniture': 'south shore',
'tafco windows': 'tafco',
'trafficmaster allure': 'trafficmaster',
'trafficmaster allure plus': 'trafficmaster',
'trafficmaster allure ultra': 'trafficmaster',
'trafficmaster ceramica': 'trafficmaster',
'trafficmaster interlock': 'trafficmaster',
'thomas lighting': 'thomas',
'unique home designs':'unique home',
'veranda hp':'veranda',
'whitehaus collection':'whitehaus',
'woodgrain distritubtion':'woodgrain',
'woodgrain millwork': 'woodgrain',
'woodford manufacturing company': 'woodford',
'wyndham collection':'wyndham',
'yardgard select': 'yardgard',
'yosemite home decor': 'yosemite'
}
### clean placeholder values ('N/A', 'Unbranded', ...) and a leading 'The '
### from the raw brand column, then parse it
df_all['brand_parsed']=col_parser(df_all['brand'].map(lambda x: re.sub('^[t|T]he ', '', x.replace(".N/A","").replace("N.A.","").replace("n/a","").replace("Generic Unbranded","").replace("Unbranded","").replace("Generic",""))),add_space_stop_list=add_space_stop_list)
list_brands=list(df_all['brand_parsed'])
### replace long brand names with their short form (see replace_brand_dict)
df_all['brand_parsed']=df_all['brand_parsed'].map(lambda x: replace_brand_dict[x] if x in replace_brand_dict.keys() else x)
### count frequencies of brands in query and product_title
str_query=" : ".join(list(df_all['search_term_parsed'])).lower()
print "\nGenerating brand dict: How many times each brand appears in the dataset?"
brand_dict=get_attribute_dict(list_brands,str_query=str_query)
### These words are likely to mean other things than brand names.
### For example, it would not be prudent to consider each occurence of 'design' or 'veranda' as a brand name.
### We decide not to use these words as brands and exclude them from our brand dictionary.
# The list is shared on the forum.
del_list=['aaa','off','impact','square','shelves','finish','ring','flood','dual','ball','cutter',\
'max','off','mat','allure','diamond','drive', 'edge','anchor','walls','universal','cat', 'dawn','ion','daylight',\
'roman', 'weed eater', 'restore', 'design', 'caddy', 'pole caddy', 'jet', 'classic', 'element', 'aqua',\
'terra', 'decora', 'ez', 'briggs', 'wedge', 'sunbrella', 'adorne', 'santa', 'bella', 'duck', 'hotpoint',\
'duck', 'tech', 'titan', 'powerwasher', 'cooper lighting', 'heritage', 'imperial', 'monster', 'peak',
'bell', 'drive', 'trademark', 'toto', 'champion', 'shop vac', 'lava', 'jet', 'flood', \
'roman', 'duck', 'magic', 'allen', 'bunn', 'element', 'international', 'larson', 'tiki', 'titan', \
'space saver', 'cutter', 'scotch', 'adorne', 'ball', 'sunbeam', 'fatmax', 'poulan', 'ring', 'sparkle', 'bissell', \
'universal', 'paw', 'wedge', 'restore', 'daylight', 'edge', 'americana', 'wacker', 'cat', 'allure', 'bonnie plants', \
'troy', 'impact', 'buffalo', 'adams', 'jasco', 'rapid dry', 'aaa', 'pole caddy', 'pac', 'seymour', 'mobil', \
'mastercool', 'coca cola', 'timberline', 'classic', 'caddy', 'sentry', 'terrain', 'nautilus', 'precision', \
'artisan', 'mural', 'game', 'royal', 'use', 'dawn', 'task', 'american line', 'sawtrax', 'solo', 'elements', \
'summit', 'anchor', 'off', 'spruce', 'medina', 'shoulder dolly', 'brentwood', 'alex', 'wilkins', 'natural magic', \
'kodiak', 'metro', 'shelter', 'centipede', 'imperial', 'cooper lighting', 'exide', 'bella', 'ez', 'decora', \
'terra', 'design', 'diamond', 'mat', 'finish', 'tilex', 'rhino', 'crock pot', 'legend', 'leatherman', 'remove', \
'architect series', 'greased lightning', 'castle', 'spirit', 'corian', 'peak', 'monster', 'heritage', 'powerwasher',\
'reese', 'tech', 'santa', 'briggs', 'aqua', 'weed eater', 'ion', 'walls', 'max', 'dual', 'shelves', 'square',\
'hickory', "vikrell", "e3", "pro series", "keeper", "coastal shower doors", 'cadet','church','gerber','glidden',\
'cooper wiring devices', 'border blocks', 'commercial electric', 'pri','exteria','extreme', 'veranda',\
'gorilla glue','gorilla','shark','wen']
del_list=list(set(list(del_list)))
for key in del_list:
    if key in brand_dict.keys():
        del(brand_dict[key])
# save to file
brand_df=pd.DataFrame(brand_dict).transpose()
brand_df.to_csv(PROCESSINGTEXT_DIR+"/brand_statistics_wo_google.csv")
### the string below is a disabled alternative that reloads the same
### statistics from the CSV written above instead of rebuilding them
"""
brand_dict={}
import csv
with open(PROCESSINGTEXT_DIR+"/brand_statistics_wo_google.csv") as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        brand_dict[row['name']]={'cnt_attribute': int(row['cnt_attribute']), 'cnt_query': int(row['cnt_query']),
        'name': row['name'], 'nwords': int(row['nwords'])}
"""
### Later we will create features like match between brands in query and product titles.
### But we only process brands that appear frequently enough in the dataset:
### Either 8+ times in product title or [1+ time in query and 3+ times in product title]
for item in brand_dict.keys():
    if (brand_dict[item]['cnt_attribute']>=3 and brand_dict[item]['cnt_query']>=1) \
    or (brand_dict[item]['cnt_attribute'])>=8:
        1
    else:
        del(brand_dict[item])
brand_df=pd.DataFrame(brand_dict).transpose().sort(['cnt_query'], ascending=[1])
print 'brand dict creation time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF COUNT BRAND NAMES ##################################
#################################################################
#################################################################
##### COUNT MATERIALS ###########################################
#################################################################
### First, create list of unique materials. We need to replace some complex names
### (see change_material() function)
### File attributes.csv for some product_uid contains multiple different values of "Material"
### That is why we have to concatenate all such values to ensure that each product_uid
### has only one value for material
### keep only the 'Material' attribute rows and drop the placeholder values
tmp_material=df_attr[df_attr['name']=="Material"][['product_uid','value']]
tmp_material=tmp_material[tmp_material['value']!="Other"]
tmp_material=tmp_material[tmp_material['value']!="*"]
def change_material(s):
    """Normalize a raw 'Material' attribute value to a short alias.

    Verbose names such as 'Medium Density Fiberboard (MDF)' are mapped to
    their short lowercase form ('mdf'); any value without a known alias is
    returned unchanged.
    """
    # verbose attribute value -> short searchable alias
    aliases = {
        'Medium Density Fiberboard (MDF)': 'mdf',
        'High Density Fiberboard (HDF)': 'hdf',
        'Fibre Reinforced Polymer (FRP)': 'frp',
        'Acrylonitrile Butadiene Styrene (ABS)': 'abs',
        'Cross-Linked Polyethylene (PEX)': 'pex',
        'Chlorinated Poly Vinyl Chloride (CPVC)': 'cpvc',
        'PVC (vinyl)': 'pvc',
        'Thermoplastic rubber (TPR)': 'tpr',
        'Poly Lactic Acid (PLA)': 'pla',
        '100% Polyester': 'polyester',
        '100% UV Olefin': 'olefin',
        '100% BCF Polypropylene': 'polypropylene',
        '100% PVC': 'pvc',
    }
    return aliases.get(s, s)
tmp_material['value'] = tmp_material['value'].map(lambda x: change_material(x))
### dict_materials: product_uid -> {'product_uid', 'cnt' (number of Material
### rows), 'material' (all Material values joined with spaces)}
dict_materials = {}
key_list=tmp_material['product_uid'].keys()
for i in range(0,len(key_list)):
    if tmp_material['product_uid'][key_list[i]] not in dict_materials.keys():
        # first Material row seen for this product_uid
        dict_materials[tmp_material['product_uid'][key_list[i]]]={}
        dict_materials[tmp_material['product_uid'][key_list[i]]]['product_uid']=tmp_material['product_uid'][key_list[i]]
        dict_materials[tmp_material['product_uid'][key_list[i]]]['cnt']=1
        dict_materials[tmp_material['product_uid'][key_list[i]]]['material']=tmp_material['value'][key_list[i]]
    else:
        ##print key_list[i]
        # additional Material rows are concatenated onto the existing value
        dict_materials[tmp_material['product_uid'][key_list[i]]]['material']=dict_materials[tmp_material['product_uid'][key_list[i]]]['material']+' '+tmp_material['value'][key_list[i]]
        dict_materials[tmp_material['product_uid'][key_list[i]]]['cnt']+=1
    if (i % 10000)==0:
        print i
df_materials=pd.DataFrame(dict_materials).transpose()
### merge created 'material' column with df_all
df_all = pd.merge(df_all, df_materials[['product_uid','material']], how='left', on='product_uid')
df_all['material']=df_all['material'].fillna("").map(lambda x: x.encode('utf-8'))
df_all['material_parsed']=col_parser(df_all['material'].map(lambda x: x.replace("Other","").replace("*","")), parse_material=True,add_space_stop_list=[])
### list of all materials
list_materials=list(df_all['material_parsed'].map(lambda x: x.lower()))
### count frequencies of materials in query and product_title
print "\nGenerating material dict: How many times each material appears in the dataset?"
material_dict=get_attribute_dict(list_materials,str_query=str_query)
### create dataframe and save to file
material_df=pd.DataFrame(material_dict).transpose()
material_df.to_csv(PROCESSINGTEXT_DIR+"/material_statistics_wo_google.csv")
### For further processing keep only materials that appear
### frequently enough (the disabled variant below used a different threshold)
"""
for item in material_dict.keys():
    if (material_dict[item]['cnt_attribute']>=10 and material_dict[item]['cnt_query']>=1):
        1
    else:
        del(material_dict[item])
"""
### drop rare materials and materials that occur suspiciously often in
### queries relative to the attribute counts
for key in set(material_dict.keys()):
    if material_dict[key]['cnt_attribute']<20 or material_dict[key]['cnt_query']>3*material_dict[key]['cnt_attribute']:
        del(material_dict[key])
material_df=pd.DataFrame(material_dict).transpose().sort(['cnt_query'], ascending=[1])
print 'material dict creation time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
#################################################################
##### END OF COUNT MATERIALS ####################################
#################################################################
#################################################################
##### EXTRACT MATERIALS FROM QUERY AND PRODUCT TITLE ############
#################################################################
### At this moment we have parsed query and product title
### Now we will produce for query:
### brands_in_query, materials_in_query
### query_without_brand_names (we remove brand names from the text)
### query_without_brand_names_and_materials.
### Also, similar columns for product title.
def getremove_brand_or_material_from_str(s, df, replace_brand_dict=None):
    """Remove every known brand/material name found in *s* and report them.

    Parameters
    ----------
    s : str
        Parsed text (query, title, bullets, ...) to scan.
    df : pandas.DataFrame
        Must have a 'name' column (brand/material strings) and an 'nwords'
        column (word count of each name), e.g. brand_df or material_df.
    replace_brand_dict : dict, optional
        Maps a matched name to the (short) name to report instead.

    Returns
    -------
    (str, str)
        The input with all matched names removed (whitespace re-normalized),
        and the matched names joined with ';' in match order.
    """
    # avoid the original mutable-default-argument pitfall
    if replace_brand_dict is None:
        replace_brand_dict = {}
    items_found = []
    # multi-word names first, so e.g. 'behr premium' is removed before 'behr'
    df = df.sort_values(['nwords'], ascending=[False])
    for item in df['name']:
        # cheap substring pre-check before the whole-word regex match
        if item in s:
            pattern = r'\b' + item + r'\b'
            if re.search(pattern, s) is not None:
                s = re.sub(pattern, '', s)
                items_found.append(replace_brand_dict.get(item, item))
    return " ".join(s.split()), ";".join(items_found)
### We process only unique queries and product titles
### to reduce the processing time by more than 50%
### my_dict caches: unique parsed query -> (query without brands, brands found)
aa=list(set(list(df_all['search_term_parsed'])))
my_dict={}
for i in range(0,len(aa)):
    my_dict[aa[i]]=getremove_brand_or_material_from_str(aa[i],brand_df)
    if (i % 5000)==0:
        print "Extracted brands from",i,"out of",len(aa),"unique search terms; ", str(round((time()-t0)/60,1)),"minutes"
df_all['search_term_tuple']= df_all['search_term_parsed'].map(lambda x: my_dict[x])
df_all['search_term_parsed_woBrand']= df_all['search_term_tuple'].map(lambda x: x[0])
df_all['brands_in_search_term']= df_all['search_term_tuple'].map(lambda x: x[1])
print 'extract brands from query time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### then strip materials from the brand-free query
df_all['search_term_tuple']= df_all['search_term_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_all['search_term_parsed_woBM']= df_all['search_term_tuple'].map(lambda x: x[0])
df_all['materials_in_search_term']= df_all['search_term_tuple'].map(lambda x: x[1])
df_all=df_all.drop('search_term_tuple',axis=1)
print 'extract materials from query time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##############################
### same two steps for product titles, again caching unique values
aa=list(set(list(df_all['product_title_parsed'])))
my_dict={}
for i in range(0,len(aa)):
    my_dict[aa[i]]=getremove_brand_or_material_from_str(aa[i],brand_df)
    if (i % 5000)==0:
        print "Extracted brands from",i,"out of",len(aa),"unique product titles; ", str(round((time()-t0)/60,1)),"minutes"
df_all['product_title_tuple']= df_all['product_title_parsed'].map(lambda x: my_dict[x])
df_all['product_title_parsed_woBrand']= df_all['product_title_tuple'].map(lambda x: x[0])
df_all['brands_in_product_title']= df_all['product_title_tuple'].map(lambda x: x[1])
print 'extract brands from product title time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_all['product_title_tuple']= df_all['product_title_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_all['product_title_parsed_woBM']= df_all['product_title_tuple'].map(lambda x: x[0])
df_all['materials_in_product_title']= df_all['product_title_tuple'].map(lambda x: x[1])
df_all=df_all.drop('product_title_tuple',axis=1)
print 'extract materials from product titles time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF EXTRACT MATERIALS FROM QUERY AND PRODUCT TITLE #####
#################################################################
###################################
##### Tagging #####################
### We use nltk.pos_tagger() to tag words
df_all['search_term_tokens'] =col_tagger(df_all['search_term_parsed_woBM'])
print 'search term tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_all['product_title_tokens'] =col_tagger(df_all['product_title_parsed_woBM'])
print 'product title tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
#################################################################
##### PROCESS ATTRIBUTES BULLETS ################################
#################################################################
### File attribute.csv contains 5343 different categories
### (https://www.kaggle.com/briantc/home-depot-product-search-relevance/homedepot-first-dataexploreation-k)
### Here we process only text in categories named 'Bullet##' where # stands for a number.
### This text is similar to product descriptions from 'product_descriptions.csv'.
### First, we concatenate all bullets for the same product_uid
df_attr['product_uid']=df_attr['product_uid'].fillna(0)
df_attr['value']=df_attr['value'].fillna("")
df_attr['name']=df_attr['name'].fillna("")
### dict_attr: product_uid -> {'product_uid', 'attribute_bullets' (list, later str)}
dict_attr={}
for product_uid in list(set(list(df_attr['product_uid']))):
    dict_attr[int(product_uid)]={'product_uid':int(product_uid),'attribute_bullets':[]}
for i in range(0,len(df_attr['product_uid'])):
    if (i % 100000)==0:
        print "Read",i,"out of", len(df_attr['product_uid']), "rows in attributes.csv in", round((time()-t0)/60,1) ,'minutes'
    if df_attr['name'][i][0:6]=="Bullet":
        dict_attr[int(df_attr['product_uid'][i])]['attribute_bullets'].append(df_attr['value'][i])
# uid 0 is the fillna placeholder for missing product_uid, not a real product
if 0 in dict_attr.keys():
    del(dict_attr[0])
# join the collected bullets into one '. '-separated string per product
for item in dict_attr.keys():
    if len(dict_attr[item]['attribute_bullets'])>0:
        dict_attr[item]['attribute_bullets']=". ".join(dict_attr[item]['attribute_bullets'])
        dict_attr[item]['attribute_bullets']+="."
    else:
        dict_attr[item]['attribute_bullets']=""
df_attr_bullets=pd.DataFrame(dict_attr).transpose()
df_attr_bullets['attribute_bullets']=df_attr_bullets['attribute_bullets'].map(lambda x: x.replace("..",".").encode('utf-8'))
print 'create attributes bullets time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Then we follow similar steps as for query and product title above
### Parsing
df_attr_bullets['attribute_bullets_parsed'] = df_attr_bullets['attribute_bullets'].map(lambda x:str_parser(x,add_space_stop_list=[]))
print 'attribute bullets parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Extracting brands...
df_attr_bullets['attribute_bullets_tuple']= df_attr_bullets['attribute_bullets_parsed'].map(lambda x: getremove_brand_or_material_from_str(x,brand_df))
df_attr_bullets['attribute_bullets_parsed_woBrand']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[0])
df_attr_bullets['brands_in_attribute_bullets']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[1])
print 'extract brands from attribute_bullets time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and materials from text...
df_attr_bullets['attribute_bullets_tuple']= df_attr_bullets['attribute_bullets_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_attr_bullets['attribute_bullets_parsed_woBM']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[0])
df_attr_bullets['materials_in_attribute_bullets']= df_attr_bullets['attribute_bullets_tuple'].map(lambda x: x[1])
df_attr_bullets=df_attr_bullets.drop(['attribute_bullets_tuple'],axis=1)
print 'extract materials from attribute_bullets time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and tagging text using NLTK
df_attr_bullets['attribute_bullets_tokens'] =col_tagger(df_attr_bullets['attribute_bullets_parsed_woBM'])
print 'attribute bullets tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF PROCESS ATTRIBUTES BULLETS #########################
#################################################################
#################################################################
##### PROCESS PRODUCT DESCRIPTIONS ##############################
#################################################################
### same pipeline as for attribute bullets: parse, strip brands, strip
### materials, then POS-tag the remaining text
df_pro_desc = pd.read_csv(DATA_DIR+'/product_descriptions.csv')
### Parsing
df_pro_desc['product_description_parsed'] = df_pro_desc['product_description'].map(lambda x:str_parser(x,add_space_stop_list=add_space_stop_list).encode('utf-8'))
print 'product description parsing time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Extracting brands...
df_pro_desc['product_description_tuple']= df_pro_desc['product_description_parsed'].map(lambda x: getremove_brand_or_material_from_str(x,brand_df))
df_pro_desc['product_description_parsed_woBrand']= df_pro_desc['product_description_tuple'].map(lambda x: x[0])
df_pro_desc['brands_in_product_description']= df_pro_desc['product_description_tuple'].map(lambda x: x[1])
print 'extract brands from product_description time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and materials from text...
df_pro_desc['product_description_tuple']= df_pro_desc['product_description_parsed_woBrand'].map(lambda x: getremove_brand_or_material_from_str(x,material_df))
df_pro_desc['product_description_parsed_woBM']= df_pro_desc['product_description_tuple'].map(lambda x: x[0])
df_pro_desc['materials_in_product_description']= df_pro_desc['product_description_tuple'].map(lambda x: x[1])
df_pro_desc=df_pro_desc.drop(['product_description_tuple'],axis=1)
print 'extract materials from product_description time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### ... and tagging text using NLTK
df_pro_desc['product_description_tokens'] = col_tagger(df_pro_desc['product_description_parsed_woBM'])
print 'product decription tagging time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
df_pro_desc['product_description']= df_pro_desc['product_description'].map(lambda x: x.encode('utf-8'))
### disabled stemming variants kept for reference
#df_attr_bullets['attribute_bullets_stemmed']=df_attr_bullets['attribute_bullets_parsed'].map(lambda x:str_stemmer_wo_parser(x))
#df_attr_bullets['attribute_bullets_stemmed_woBM']=df_attr_bullets['attribute_bullets_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
#df_attr_bullets['attribute_bullets_stemmed_woBrand']=df_attr_bullets['attribute_bullets_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
#df_pro_desc['product_description_stemmed']=df_pro_desc['product_description_parsed'].map(lambda x:str_stemmer_wo_parser(x))
#df_pro_desc['product_description_stemmed_woBM']=df_pro_desc['product_description_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
#df_pro_desc['product_description_stemmed_woBrand']=df_pro_desc['product_description_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
#print 'stemming description and bullets time:',round((time()-t0)/60,1) ,'minutes\n'
#t0 = time()
##### END OF PROCESS PRODUCT DESCRIPTIONS #######################
#################################################################
#################################################################
##### GET IMPORTANT WORDS FROM QUERY AND PRODUCT TITLE ##########
#################################################################
### We started this work on our own by observing irregularities in models predictions,
### but we ended up with something similar to extracting the top trigram from
### http://blog.kaggle.com/2015/07/22/crowdflower-winners-interview-3rd-place-team-quartet/
### We found that some words are more important than the other
### for predicting the relevance. For example, if the customer
### asks for 'kitchen faucet with side spray', she is looking for
### faucet, not for spray, side or kitchen. Therefore, faucets will
### be more relevant for this query, but sprays, sides and kitchens
### will be less relevant.
### Let us define the most important word (or keyword) 'thekey'.
### The two words before it are 'beforethekey and 'before2thekey'.
### Example: query='outdoor ceiling fan with light'
### thekey='fan'
### beforethekey='ceiling'
### before2thekey='outdoor'
### Below we build an algorithm to get such important words
### from query and product titles.
### Our task is simplified due to (1) fairly uniform structure of
### product titles and (2) small number of words in query.
### In the first step we delete irrelevant words using the following function.
### Although it may appear complex, because we tried to correctly process as many
### entries as possible, the basic logic is very simple:
### delete all words after 'with', 'for', 'in', 'that', 'on'
### as well as in some cases all words after colon ','
def cut_product_title(s):
    """Trim a product title down to the phrase that names the product itself.

    Lower-cases and normalizes the title (HTML entities, punctuation
    spacing, thousands separators), then deletes trailing qualifier
    clauses introduced by 'with', 'without', 'for', 'in', 'that', 'on',
    'featuring', 'includes', 'fits', 'w/', and -- when safe -- everything
    after a comma or a ' - ' separator, so that only the head phrase
    remains (e.g. 'outdoor ceiling fan with light' -> 'outdoor ceiling fan').

    FIX: the HTML-entity string literals ('&amp;', '&nbsp;', '&#39;')
    had been entity-decoded in transit, which turned the first
    substitution into a no-op re.sub('&','&',s) and left an unbalanced
    quote sequence in the third; the original literals are restored here.

    Args:
        s: raw product title (unicode/str).
    Returns:
        the cut title as a single space-separated lowercase string;
        words containing a digit-dash-digit pattern are dropped and
        remaining hyphens are removed.
    """
    s=s.lower()
    # restore HTML entities before any other normalization
    s = re.sub('&amp;', '&', s)
    s = re.sub('&nbsp;', '', s)
    s = re.sub('&#39;', '', s)
    # '1, 000' -> '1000' (thousands separators between digits)
    s = re.sub(r'(?<=[0-9]),[\ ]*(?=[0-9])', '', s)
    s = re.sub(r'(?<=\))(?=[a-zA-Z0-9])', ' ', s) # add space between parentheses and letters
    s = re.sub(r'(?<=[a-zA-Z0-9])(?=\()', ' ', s) # add space between parentheses and letters
    s = s.replace(";",". ")
    s = s.replace(":"," . ")
    s=s.replace("&"," and ")
    s = re.sub('[^a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,\+]', ' ', s)
    s= " ".join(s.split())
    s = re.sub(r'(?<=[0-9])\.\ ', ' ', s)
    s = re.sub(r'(?<=\ in)\.(?=[a-zA-Z])', '. ', s)
    s=replace_in_parser(s)
    s = re.sub(r'\-discontinued', '', s)
    s = re.sub(r' \+ free app(?=$)', '', s)
    s = s.replace("+"," ")
    # drop parenthesized asides entirely
    s = re.sub('\([a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]+?\)', '', s)
    #s= re.sub('[\(\)]', '', s)
    if " - " in s:
        #srch=re.search(r'(?<= - )[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]+',s)
        if re.search(r'(\d|\.|mm|cm|in|ft|mhz|volt|\bhp|\bl|oz|lb|gal) \- \d',s)==None \
        and re.search(r' (sign|carpet|decal[s]*|figure[s]*)(?=$)',s)==None and re.search(r'\d \- (way\b|day\b)',s)==None:
            #if ' - ' is found and the string doesnt end with word 'sign' or 'carpet' or 'decal' and not string '[0-9] - way' found
            s = re.sub(r' - [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s) #greedy regular expression
    if "uilt in" not in s and "uilt In" not in s:
        s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*])[\ ]+[I|i]n [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = s.replace(" - "," ")
    if re.search(r' (sign|decal[s]*|figure[s]*)(?=$)',s)==None:
        s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[W|w]ith [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[W|w]ithout [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*])[\ ]+[w]/[\ a-z0-9][a-z0-9][a-z0-9\.][a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    if " fits for " not in s and " for fits " not in s:
        s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+fits [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    if " for lease " not in s and re.search(r' (sign|decal[s]*|figure[s]*)(?=$)',s)==None:
        s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[F|f]or [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z0-9\%\$\#\@\&\/\.\*])[\ ]+[T|t]hat [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r' on (wheels|a pallet|spool|bracket|3 in|blue post|360|track|spike|rock|lamp|11 in|2 in|pedestal|square base|tub|steel work)[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r' on (plinth|insulator|casters|pier base|reel|fireplace|moon|bracket|24p ebk|zinc spike|mailbox|cream chand|blue post)[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r'(?<= on white stiker)[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r' on [installing][a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    if "," in s:
        srch=re.search(r'(?<=, )[a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*',s) #greedy regular expression
        if srch!=None:
            # only drop the tail after a comma when it is short (< 25 letters)
            if len(re.sub('[^a-zA-Z\ ]', '', srch.group(0)))<25:
                s = re.sub(r', [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    s = re.sub(r'(?<=recessed door reinforcer), [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)\,]*', '', s)
    #s = re.sub(r'(?<=[a-zA-Z0-9]),\ [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*]) [F|f]eaturing [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\%\$\#\@\&\/\.\*]) [I|i]ncludes [a-zA-Z0-9\n\ \%\$\-\#\@\&\/\.\'\*\(\)]*', '', s)
    s = re.sub(' [\#]\d+[\-\d]*[\,]*', '', s)
    s = re.sub(r'(?<=[a-zA-Z\ ])\/(?=[a-zA-Z])', ' ', s)
    s = re.sub(r'(?<=[a-zA-Z\ ])\-(?=[a-zA-Z])', ' ', s)
    s = s.replace(",",". ")
    s = s.replace("..",".")
    s = s.replace("..",".")
    s = s.replace("*","")
    return " ".join([word.replace("-","") for word in s.split() if re.search(r'\d\-\d',word)==None])
### The next step is identify the most important words.
### We exclude brand names and similar words like 'EpoxyShield'
### (see how add_space_stop_list is created)
# Words that must never be selected as keywords: single-word brand names
# plus any add_space_stop_list entry WordNet does not know as a noun.
not_keyword_list = list(brand_df['name'][brand_df['nwords'] == 1])
not_keyword_list.extend(
    item for item in add_space_stop_list
    if len(wn.synsets(item, pos=wn.NOUN)) == 0
)
### We want 'thekey' to be a noun as identified by NLTK WordNet
### and NN, NNS, VBG in the sentence as identified by NLTK.pos_tagger()
### Since pos_tagger often fails, we run it two times: on full sentence with
### punctuation and on separate words. We reject the word only if in neither run
### it is identified as NN, NNS, VBG.
### We also have to create in_list
### with words that are always to be identified as thekeys. Words ending with
### '-er', '-ers', '-or', '-ors' are also thekeys.
### We exclude some words from potential thekeys, they are added to out_list.
### Once thekey is identified, we read the words to the left and consider
### them as keywords (or important words). If we encounter nouns, we continue.
### IF we encounter ['JJ','JJS', 'JJR', 'RB', 'RBS', 'RBR', 'VBG', 'VBD', 'VBN','VBP'],
### we add the word to the keywords, but stop unless the next word is 'and'.
### In other cases (word with digits or preposition etc) we just stop.
# the word lists from the following function are shared on the forum.
def get_key_words(tag_list,wordtag_list, string_output=False,out_list=not_keyword_list[:]):
    """Extract the important trailing words ('thekey' and the words before it)
    from a POS-tagged title or query.

    Scans the tags right-to-left. Phase 1 finds 'thekey': a word not in
    out_list that is either in in_list, an '-er/-or' agent noun without
    digits, or a WordNet noun tagged NN/NNS/VBG by at least one of the
    two tagger runs. Phase 2 keeps collecting nouns (and adjectives /
    adverbs / participles, which also terminate the scan unless followed
    by 'and') to the left of thekey.

    Args:
        tag_list: nltk.pos_tag output for the full sentence.
        wordtag_list: nltk.pos_tag output for the words tagged one by one
            (the two runs disagree often enough that both are consulted).
        string_output: if True, return the selected words joined by
            spaces; otherwise return the list of (word, tag) tuples.
        out_list: words never allowed as keywords. The default is a
            def-time copy of not_keyword_list (note: evaluated once when
            the function is defined, not per call).

    Returns:
        str or list of (word, tag) tuples, left-to-right order preserved.
    """
    i=len(tag_list)
    # words that are always acceptable as thekey
    in_list=['tv','downrod', 'sillcock', 'shelving', 'luminaire', 'paracord', 'ducting', \
    'recyclamat', 'rebar', 'spackling', 'hoodie', 'placemat', 'innoculant', 'protectant', \
    'colorant', 'penetrant', 'attractant', 'bibb', 'nosing', 'subflooring', 'torchiere', 'thhn',\
    'lantern','epoxy','cloth','trim','adhesive','light','lights','saw','pad','polish','nose','stove',\
    'elbow','elbows','lamp','door','doors','pipe','bulb','wood','woods','wire','sink','hose','tile','bath','table','duct',\
    'windows','mesh','rug','rugs','shower','showers','wheels','fan','lock','rod','mirror','cabinet','shelves','paint',\
    'plier','pliers','set','screw','lever','bathtub','vacuum','nut', 'nipple','straw','saddle','pouch','underlayment',\
    'shade','top', 'bulb', 'bulbs', 'paint', 'oven', 'ranges', 'sharpie', 'shed', 'faucet',\
    'finish','microwave', 'can', 'nozzle', 'grabber', 'tub', 'angles','showerhead', 'dehumidifier', 'shelving', 'urinal', 'mdf']
    # words never accepted as keywords: colors, materials, units, brands, noise.
    # NOTE(review): 'aaa' 'b' below is implicit string concatenation -> 'aaab';
    # a missing comma is suspected ('aaa' also appears later) -- confirm intent.
    out_list= out_list +['free','height', 'width', 'depth', 'model','pcs', 'thick','pack','adhesive','steel','cordless', 'aaa' 'b', 'nm', 'hc', 'insulated','gll', 'nutmeg',\
    'pnl', 'sotc','withe','stainless','chrome','beige','max','acrylic', 'cognac', 'cherry', 'ivory','electric','fluorescent', 'recessed', 'matte',\
    'propane','sku','brushless','quartz','gfci','shut','sds','value','brown','white','black','red','green','yellow','blue','silver','pink',\
    'gray','gold','thw','medium','type','flush',"metaliks", 'metallic', 'amp','btu','gpf','pvc','mil','gcfi','plastic', 'vinyl','aaa',\
    'aluminum','brass','antique', 'brass','copper','nickel','satin','rubber','porcelain','hickory','marble','polyacrylic','golden','fiberglass',\
    'nylon','lmapy','maple','polyurethane','mahogany','enamel', 'enameled', 'linen','redwood', 'sku','oak','quart','abs','travertine', 'resin',\
    'birch','birchwood','zinc','pointe','polycarbonate', 'ash', 'wool', 'rockwool', 'teak','alder','frp','cellulose','abz', 'male', 'female', 'used',\
    'hepa','acc','keyless','aqg','arabesque','polyurethane', 'polyurethanes','ardex','armorguard','asb', 'motion','adorne','fatpack',\
    'fatmax','feet','ffgf','fgryblkg', 'douglas', 'fir', 'fleece','abba', 'nutri', 'thermal','thermoclear', 'heat', 'water', 'systemic',\
    'heatgasget', 'cool', 'fusion', 'awg', 'par', 'parabolic', 'tpi', 'pint', 'draining', 'rain', 'cost', 'costs', 'costa','ecostorage',
    'mtd', 'pass', 'emt', 'jeld', 'npt', 'sch', 'pvc', 'dusk', 'dawn', 'lathe','lows','pressure', 'round', 'series','impact', 'resistant','outdoor',\
    'off', 'sawall', 'elephant', 'ear', 'abb', 'baby', 'feedback', 'fastback','jumbo', 'flexlock', 'instant', 'natol', 'naples','florcant',\
    'canna','hammock', 'jrc', 'honeysuckle', 'honey', 'serrano','sequoia', 'amass', 'ashford', 'gal','gas', 'gasoline', 'compane','occupancy',\
    'home','bakeware', 'lite', 'lithium', 'golith','gxwh', 'wht', 'heirloom', 'marine', 'marietta', 'cambria', 'campane','birmingham',\
    'bellingham','chamois', 'chamomile', 'chaosaw', 'chanpayne', 'thats', 'urethane', 'champion', 'chann', 'mocha', 'bay', 'rough',\
    'undermount', 'price', 'prices', 'way', 'air', 'bazaar', 'broadway', 'driveway', 'sprayway', 'subway', 'flood', 'slate', 'wet',\
    'clean', 'tweed', 'weed', 'cub', 'barb', 'salem', 'sale', 'sales', 'slip', 'slim', 'gang', 'office', 'allure', 'bronze', 'banbury',\
    'tuscan','tuscany', 'refinishing', 'fleam','schedule', 'doeskin','destiny', 'mean', 'hide', 'bobbex', 'pdi', 'dpdt', 'tri', 'order',\
    'kamado','seahawks','weymouth', 'summit','tel','riddex', 'alick','alvin', 'ano', 'assy', 'grade', 'barranco', 'batte','banbury',\
    'mcmaster', 'carr', 'ccl', 'china', 'choc', 'colle', 'cothom', 'cucbi', 'cuv', 'cwg', 'cylander', 'cylinoid', 'dcf', 'number', 'ultra',\
    'diat','discon', 'disconnect', 'plantation', 'dpt', 'duomo', 'dupioni', 'eglimgton', 'egnighter','ert','euroloft', 'everready',\
    'felxfx', 'financing', 'fitt', 'fosle', 'footage', 'gpf','fro', 'genis', 'giga', 'glu', 'gpxtpnrf', 'size', 'hacr', 'hardw',\
    'hexagon', 'hire', 'hoo','number','cosm', 'kelston', 'kind', 'all', 'semi', 'gloss', 'lmi', 'luana', 'gdak', 'natol', 'oatu',\
    'oval', 'olinol', 'pdi','penticlea', 'portalino', 'racc', 'rads', 'renat', 'roc', 'lon', 'sendero', 'adora', 'sleave', 'swu',
    'tilde', 'cordoba', 'tuvpl','yel', 'acacia','mig','parties','alkaline','plexiglass', 'iii', 'watt']
    output_list=[]
    if i>0:
        finish=False
        started = False
        while not finish:
            i-=1
            # phase 1: scan right-to-left until thekey is found
            if started==False:
                if (wordtag_list[i][0] not in out_list) \
                and (wordtag_list[i][0] in in_list \
                or (re.search(r'(?=[e|o]r[s]*\b)',wordtag_list[i][0])!=None and re.search(r'\d+',wordtag_list[i][0])==None) \
                or (len(wordtag_list[i][0])>2 and re.search(r'\d+',wordtag_list[i][0])==None and len(wn.synsets(wordtag_list[i][0],pos=wn.NOUN))>0 \
                and (wordtag_list[i][1] in ['NN', 'NNS','VBG'] or tag_list[i][1] in ['NN', 'NNS','VBG']) \
                and len(re.sub('[^aeiouy]', '', wordtag_list[i][0]))>0 )): #exclude VBD
                    started = True
                    output_list.insert(0,wordtag_list[i])
                    # handle exceptions below
                    # 'iron' only with -ing is OK: soldering iron, seaming iron
                    if i>1 and wordtag_list[i][0] in ['iron','irons'] and re.search(r'ing\b',wordtag_list[i-1][0])==None:
                        output_list=[]
                        started = False
            # phase 2: collect words to the left of thekey
            else:
                if tag_list[i][1] in ['NN','NNP', 'NNPS', 'NNS']:
                    if len(re.sub('[^0-9]', '', tag_list[i][0]))==0 and \
                    len(re.sub('[^a-zA-Z0-9\-]', '', tag_list[i][0]))>2 \
                    and tag_list[i][0] not in ['amp','btu','gpf','pvc','mil','watt','gcfi']\
                    and (len(wn.synsets(tag_list[i][0]))>0 or re.search(r'(?=[e|o]r[s]*\b)',tag_list[i][0])!=None):
                        output_list.insert(0,tag_list[i])
                elif tag_list[i][0]=='and':
                    # 'and' is kept, but the next word must re-qualify as a key
                    output_list.insert(0,tag_list[i])
                    started=False
                else:
                    if tag_list[max(0,i-1)][0]!="and" and (tag_list[i][1] not in ['VBD', 'VBN']):
                        finish=True
                    if tag_list[i][1] in ['JJ','JJS', 'JJR', 'RB', 'RBS', 'RBR', 'VBG', 'VBD', 'VBN','VBP']:
                        if len(re.sub('[^0-9]', '', tag_list[i][0]))==0 and \
                        len(re.sub('[^a-zA-Z0-9\-]', '', tag_list[i][0]))>2 \
                        and tag_list[i][0] not in ['amp','btu','gpf','pvc','mil','watt','gcfi']\
                        and len(wn.synsets(tag_list[i][0]))>0:
                            output_list.insert(0,tag_list[i])
            if i==0:
                finish=True
    if string_output==True:
        return " ".join([tag[0] for tag in output_list])
    else:
        return output_list
### Apply the function to product_title
### We have to start with product_title, not product_title_parsed,
### since punctuation is important for our task ...
df_all['product_title_cut']= df_all['product_title'].map(lambda x: cut_product_title(x).encode('utf-8'))
### ... and that is why we have to remove the brand names again.
### Memoize over the unique cut titles so each distinct string is
### processed only once (many products share the same cut title).
aa=list(set(list(df_all['product_title_cut'])))
my_dict={}
for i in range(0,len(aa)):
    my_dict[aa[i]]=getremove_brand_or_material_from_str(aa[i],brand_df)
    if (i % 5000)==0:
        print "processed "+str(i)+" out of "+str(len(aa))+" unique cut product titles; "+str(round((time()-t0)/60,1))+" minutes"
df_all['product_title_cut_tuple']= df_all['product_title_cut'].map(lambda x: my_dict[x])
df_all['product_title_cut_woBrand']= df_all['product_title_cut_tuple'].map(lambda x: x[0])
df_all=df_all.drop(['product_title_cut_tuple'],axis=1)
print 'extract brands from cut product title:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
### Tagging two times: full sentences and separate words
### (pos_tag often fails on one of the two runs; get_key_words() consults both)
df_all['product_title_cut_tokens'] =col_tagger(df_all['product_title_cut_woBrand'])
df_all['product_title_cut_wordtokens'] =col_wordtagger(df_all['product_title_cut_woBrand'])
### the same steps for search term, but now we continue with the preprocessed results
### since punctuation is not as important in query as it is in product title
df_all['search_term_cut_woBrand']= df_all['search_term_parsed_woBrand'].map(lambda x: cut_product_title(x).encode('utf-8'))
df_all['search_term_cut_tokens'] =col_tagger(df_all['search_term_cut_woBrand'])
df_all['search_term_cut_wordtokens'] =col_wordtagger(df_all['search_term_cut_woBrand'])
### Transform tags into text; it may look like an unnecessary step,
### but in our work we have to frequently save processing results and recover tags from text.
### Here this transformation is used to make the _tokens variables compatible with
### the parser_mystr2tuple() function
df_all['search_term_cut_tokens']=df_all['search_term_cut_tokens'].map(lambda x: str(x))
df_all['search_term_cut_wordtokens']=df_all['search_term_cut_wordtokens'].map(lambda x: str(x))
df_all['product_title_cut_tokens']=df_all['product_title_cut_tokens'].map(lambda x: str(x))
df_all['product_title_cut_wordtokens']=df_all['product_title_cut_wordtokens'].map(lambda x: str(x))
df_all['search_term_keys']=df_all.apply(lambda x: \
    get_key_words(parser_mystr2tuple(x['search_term_cut_tokens']),parser_mystr2tuple(x['search_term_cut_wordtokens']),string_output=True),axis=1)
df_all['product_title_keys']=df_all.apply(lambda x: \
    get_key_words(parser_mystr2tuple(x['product_title_cut_tokens']),parser_mystr2tuple(x['product_title_cut_wordtokens']),string_output=True),axis=1)
### Now we just need to assing the last word from keywords as thekey,
### the words before it as beforethekey and before2thekey.
### One more trick: we first get this trigram from product_title,
### than use thekey from product title to choose the most similar word
### in case query contains two candidates separated by 'and'
### For example, for query 'microwave and stove' we may chose either
### 'microwave' or 'stove' depending on the thekey from product title.
def get_last_words_from_parsed_title(s):
    """Split a keyword string into the (thekey, beforethekey, before2thekey) trigram.

    The last word of *s* is 'thekey'; the one or two words preceding it are
    returned as well, with any literal 'and' replaced by the empty string.
    Missing positions are returned as empty strings.

    Args:
        s: space-separated keyword string (output of get_key_words).

    Returns:
        (last_word, word_before_last, word2_before_last) tuple of str.
    """
    tokens = s.split()
    if not tokens:
        return "", "", ""
    last_word = tokens[-1]
    word_before_last = tokens[-2] if len(tokens) > 1 else ""
    if word_before_last == "and":
        word_before_last = ""
    word2_before_last = ""
    # NOTE(review): word_before_last was already reset above, so the
    # != "and" guard here is always true; kept to preserve the original
    # behavior ('x and y' yields word2_before_last == 'x').
    if len(tokens) > 2 and word_before_last != "and":
        word2_before_last = tokens[-3]
        if word2_before_last == "and":
            word2_before_last = ""
    return last_word, word_before_last, word2_before_last
def get_last_words_from_parsed_query(s, last_word_in_title):
    """Extract the (thekey, beforethekey, before2thekey) trigram from a query.

    Works like get_last_words_from_parsed_title(), with one extra rule:
    for queries containing two key candidates separated by 'and'
    (e.g. 'microwave and stove'), the candidate more similar to the
    product-title key *last_word_in_title* is chosen as thekey.

    Args:
        s: space-separated keyword string for the query.
        last_word_in_title: thekey already extracted from the product title.

    Returns:
        (last_word, word_before_last, word2_before_last) tuple of str.
    """
    tokens = s.split()
    if not tokens:
        return "", "", ""
    last_word = tokens[-1]
    word_before_last = ""
    word2_before_last = ""
    if len(tokens) > 1:
        word_before_last = tokens[-2]
        # capture the third-from-last word before 'and' words are blanked
        if len(tokens) > 2 and word_before_last != "and":
            word2_before_last = tokens[-3]
            if word2_before_last == "and":
                word2_before_last = ""
        if word_before_last == "and":
            word_before_last = ""
        if len(tokens) > 2:
            # prefer whichever candidate is more similar to the title key
            candidate = tokens[-3]
            sim_last = find_similarity(last_word, last_word_in_title)[0]
            sim_candidate = find_similarity(candidate, last_word_in_title)[0]
            if sim_last < sim_candidate:
                last_word = candidate
                if len(tokens) > 3:
                    word_before_last = tokens[-4]
    return last_word, word_before_last, word2_before_last
### get trigram (thekey, beforethekey, before2thekey) from product title
df_all['product_title_thekey_tuple']=df_all['product_title_keys'].map(lambda x: get_last_words_from_parsed_title(x))
df_all['product_title_thekey']=df_all['product_title_thekey_tuple'].map(lambda x: x[0])
df_all['product_title_beforethekey']=df_all['product_title_thekey_tuple'].map(lambda x: x[1])
df_all['product_title_before2thekey']=df_all['product_title_thekey_tuple'].map(lambda x: x[2])
df_all=df_all.drop(['product_title_thekey_tuple'],axis=1)
### get trigram from query; thekey from the product title is passed in so
### that 'x and y' queries pick the candidate most similar to the title key
df_all['search_term_thekey_tuple']=df_all.apply(lambda x: \
        get_last_words_from_parsed_query(x['search_term_keys'],x['product_title_thekey']),axis=1)
#df_all['thekey_info']=df_all['search_term_keys']+"\t"+df_all['product_title_thekey']
#df_all['search_term_thekey_tuple']=df_all['thekey_info'].map(lambda x: get_last_words_from_parsed_query(x.split("\t")[0],x.split("\t")[1]))
df_all['search_term_thekey']=df_all['search_term_thekey_tuple'].map(lambda x: x[0])
df_all['search_term_beforethekey']=df_all['search_term_thekey_tuple'].map(lambda x: x[1])
df_all['search_term_before2thekey']=df_all['search_term_thekey_tuple'].map(lambda x: x[2])
df_all=df_all.drop(['search_term_thekey_tuple'],axis=1)
#df_all['search_term_thekey_stemmed']=df_all['search_term_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['product_title_thekey_stemmed']=df_all['product_title_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['search_term_beforethekey_stemmed']=df_all['search_term_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['product_title_beforethekey_stemmed']=df_all['product_title_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['search_term_before2thekey_stemmed']=df_all['search_term_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
#df_all['product_title_before2thekey_stemmed']=df_all['product_title_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
print 'extracting important words time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF GET IMPORTANT WORDS FROM QUERY AND PRODUCT TITLE ###
#################################################################
#################################################################
##### STEMMING ##################################################
#################################################################
### We also exclude stopwords here.
### Sometimes people search for 'can' with the meaning 'a container'
### like in 'trash can'. That is why we keep 'can' in query and product title
### (stoplist_wo_can) while bullets/descriptions use the default stoplist.
df_attr_bullets['attribute_bullets_stemmed']=df_attr_bullets['attribute_bullets_parsed'].map(lambda x:str_stemmer_wo_parser(x))
df_attr_bullets['attribute_bullets_stemmed_woBM']=df_attr_bullets['attribute_bullets_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
df_attr_bullets['attribute_bullets_stemmed_woBrand']=df_attr_bullets['attribute_bullets_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
df_pro_desc['product_description_stemmed']=df_pro_desc['product_description_parsed'].map(lambda x:str_stemmer_wo_parser(x))
df_pro_desc['product_description_stemmed_woBM']=df_pro_desc['product_description_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x))
df_pro_desc['product_description_stemmed_woBrand']=df_pro_desc['product_description_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x))
df_all['search_term_keys_stemmed']=df_all['search_term_keys'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_keys_stemmed']=df_all['product_title_keys'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_stemmed']=df_all['search_term_parsed'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_stemmed_woBM']=df_all['search_term_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_stemmed_woBrand']=df_all['search_term_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_stemmed']=df_all['product_title_parsed'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_stemmed_woBM']=df_all['product_title_parsed_woBM'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_stemmed_woBrand']=df_all['product_title_parsed_woBrand'].map(lambda x:str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_thekey_stemmed']=df_all['search_term_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_thekey_stemmed']=df_all['product_title_thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_beforethekey_stemmed']=df_all['search_term_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_beforethekey_stemmed']=df_all['product_title_beforethekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['search_term_before2thekey_stemmed']=df_all['search_term_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
df_all['product_title_before2thekey_stemmed']=df_all['product_title_before2thekey'].map(lambda x: str_stemmer_wo_parser(x,stoplist=stoplist_wo_can))
print 'stemming time:',round((time()-t0)/60,1) ,'minutes\n'
t0 = time()
##### END OF STEMMING ###########################################
#################################################################
### Save everything into files (utf-8 encode the raw title first)
df_all['product_title']= df_all['product_title'].map(lambda x: x.encode('utf-8'))
df_all.to_csv(PROCESSINGTEXT_DIR+"/df_train_and_test_processed_wo_google.csv", index=False)
df_attr_bullets.to_csv(PROCESSINGTEXT_DIR+"/df_attribute_bullets_processed_wo_google.csv", index=False)
df_pro_desc.to_csv(PROCESSINGTEXT_DIR+"/df_product_descriptions_processed_wo_google.csv", index=False)
print 'TOTAL PROCESSING TIME:',round((time()-t1)/60,1) ,'minutes\n'
t1 = time()
### free memory: drop all columns from the working dataframes
df_all=df_all.drop(list(df_all.keys()),axis=1)
df_attr_bullets=df_attr_bullets.drop(list(df_attr_bullets.keys()),axis=1)
df_pro_desc=df_pro_desc.drop(list(df_pro_desc.keys()),axis=1)
| mit |
github4ry/pathomx | pathomx/plugins/spectra/spectra_peakadj.py | 2 | 3454 | import nmrglue as ng
import numpy as np
import pandas as pd
# Pathomx tool script: input_data, config and styles are injected globals
# supplied by the pathomx runtime -- TODO confirm against the tool wrapper.
# Get the target region from the spectra (will be using this for all calculations;
# then applying the result to the original data)
if type(input_data.columns) in [pd.Index, pd.Float64Index]:
    scale = input_data.columns.values
elif type(input_data.columns) == pd.MultiIndex:
    # MultiIndexed columns: take the 'ppm' level (fall back to 'Label')
    try:
        scidx = input_data.columns.names.index('ppm')
    except:
        scidx = input_data.columns.names.index('Label')
    scale = [c[scidx] for c in input_data.columns.values]
target_ppm = config.get('peak_target_ppm')
tolerance_ppm = config.get('peak_target_ppm_tolerance')
start_ppm = target_ppm - tolerance_ppm
end_ppm = target_ppm + tolerance_ppm
# indices of the scale points closest to the region boundaries
start = min(list(range(len(scale))), key=lambda i: abs(scale[i]-start_ppm))
end = min(list(range(len(scale))), key=lambda i: abs(scale[i]-end_ppm))
# Shift first; then scale
d = 1 if end>start else -1  # ppm scales may run in either direction
data = input_data.iloc[:,start:end:d]
region_scales = scale[start:end:d]
#region_labels = labels[start:end:d]
#region_entities = dsientities[1][start:end:d]
pcentre = min(list(range(len(region_scales))), key=lambda i: abs(region_scales[i]-target_ppm)) # Base centre point to shift all spectra to
# Pick one reference peak per spectrum within the target region
reference_peaks = []
for index, sdata in data.iterrows():
    baseline = sdata.max() * .9 # 90% baseline of maximum peak within target region
    locations, scales, amps = ng.analysis.peakpick.pick(sdata, pthres=baseline, algorithm='connected', est_params = True, cluster=False, table=False)
    if len(locations) > 0:
        reference_peaks.append({
            'location':locations[0][0], #FIXME: better behaviour when >1 peak
            'scale':scales[0][0],
            'amplitude':amps[0],
        })
    else:
        reference_peaks.append(None)  # no peak found: leave this spectrum untouched
if config.get('shifting_enabled'):
    # Take a np array for speed on shifting
    shift_array = input_data.values
    # Now shift the original spectra to fit (align each reference peak to pcentre)
    for n,refp in enumerate(reference_peaks):
        if refp:
            # Shift the spectra
            shift = (pcentre-refp['location']) * d
            # FIXME: This is painfully slow
            if shift > 0:
                shift_array[n, shift:-1] = shift_array[n, 0:-(shift+1)]
            elif shift < 0:
                shift_array[n, 0:shift-1] = shift_array[n, abs(shift):-1]
    input_data = pd.DataFrame( shift_array, index=input_data.index, columns=input_data.columns)
if config.get('scaling_enabled'):
    # Get mean reference peak size
    reference_peak_mean = np.mean( [r['scale'] for r in reference_peaks if r ] )
    print('Reference peak mean %s' % reference_peak_mean)
    # Now scale; using the same peak regions & information (so we don't have to worry about something
    # being shifted out of the target region in the first step)
    for n,refp in enumerate(reference_peaks):
        if refp:
            # Scale the spectra
            amplitude = reference_peak_mean/refp['amplitude']
            input_data.iloc[n] *= amplitude
# -- optionally use the line widths and take max within each of these for each spectra (peak shiftiness)
# Filter the original data with those locations and output\
output_data = input_data
region = output_data.iloc[:,start:end:d]
# Generate simple result figure (using pathomx libs)
from pathomx.figures import spectra
View = spectra(output_data, styles=styles);
Region = spectra(region, styles=styles);
data = None;
| gpl-3.0 |
marcotcr/lime | lime/explanation.py | 1 | 11881 | """
Explanation class, with visualization functions.
"""
from io import open
import os
import os.path
import json
import string
import numpy as np
from .exceptions import LimeError
from sklearn.utils import check_random_state
def id_generator(size=15, random_state=None):
    """Generate a random alphanumeric id for an HTML div.

    Useful for embedding HTML into ipython notebooks, where each
    rendered explanation needs a unique element id.

    Args:
        size: number of characters in the id.
        random_state: a numpy RandomState used for sampling (required;
            callers pass the result of sklearn's check_random_state).

    Returns:
        str of *size* uppercase letters and digits.
    """
    alphabet = list(string.ascii_uppercase + string.digits)
    picked = random_state.choice(alphabet, size, replace=True)
    return ''.join(picked)
class DomainMapper(object):
    """Base class for mapping features onto a specific domain.

    Each domain (text, tables, images, etc.) provides a subclass, so the
    generic Explanation class stays domain-agnostic while visualization
    specifics live here. The base implementation is a no-op: feature ids
    map to themselves and no HTML is produced.
    """

    def __init__(self):
        pass

    def map_exp_ids(self, exp, **kwargs):
        """Map feature ids to concrete names.

        The base class applies the identity mapping; subclasses may
        translate ids into human-readable feature names.

        Args:
            exp: list of (id, weight) tuples.
            kwargs: optional keyword arguments.

        Returns:
            list of (name, weight) tuples; here, *exp* unchanged.
        """
        return exp

    def visualize_instance_html(self, exp, label, div_name, exp_object_name, **kwargs):
        """Produce javascript code for visualizing the instance.

        The base class emits nothing; subclasses may return js that
        renders the instance into *div_name*.

        Args:
            exp: list of (id, weight) tuples.
            label: label id (integer).
            div_name: name of the div object used for rendering (in js).
            exp_object_name: name of the js explanation object.
            kwargs: optional keyword arguments.

        Returns:
            js code as a string; here, the empty string.
        """
        return ''
class Explanation(object):
    """Object returned by explainers.

    Holds the per-label local explanations produced by an explainer and
    renders them as lists, maps, pyplot barcharts, or self-contained
    HTML (via the bundled d3/lime javascript).
    """
    def __init__(self,
                 domain_mapper,
                 mode='classification',
                 class_names=None,
                 random_state=None):
        """
        Initializer.

        Args:
            domain_mapper: must inherit from DomainMapper class
            mode: "classification" or "regression"
            class_names: list of class names (only used for classification)
            random_state: an integer or numpy.RandomState that will be used to
                generate random numbers. If None, the random state will be
                initialized using the internal numpy seed.
        """
        self.random_state = random_state
        self.mode = mode
        self.domain_mapper = domain_mapper
        # per-label results, filled in by the explainer after construction
        self.local_exp = {}
        self.intercept = {}
        self.score = {}
        self.local_pred = {}
        if mode == 'classification':
            self.class_names = class_names
            self.top_labels = None
            self.predict_proba = None
        elif mode == 'regression':
            # regression reuses the classification plumbing via a fixed
            # pseudo-label (dummy_label) and a synthetic class pair
            self.class_names = ['negative', 'positive']
            self.predicted_value = None
            self.min_value = 0.0
            self.max_value = 1.0
            self.dummy_label = 1
        else:
            raise LimeError('Invalid explanation mode "{}". '
                            'Should be either "classification" '
                            'or "regression".'.format(mode))
    def available_labels(self):
        """
        Returns the list of classification labels for which we have any explanations.
        """
        try:
            assert self.mode == "classification"
        except AssertionError:
            raise NotImplementedError('Not supported for regression explanations.')
        else:
            ans = self.top_labels if self.top_labels else self.local_exp.keys()
            return list(ans)
    def as_list(self, label=1, **kwargs):
        """Returns the explanation as a list.

        Args:
            label: desired label. If you ask for a label for which an
                explanation wasn't computed, will throw an exception.
                Will be ignored for regression explanations.
            kwargs: keyword arguments, passed to domain_mapper

        Returns:
            list of tuples (representation, weight), where representation is
            given by domain_mapper. Weight is a float.
        """
        label_to_use = label if self.mode == "classification" else self.dummy_label
        ans = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs)
        ans = [(x[0], float(x[1])) for x in ans]
        return ans
    def as_map(self):
        """Returns the map of explanations.

        Returns:
            Map from label to list of tuples (feature_id, weight).
        """
        return self.local_exp
    def as_pyplot_figure(self, label=1, **kwargs):
        """Returns the explanation as a pyplot figure.

        Will throw an error if you don't have matplotlib installed.

        Args:
            label: desired label. If you ask for a label for which an
                explanation wasn't computed, will throw an exception.
                Will be ignored for regression explanations.
            kwargs: keyword arguments, passed to domain_mapper

        Returns:
            pyplot figure (barchart).
        """
        import matplotlib.pyplot as plt
        exp = self.as_list(label=label, **kwargs)
        fig = plt.figure()
        vals = [x[1] for x in exp]
        names = [x[0] for x in exp]
        # reverse so the most important feature ends up at the top of the barh
        vals.reverse()
        names.reverse()
        colors = ['green' if x > 0 else 'red' for x in vals]
        pos = np.arange(len(exp)) + .5
        plt.barh(pos, vals, align='center', color=colors)
        plt.yticks(pos, names)
        if self.mode == "classification":
            title = 'Local explanation for class %s' % self.class_names[label]
        else:
            title = 'Local explanation'
        plt.title(title)
        return fig
    def show_in_notebook(self,
                         labels=None,
                         predict_proba=True,
                         show_predicted_value=True,
                         **kwargs):
        """Shows html explanation in ipython notebook.

        See as_html() for parameters.
        This will throw an error if you don't have IPython installed"""
        from IPython.core.display import display, HTML
        display(HTML(self.as_html(labels=labels,
                                  predict_proba=predict_proba,
                                  show_predicted_value=show_predicted_value,
                                  **kwargs)))
    def save_to_file(self,
                     file_path,
                     labels=None,
                     predict_proba=True,
                     show_predicted_value=True,
                     **kwargs):
        """Saves html explanation to file.

        Params:
            file_path: file to save explanations to

        See as_html() for additional parameters.
        """
        file_ = open(file_path, 'w', encoding='utf8')
        file_.write(self.as_html(labels=labels,
                                 predict_proba=predict_proba,
                                 show_predicted_value=show_predicted_value,
                                 **kwargs))
        file_.close()
    def as_html(self,
                labels=None,
                predict_proba=True,
                show_predicted_value=True,
                **kwargs):
        """Returns the explanation as an html page.

        Args:
            labels: desired labels to show explanations for (as barcharts).
                If you ask for a label for which an explanation wasn't
                computed, will throw an exception. If None, will show
                explanations for all available labels. (only used for classification)
            predict_proba: if true, add barchart with prediction probabilities
                for the top classes. (only used for classification)
            show_predicted_value: if true, add barchart with expected value
                (only used for regression)
            kwargs: keyword arguments, passed to domain_mapper

        Returns:
            code for an html page, including javascript includes.
        """
        def jsonize(x):
            return json.dumps(x, ensure_ascii=False)
        if labels is None and self.mode == "classification":
            labels = self.available_labels()
        # inline the packaged d3/lime javascript so the page is self-contained
        this_dir, _ = os.path.split(__file__)
        bundle = open(os.path.join(this_dir, 'bundle.js'),
                      encoding="utf8").read()
        out = u'''<html>
        <meta http-equiv="content-type" content="text/html; charset=UTF8">
        <head><script>%s </script></head><body>''' % bundle
        # unique div id so several explanations can coexist in one notebook
        random_id = id_generator(size=15, random_state=check_random_state(self.random_state))
        out += u'''
        <div class="lime top_div" id="top_div%s"></div>
        ''' % random_id
        # NOTE: '%%' inside the js templates below escapes a literal '%'
        # because these strings are %-formatted
        predict_proba_js = ''
        if self.mode == "classification" and predict_proba:
            predict_proba_js = u'''
            var pp_div = top_div.append('div')
                .classed('lime predict_proba', true);
            var pp_svg = pp_div.append('svg').style('width', '100%%');
            var pp = new lime.PredictProba(pp_svg, %s, %s);
            ''' % (jsonize([str(x) for x in self.class_names]),
                   jsonize(list(self.predict_proba.astype(float))))
        predict_value_js = ''
        if self.mode == "regression" and show_predicted_value:
            # reference self.predicted_value
            # (svg, predicted_value, min_value, max_value)
            predict_value_js = u'''
            var pp_div = top_div.append('div')
                .classed('lime predicted_value', true);
            var pp_svg = pp_div.append('svg').style('width', '100%%');
            var pp = new lime.PredictedValue(pp_svg, %s, %s, %s);
            ''' % (jsonize(float(self.predicted_value)),
                   jsonize(float(self.min_value)),
                   jsonize(float(self.max_value)))
        exp_js = '''var exp_div;
            var exp = new lime.Explanation(%s);
        ''' % (jsonize([str(x) for x in self.class_names]))
        if self.mode == "classification":
            # one barchart per requested label
            for label in labels:
                exp = jsonize(self.as_list(label))
                exp_js += u'''
                exp_div = top_div.append('div').classed('lime explanation', true);
                exp.show(%s, %d, exp_div);
                ''' % (exp, label)
        else:
            exp = jsonize(self.as_list())
            exp_js += u'''
            exp_div = top_div.append('div').classed('lime explanation', true);
            exp.show(%s, %s, exp_div);
            ''' % (exp, self.dummy_label)
        raw_js = '''var raw_div = top_div.append('div');'''
        if self.mode == "classification":
            html_data = self.local_exp[labels[0]]
        else:
            html_data = self.local_exp[self.dummy_label]
        # domain-specific rendering of the raw instance (text highlights etc.)
        raw_js += self.domain_mapper.visualize_instance_html(
                html_data,
                labels[0] if self.mode == "classification" else self.dummy_label,
                'raw_div',
                'exp',
                **kwargs)
        out += u'''
        <script>
        var top_div = d3.select('#top_div%s').classed('lime top_div', true);
        %s
        %s
        %s
        %s
        </script>
        ''' % (random_id, predict_proba_js, predict_value_js, exp_js, raw_js)
        out += u'</body></html>'
        return out
| bsd-2-clause |
DailyActie/Surrogate-Model | examples/iris.py | 1 | 1597 | # MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
# Load the classic iris dataset; X is the feature matrix, y the class labels.
from sklearn import datasets
iris = datasets.load_iris()
X, y = iris.data, iris.target
# NOTE(review): y_true/y_pred (and X, y above) are defined but never used
# below -- presumably left over from a larger example.
y_true = [0, 1, 2, 3]
y_pred = [0, 2, 1, 3]
# Build a custom scorer: F-beta with beta=2 weights recall higher than precision.
from sklearn.metrics import fbeta_score, make_scorer
ftwo_scorer = make_scorer(fbeta_score, beta=2)
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
# Grid-search the SVC regularization strength C, ranked by the F2 scorer.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]}, scoring=ftwo_scorer)
| mit |
chrisburr/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
# NOTE(review): ExtraTreesClassifier is imported but never used below.
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
#         Gilles Louppe <g.louppe@gmail.com>
#         Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
                           n_clusters_per_class=1, n_informative=15,
                           random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for paralellised ensembles but is necessary for tracking the OOB
# error trajectory during training.
# Three classifiers differing only in max_features (feature-subsampling rule).
ensemble_clfs = [
    ("RandomForestClassifier, max_features='sqrt'",
        RandomForestClassifier(warm_start=True, oob_score=True,
                               max_features="sqrt",
                               random_state=RANDOM_STATE)),
    ("RandomForestClassifier, max_features='log2'",
        RandomForestClassifier(warm_start=True, max_features='log2',
                               oob_score=True,
                               random_state=RANDOM_STATE)),
    ("RandomForestClassifier, max_features=None",
        RandomForestClassifier(warm_start=True, max_features=None,
                               oob_score=True,
                               random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
    # warm_start=True means each fit() only grows the ensemble by the
    # additional trees, rather than refitting from scratch.
    for i in range(min_estimators, max_estimators + 1):
        clf.set_params(n_estimators=i)
        clf.fit(X, y)
        # Record the OOB error for each `n_estimators=i` setting.
        oob_error = 1 - clf.oob_score_
        error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
    xs, ys = zip(*clf_err)
    plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
rustychris/stompy | stompy/model/delft/dflow_model.py | 1 | 51105 | """
Automate parts of setting up a DFlow hydro model.
TODO:
allow for setting grid bathy from the model instance
"""
import os,shutil,glob,inspect
import six
import logging
log=logging.getLogger('DFlowModel')
import copy
import numpy as np
import xarray as xr
import pandas as pd
from shapely import geometry
import stompy.model.delft.io as dio
from stompy import xr_utils
from stompy.io.local import noaa_coops, hycom
from stompy import utils, filters, memoize
from stompy.spatial import wkb2shp, proj_utils
from stompy.model.delft import dfm_grid
import stompy.grid.unstructured_grid as ugrid
from . import io as dio
from . import waq_scenario
from .. import hydro_model as hm
class DFlowModel(hm.HydroModel,hm.MpiModel):
    """
    Driver for configuring, writing and running Deltares D-Flow FM
    hydrodynamic models (mdu + grid + boundary conditions).
    """
    # If these are the empty string, then assumes that the executables are
    # found in existing $PATH
    dfm_bin_dir="" # .../bin giving directory containing dflowfm
    dfm_bin_exe='dflowfm'
    # default filename for the mdu written into run_dir
    mdu_basename='flowfm.mdu'
    ref_date=None
    mdu=None
    # If set, a DFlowModel instance which will be continued
    restart_from=None
    # If True, initialize to WaqOnlineModel instance.
    dwaq=False
    # Specify location of proc_def.def file:
    waq_proc_def=None
    # flow and source/sink BCs will get the adjacent nodes dredged
    # down to this depth in order to ensure the impose flow doesn't
    # get blocked by a dry edge. Set to None to disable.
    # This has moved to just the BC objects, and removed here to avoid
    # confusion.
    # dredge_depth=-1.0
    def __init__(self,*a,**kw):
        """Forward args to HydroModel/MpiModel, then apply DFM defaults."""
        super(DFlowModel,self).__init__(*a,**kw)
        self.structures=[]  # list of dicts queued by add_Structure()
        self.load_default_mdu()
        if self.restart_from is not None:
            # set_restart_from presumably defined elsewhere in this class
            # (outside this view) -- configures continuation of a prior run.
            self.set_restart_from(self.restart_from)
        if self.dwaq is True:
            # Replace the boolean flag with an actual online-WAQ helper object.
            self.dwaq=waq_scenario.WaqOnlineModel(model=self)
def load_default_mdu(self):
"""
Load a default set of config values from data/defaults-r53925.mdu
"""
# Updated defaults-r53925.mdu by removing settings that 2021.03
# complains about.
fn=os.path.join(os.path.dirname(__file__),"data","defaults-2021.03.mdu")
self.load_mdu(fn)
# And some extra settings to make it compatible with this script
self.mdu['external forcing','ExtForceFile']='FlowFM.ext'
def clean_run_dir(self):
"""
Clean out most of the run dir, deleting files known to be
created by DFlowModel
"""
patts=['*.pli','*.tim','*.t3d','*.mdu','FlowFM.ext','*_net.nc','DFM_*', '*.dia',
'*.xy*','initial_conditions*','dflowfm-*.log']
for patt in patts:
matches=glob.glob(os.path.join(self.run_dir,patt))
for m in matches:
if os.path.isfile(m):
os.unlink(m)
elif os.path.isdir(m):
shutil.rmtree(m)
else:
raise Exception("What is %s ?"%m)
    def write_forcing(self,overwrite=True):
        """
        Create/truncate the old-style external forcing file, then delegate
        to the superclass which writes the individual BCs (each appends to
        this file).
        overwrite: if True, remove an existing forcing file first.
        """
        bc_fn=self.ext_force_file()
        assert bc_fn,"DFM script requires old-style BC file. Set [external forcing] ExtForceFile"
        if overwrite and os.path.exists(bc_fn):
            os.unlink(bc_fn)
        # Ensure the file exists even if no BCs get written.
        utils.touch(bc_fn)
        super(DFlowModel,self).write_forcing()
    def set_grid(self,grid):
        """
        Set the computational grid, then ensure a 'node_z_bed' node field
        exists, copying from legacy 'z_bed' or 'depth' fields if needed.
        """
        super(DFlowModel,self).set_grid(grid)
        # Specific to d-flow -- see if it's necessary to copy node-based depth
        # to node_z_bed.
        # Used to be that 'depth' was used as a node field, and it was implicitly
        # positive-up. trying to shift away from 'depth' being a positive-up
        # quantity, and instead use 'z_bed' and specifically 'node_z_bed'
        # for a node-centered, positive-up bathymetry value.
        node_fields=self.grid.nodes.dtype.names
        if 'node_z_bed' not in node_fields:
            if 'z_bed' in node_fields:
                self.grid.add_node_field('node_z_bed',self.grid.nodes['z_bed'])
                self.log.info("Duplicating z_bed to node_z_bed for less ambiguous naming")
            elif 'depth' in node_fields:
                self.grid.add_node_field('node_z_bed',self.grid.nodes['depth'])
                self.log.info("Duplicating depth to node_z_bed for less ambiguous naming, and assuming it was already positive-up")
default_grid_target_filename='grid_net.nc'
def grid_target_filename(self):
"""
The filename, relative to self.run_dir, of the grid. Not guaranteed
to exist, and if no grid has been set, or the grid has no filename information,
this will default to self.default_grid_target_filename
"""
if self.grid is None or self.grid.filename is None:
return self.default_grid_target_filename
else:
grid_fn=self.grid.filename
if not grid_fn.endswith('_net.nc'):
if grid_fn.endswith('.nc'):
grid_fn=grid_fn.replace('.nc','_net.nc')
else:
grid_fn=grid_fn+"_net.nc"
return os.path.basename(grid_fn)
    def dredge_boundary(self,linestring,dredge_depth):
        """Dredge along a boundary linestring; DFM grids carry bathy on
        nodes, so only node_z_bed is modified."""
        super(DFlowModel,self).dredge_boundary(linestring,dredge_depth,node_field='node_z_bed',
                                               edge_field=None,cell_field=None)
    def dredge_discharge(self,point,dredge_depth):
        """Dredge around a point discharge; node-based bathy as above."""
        super(DFlowModel,self).dredge_discharge(point,dredge_depth,node_field='node_z_bed',
                                                edge_field=None,cell_field=None)
    def write_grid(self):
        """
        Write self.grid to the run directory.
        Must be called after MDU is updated. Should also be called
        after write_forcing(), since some types of BCs can update
        the grid (dredging boundaries)
        """
        # Destination comes from the mdu, which update_config() sets from
        # grid_target_filename().
        dest=os.path.join(self.run_dir, self.mdu['geometry','NetFile'])
        self.grid.write_dfm(dest,overwrite=True,)
def subdomain_grid_filename(self,proc):
base_grid_name=self.mdu.filepath(('geometry','NetFile'))
proc_grid_name=base_grid_name.replace('_net.nc','_%04d_net.nc'%proc)
return proc_grid_name
    def subdomain_grid(self,proc):
        """
        For a run that has been partitioned, load the grid for a specific
        subdomain.
        proc: 0-based MPI rank.
        Returns an UnstructuredGrid instance.
        """
        g=ugrid.UnstructuredGrid.read_dfm(self.subdomain_grid_filename(proc))
        return g
    def ext_force_file(self):
        # Full path of the old-style external forcing file, or None/'' if unset.
        return self.mdu.filepath(('external forcing','ExtForceFile'))
    def load_template(self,fn):
        """ more generic name for load_mdu """
        return self.load_mdu(fn)
    def load_mdu(self,fn):
        """
        Reads an mdu into self.mdu. Does not update mdu_basename,
        such that self.write() will still use self.mdu_basename.
        """
        self.mdu=dio.MDUFile(fn)
    @classmethod
    def load(cls,fn):
        """
        Populate Model instance from an existing run.
        fn: path to an mdu file, or a directory containing one.
        Returns a DFlowModel, or None if no mdu was found.
        """
        fn=cls.to_mdu_fn(fn) # in case fn was a directory
        if fn is None:
            # no mdu was found
            return None
        # NOTE(review): instantiates DFlowModel directly rather than cls();
        # subclasses calling load() will still get a DFlowModel -- confirm
        # whether that is intentional.
        model=DFlowModel()
        model.load_mdu(fn)
        model.mdu_basename=os.path.basename(fn)
        try:
            model.grid = ugrid.UnstructuredGrid.read_dfm(model.mdu.filepath( ('geometry','NetFile') ))
        except FileNotFoundError:
            log.warning("Loading model from %s, no grid could be loaded"%fn)
            model.grid=None
        d=os.path.dirname(fn) or "."
        model.set_run_dir(d,mode='existing')
        # infer number of processors based on mdu files
        # Not terribly robust if there are other files around..
        sub_mdu=glob.glob( fn.replace('.mdu','_[0-9][0-9][0-9][0-9].mdu') )
        if len(sub_mdu)>0:
            model.num_procs=len(sub_mdu)
        else:
            # probably better to test whether it has even been processed
            model.num_procs=1
        # Recover the simulation time frame from the mdu.
        ref,start,stop=model.mdu.time_range()
        model.ref_date=ref
        model.run_start=start
        model.run_stop=stop
        model.load_gazetteer_from_run()
        return model
    def load_gazetteer_from_run(self):
        """
        Populate gazetteers with geometry read in from an existing run.
        So far only gets stations. Will have to come back to handle
        transects, regions, etc. and maybe even read back in BC locations,
        or query output history files.
        """
        fn=self.mdu.filepath(['output','ObsFile'])
        if fn and os.path.exists(fn):
            # Obs file format: "x y 'name'" per line, space-separated.
            stations=pd.read_csv(self.mdu.filepath(['output','ObsFile']),
                                 sep=' ',names=['x','y','name'],quotechar="'")
            stations['geom']=[geometry.Point(x,y) for x,y in stations.loc[ :, ['x','y']].values ]
            self.gazetteers.append(stations.to_records())
def parse_old_bc(self,fn):
"""
Parse syntax of old-style BC files into a list of dictionaries.
Keys are forced upper case.
"""
def key_value(s):
k,v=s.strip().split('=',1)
k,v=k.strip().upper(),v.strip()
return k,v
rec=None
recs=[]
with open(fn,'rt') as fp:
while 1:
line=fp.readline()
if line=="": break
line=line.split('#')[0].strip()
if not line: continue # blank line or comment
k,v=key_value(line)
if k=='QUANTITY':
rec={k:v}
recs.append(rec)
else:
rec[k]=v
return recs
    def load_bcs(self):
        """
        Woefully inadequate parsing of boundary condition data.
        For now, returns a list of dictionaries.
        TODO: populate self.bcs, optionally.
        Handle other BCs like at least flow.
        """
        ext_fn=self.mdu.filepath(['external forcing','ExtForceFile'])
        ext_new_fn=self.mdu.filepath(['external forcing','ExtForceFileNew'])
        recs=self.parse_old_bc(ext_fn)
        for rec in recs:
            if 'FILENAME' in rec:
                # The ext file doesn't have a notion of name.
                # punt via the filename
                rec['name'],ext=os.path.splitext(rec['FILENAME'])
            else:
                rec['name']=rec['QUANTITY'].upper()
                ext=None
            if ext=='.pli':
                # Polyline-based forcing: read geometry, then look for per-node
                # time series files (<name>_0001.tim, _0002.tim, ...).
                pli_fn=os.path.join(os.path.dirname(ext_fn),
                                    rec['FILENAME'])
                pli=dio.read_pli(pli_fn)
                rec['pli']=pli
                rec['coordinates']=rec['pli'][0][1]
                geom=geometry.LineString(rec['coordinates'])
                rec['geom']=geom
                # timeseries at one or more points along boundary:
                tims=[]
                for node_i,node_xy in enumerate(rec['coordinates']):
                    tim_fn=pli_fn.replace('.pli','_%04d.tim'%(node_i+1))
                    if os.path.exists(tim_fn):
                        t_ref,t_start,t_stop=self.mdu.time_range()
                        tim_ds=dio.read_dfm_tim(tim_fn,t_ref,columns=['stage'])
                        tim_ds['x']=(),node_xy[0]
                        tim_ds['y']=(),node_xy[1]
                        tims.append(tim_ds)
                data=xr.concat(tims,dim='node')
                rec['data']=data
            elif ext=='.xyz':
                # Spatially-varying scalar field (e.g. friction) as xyz samples.
                xyz_fn=os.path.join(os.path.dirname(ext_fn),
                                    rec['FILENAME'])
                # NOTE(review): sep='\s+' in a non-raw string -- fine today,
                # but r'\s+' would be cleaner.
                df=pd.read_csv(xyz_fn,sep='\s+',names=['x','y','z'])
                ds=xr.Dataset()
                ds['x']=('sample',),df['x']
                ds['y']=('sample',),df['y']
                ds['z']=('sample',),df['z']
                ds=ds.set_coords(['x','y'])
                rec['data']=ds.z
            else:
                pli=geom=pli_fn=None # avoid pollution
            # Wrap known quantities in hydro_model BC objects.
            # NOTE(review): the waterlevel/discharge branches read rec['geom'],
            # which is only set in the .pli branch -- a non-pli waterlevelbnd
            # would raise KeyError here. Confirm whether that can occur.
            if rec['QUANTITY'].upper()=='WATERLEVELBND':
                bc=hm.StageBC(name=rec['name'],geom=rec['geom'])
                rec['bc']=bc
            elif rec['QUANTITY'].upper()=='DISCHARGEBND':
                bc=hm.FlowBC(name=rec['name'],geom=geom)
                rec['bc']=bc
                if 'data' in rec:
                    # Single flow value, no sense of multiple time series
                    rec['data']=rec['data'].isel(node=0).rename({'stage':'flow'})
                else:
                    print("Reading discharge boundary, did not find data (%s)"%tim_fn)
            elif rec['QUANTITY'].upper()=='FRICTIONCOEFFICIENT':
                rec['bc']=hm.RoughnessBC(name=rec['name'],data_array=rec['data'])
            else:
                print("Not implemented: reading BC quantity=%s"%rec['QUANTITY'])
        return recs
@classmethod
def to_mdu_fn(cls,path):
"""
coerce path that is possibly a directory to a best guess
of the MDU path. file paths are left unchanged. returns None
if path is a directory but no mdu files is there.
"""
# all mdu files, regardless of case
if not os.path.isdir(path):
return path
fns=[os.path.join(path,f) for f in os.listdir(path) if f.lower().endswith('.mdu')]
# assume shortest is the one that hasn't been partitioned
if len(fns)==0:
return None
unpartitioned=np.argmin([len(f) for f in fns])
return fns[unpartitioned]
    def close(self):
        """
        Close open file handles -- this can help on windows where
        having a file open prevents it from being deleted.
        """
        # nothing right now -- placeholder so callers can call close()
        # unconditionally.
        pass
    def partition(self,partition_grid=None):
        """
        Partition the grid and mdu for an MPI run of num_procs subdomains.
        partition_grid: if None, partition the grid unless this is a restart
            (restarts reuse the predecessor's partitioned grids).
        No-op for serial runs.
        """
        if self.num_procs<=1:
            return
        # precompiled 1.5.2 linux binaries are able to partition the mdu okay,
        # so switch to always using dflowfm to partition grid and mdu.
        # unfortunately there does not appear to be an option to only partition
        # the mdu.
        if partition_grid is None:
            partition_grid=not self.restart
        if partition_grid:
            # oddly, even on windows, dflowfm requires only forward
            # slashes in the path to the mdu (ver 1.4.4)
            # since run_dflowfm uses run_dir as the working directory
            # here we strip to the basename
            cmd=["--partition:ndomains=%d:icgsolver=6"%self.num_procs,
                 os.path.basename(self.mdu.filename)]
            self.run_dflowfm(cmd,mpi=False)
        else:
            # Copy the partitioned network files:
            for proc in range(self.num_procs):
                old_grid_fn=self.restart_from.subdomain_grid_filename(proc)
                new_grid_fn=self.subdomain_grid_filename(proc)
                print("Copying pre-partitioned grid files: %s => %s"%(old_grid_fn,new_grid_fn))
                shutil.copyfile(old_grid_fn,new_grid_fn)
            # not a cross platform solution!
            # Shell script that splits the mdu for each subdomain.
            gen_parallel=os.path.join(self.dfm_bin_dir,"generate_parallel_mdu.sh")
            cmd=[gen_parallel,os.path.basename(self.mdu.filename),"%d"%self.num_procs,'6']
            return utils.call_with_path(cmd,self.run_dir)
    # cached override for the dflowfm executable path; None => derive from
    # dfm_bin_dir/dfm_bin_exe.
    _dflowfm_exe=None
    @property
    def dflowfm_exe(self):
        """Path to the dflowfm executable, with platform path separators."""
        if self._dflowfm_exe is None:
            p=os.path.join(self.dfm_bin_dir,self.dfm_bin_exe)
            if os.path.sep!="/":
                # normalize forward slashes for e.g. windows
                p=p.replace("/",os.path.sep)
            return p
        else:
            return self._dflowfm_exe
    @dflowfm_exe.setter
    def dflowfm_exe(self,v):
        self._dflowfm_exe=v
    def run_dflowfm(self,cmd,mpi='auto',wait=True):
        """
        Invoke the dflowfm executable with the list of
        arguments given in cmd=[arg1,arg2, ...]
        mpi: generally if self.num_procs>1, mpi will be used. this
        can be set to False or 0, in which case mpi will not be used
        even when num_procs is >1. This is useful for partition which
        runs single-core.
        wait: True: do not return until the command finishes.
        False: return immediately.
        For now, the backend can only support one or the other, depending
        on platform. See hydro_model.py:MpiModel for details.
        """
        if mpi=='auto':
            num_procs=self.num_procs
        else:
            # any non-'auto' value forces a serial invocation
            num_procs=1
        if num_procs>1:
            real_cmd=( [self.dflowfm_exe] + cmd )
            return self.mpirun(real_cmd,working_dir=self.run_dir,wait=wait)
        else:
            real_cmd=[self.dflowfm_exe]+cmd
            self.log.info("Running command: %s"%(" ".join(real_cmd)))
            # runs with run_dir as the working directory
            return utils.call_with_path(real_cmd,self.run_dir)
def run_simulation(self,threads=1,extra_args=[]):
"""
Start simulation.
threads: if specified, pass on desired number of openmp threads to dfm.
extra_args: additional list of other commandline arguments. Note that
arguments must be split up into a list (e.g. ["--option","value"] as
opposed to "--option value").
"""
cmd=[]
if threads is not None:
cmd += ["-t","%d"%threads]
cmd += ["--autostartstop",os.path.basename(self.mdu.filename)]
if self.dwaq:
cmd=self.dwaq.update_command(cmd)
cmd += extra_args
return self.run_dflowfm(cmd=cmd)
    @classmethod
    def run_completed(cls,fn):
        """
        fn: path to mdu file. will attempt to guess the right mdu if a directory
        is provided, but no guarantees.
        returns: True if the file exists and the folder contains a run which
        ran to completion. Otherwise False.
        """
        if not os.path.exists(fn):
            return False
        model=cls.load(fn)
        if model is not None:
            result=model.is_completed()
            # close handles promptly (matters on windows)
            model.close()
        else:
            result=False
        return result
    def is_completed(self):
        """
        return true if the model has been run.
        this can be tricky to define -- here completed is based on
        a report in a diagnostic that the run finished.
        this doesn't mean that all output files are present.
        """
        root_fn=self.mdu.filename[:-4] # drop .mdu suffix
        # Look in multiple locations for diagnostic file.
        # In older DFM, MPI runs placed it next to mdu, while
        # serial and newer DFM (>=1.6.2?) place it in
        # output folder
        dia_fns=[]
        dia_fn_base=os.path.basename(root_fn)
        if self.num_procs>1:
            # rank-0 diagnostic file for MPI runs
            dia_fn_base+='_0000.dia'
        else:
            dia_fn_base+=".dia"
        dia_fns.append(os.path.join(self.run_dir,dia_fn_base))
        dia_fns.append(os.path.join(self.run_dir,
                                    "DFM_OUTPUT_%s"%self.mdu.name,
                                    dia_fn_base))
        for dia_fn in dia_fns:
            assert dia_fn!=self.mdu.filename,"Probably case issues with %s"%dia_fn
            if os.path.exists(dia_fn):
                break
        else:
            # no diagnostic file anywhere => not completed
            return False
        # Read the last 1000 bytes
        with open(dia_fn,'rb') as fp:
            fp.seek(0,os.SEEK_END)
            tail_size=min(fp.tell(),10000)
            fp.seek(-tail_size,os.SEEK_CUR)
            # This may not be py2 compatible!
            tail=fp.read().decode(errors='ignore')
        # DFM prints this on a clean finish.
        return "Computation finished" in tail
    def update_config(self):
        """
        Update fields in the mdu object with data from self.
        Sets time range, filenames, monitor files and initial water level;
        does not write anything to disk (aside from the dwaq side-effects
        noted below).
        """
        if self.mdu is None:
            self.mdu=dio.MDUFile()
        self.mdu.set_time_range(start=self.run_start,stop=self.run_stop,
                                ref_date=self.ref_date)
        self.mdu.set_filename(os.path.join(self.run_dir,self.mdu_basename))
        self.mdu['geometry','NetFile'] = self.grid_target_filename()
        # Try to allow for the caller handling observation and cross-section
        # files externally or through the interface -- to that end, don't
        # overwrite ObsFile or CrsFile, but if internally there are point/
        # line observations set, make sure that there is a filename there.
        if len(self.mon_points)>0 and not self.mdu['output','ObsFile']:
            self.mdu['output','ObsFile']="obs_points.xyn"
        if len(self.mon_sections)>0 and not self.mdu['output','CrsFile']:
            self.mdu['output','CrsFile']="obs_sections.pli"
        self.update_initial_water_level()
        if self.dwaq:
            # This updates
            # a few things in self.mdu
            # Also actually writes some output, though that could be
            # folded into a later part of the process if it turns out
            # the dwaq config depends on reading some of the DFM
            # details.
            self.dwaq.write_waq()
    def write_config(self):
        """Write structures, monitor files and finally the mdu itself.
        Assumes update_config() already called."""
        self.write_structures() # updates mdu
        self.write_monitors()
        log.info("Writing MDU to %s"%self.mdu.filename)
        self.mdu.write()
    def write_monitors(self):
        """Write observation point and cross-section files for the run."""
        self.write_monitor_points()
        self.write_monitor_sections()
    def write_monitor_points(self):
        """Append monitor points to the mdu's ObsFile ("x y 'name'" lines)."""
        fn=self.mdu.filepath( ('output','ObsFile') )
        if fn is None: return
        # append mode, so externally-managed entries are preserved
        with open(fn,'at') as fp:
            for i,mon_feat in enumerate(self.mon_points):
                try:
                    name=mon_feat['name']
                except KeyError:
                    name="obs_pnt_%03d"%i
                xy=np.array(mon_feat['geom'])
                fp.write("%.3f %.3f '%s'\n"%(xy[0],xy[1],name))
    def write_monitor_sections(self):
        """Append monitor cross-sections to the mdu's CrsFile (pli format)."""
        fn=self.mdu.filepath( ('output','CrsFile') )
        if fn is None: return
        with open(fn,'at') as fp:
            for i,mon_feat in enumerate(self.mon_sections):
                try:
                    name=mon_feat['name']
                except KeyError:
                    name="obs_sec_%03d"%i
                xy=np.array(mon_feat['geom'])
                dio.write_pli(fp,[ (name,xy) ])
    def add_Structure(self,**kw):
        """Queue a hydraulic structure; keyword fields (type, name, geom,
        plus structure parameters) are written by write_structures()."""
        self.structures.append(kw)
    def write_structures(self):
        """
        Write structures.ini and one pli per structure, and point the mdu
        at the structure file. No-op if no structures have been added.
        """
        structure_file='structures.ini'
        if len(self.structures)==0:
            return
        self.mdu['geometry','StructureFile']=structure_file
        with open( self.mdu.filepath(('geometry','StructureFile')),'wt') as fp:
            for s in self.structures:
                lines=[
                    "[structure]",
                    "type = %s"%s['type'],
                    "id = %s"%s['name'],
                    "polylinefile = %s.pli"%s['name']
                    ]
                # Remaining keys become "key = value" entries; time-varying
                # parameters (DataArrays) are written as .tim files and
                # referenced by filename.
                for k in s:
                    if k in ['type','name','geom']: continue
                    if isinstance(s[k],xr.DataArray):
                        log.warning(f"{k} appears to be data")
                        tim_base=f"{s['name']}_{k}.tim"
                        tim_fn=os.path.join(self.run_dir,tim_base)
                        self.write_tim(s[k],tim_fn)
                        lines.append( "%s = %s"%(k,tim_base) )
                    else:
                        lines.append( "%s = %s"%(k,s[k]) )
                lines.append("\n")
                # "door_height = %.3f"%s['door_height'],
                # "lower_edge_level = %.3f"%s['lower_edge_level'],
                # "opening_width = %.3f"%s['opening_width'],
                # "sill_level = %.3f"%s['sill_level'],
                # "horizontal_opening_direction = %s"%s['horizontal_opening_direction'],
                # "\n"
                fp.write("\n".join(lines))
                # Structure geometry: either given inline, or looked up from
                # the gazetteer by name.
                pli_fn=os.path.join(self.run_dir,s['name']+'.pli')
                if 'geom' in s:
                    geom=s['geom']
                    if isinstance(geom,np.ndarray):
                        geom=geometry.LineString(geom)
                else:
                    geom=self.get_geometry(name=s['name'])
                assert geom.type=='LineString'
                pli_data=[ (s['name'], np.array(geom.coords)) ]
                dio.write_pli(pli_fn,pli_data)
# some read/write methods which may have to refer to model state to properly
# parse inputs.
    def read_bc(self,fn):
        """
        Read a new-style BC file into an xarray dataset
        """
        # Thin wrapper so callers go through the model instead of dio directly.
        return dio.read_dfm_bc(fn)
    def read_tim(self,fn,time_unit=None,columns=['val1','val2','val3']):
        """
        Parse a tim file to xarray Dataset. This needs to be a model method so
        that we know the units, and reference date. Currently, this immediately
        reads the file, which may have to change in the future for performance
        or ease-of-use reasons.
        time_unit: 'S' for seconds, 'M' for minutes. Relative to model reference
        time.
        returns Dataset with 'time' dimension, and data columns labeled according
        to columns.
        """
        # NOTE(review): `columns` is a mutable default, but it is only read
        # here, never mutated, so this is safe as written.
        if time_unit is None:
            # time_unit=self.mdu['time','Tunit']
            # always minutes, unless overridden by caller
            time_unit='M'
        ref_time,_,_ = self.mdu.time_range()
        return dio.read_dfm_tim(fn,time_unit=time_unit,
                                ref_time=ref_time,
                                columns=columns)
def write_bc(self,bc):
if isinstance(bc,hm.StageBC):
self.write_stage_bc(bc)
elif isinstance(bc,hm.SourceSinkBC):
self.write_source_bc(bc)
elif isinstance(bc,hm.FlowBC):
self.write_flow_bc(bc)
elif isinstance(bc,hm.WindBC):
self.write_wind_bc(bc)
elif isinstance(bc,hm.RoughnessBC):
self.write_roughness_bc(bc)
elif isinstance(bc,hm.ScalarBC):
self.write_scalar_bc(bc)
else:
super(DFlowModel,self).write_bc(bc)
# If True, timesteps in the forcing data beyond the run
# will be trimmed out.
bc_trim_time=True
def write_tim(self,da,file_path,trim_time=None):
"""
Write a DFM tim file based on the timeseries in the DataArray.
da must have a time dimension. No support yet for vector-values here.
file_path is relative to the working directory of the script, not
the run_dir.
"""
if trim_time is None:
trim_time=self.bc_trim_time
ref_date,start,stop = self.mdu.time_range()
dt=np.timedelta64(60,'s') # always minutes
if 'time' not in da.dims:
pad=np.timedelta64(86400,'s')
times=np.array([start-pad,stop+pad])
values=np.array([da.values,da.values])
else:
# Be sure time is the first dimension
dim_order=['time'] + [d for d in da.dims if d!='time']
da=da.transpose(*dim_order)
times=da.time.values
values=da.values
if trim_time:
sel=(times>=start)&(times<=stop)
if sum(sel) > 1:
# Expand by one
sel[:-1] = sel[1:] | sel[:-1]
sel[1:] = sel[1:] | sel[:-1]
times=times[sel]
values=values[sel]
else:
times = [start, stop]
closest_val = values[times.index(min(times, key=lambda t: abs(t - start)))]
log.warning(f'No data for simulation period: {start} - {stop}. Setting value to: {closest_val}')
values = [closest_val, closest_val]
elapsed_time=(times - ref_date)/dt
data=np.c_[elapsed_time,values]
np.savetxt(file_path,data)
    def write_stage_bc(self,bc):
        """Write a water level boundary (old-style ext + pli + tim)."""
        self.write_gen_bc(bc,quantity='stage')
    def write_flow_bc(self,bc):
        """Write a discharge boundary, dredging the adjacent nodes if the
        BC specifies a dredge_depth (skipped for restarts)."""
        self.write_gen_bc(bc,quantity='flow')
        if (bc.dredge_depth is not None) and (self.restart_from is None):
            # Additionally modify the grid to make sure there is a place for inflow to
            # come in.
            log.info("Dredging grid for flow BC %s"%bc.name)
            self.dredge_boundary(np.array(bc.geom.coords),bc.dredge_depth)
        else:
            log.info("dredging disabled")
    def write_source_bc(self,bc):
        """
        Write a DFM source/sink BC. In DFM, source/sinks carry flow plus
        all scalars (salinity, temperature, dwaq tracers) in one data file,
        so this collects defaults and any associated ScalarBCs before
        delegating to write_gen_bc(quantity='source').
        """
        # DFM source/sinks have salinity and temperature attached
        # the same data file.
        # the pli file can have a single entry, and include a z coordinate,
        # based on lsb setup
        # Source Sink BCs in DFM have to include all of the scalars in one go.
        # Build a list of scalar names and default BCs, then scan for any specified
        # scalar BCs to use instead of defaults
        scalar_names=[] # forced to lower case
        scalar_das=[]
        # In the case of a single-ended source/sink, these
        # should pull default value from the model config, instead of
        # assuming 0.0
        single_ended=bc.geom.geom_type=='Point'
        if int(self.mdu['physics','Salinity']):
            scalar_names.append('salinity')
            if single_ended:
                default=self.mdu['physics','InitialSalinity']
                if default is None: default=0.0
                else: default=float(default)
            else:
                default=0.0
            scalar_das.append(xr.DataArray(default,name='salinity'))
        if int(self.mdu['physics','Temperature']):
            scalar_names.append('temperature')
            if single_ended:
                default=self.mdu['physics','InitialTemperature']
                if default is None: default=0.0
                else: default=float(default)
            else:
                default=0.0
            scalar_das.append(xr.DataArray(default,name='temp'))
        if self.dwaq:
            # dwaq tracers ride along in the same source/sink record
            for sub in self.dwaq.substances:
                scalar_names.append(sub.lower())
                if single_ended:
                    default=self.dwaq.substances[sub].initial.default
                else:
                    default=0.0
                scalar_das.append(xr.DataArray(default,name=sub))
        # NOTE(review): salt_bc/temp_bc appear unused below.
        salt_bc=None
        temp_bc=None
        # Replace defaults with any ScalarBC whose parent is this BC.
        for scalar_bc in self.bcs:
            if isinstance(scalar_bc, hm.ScalarBC) and scalar_bc.parent==bc:
                scalar=scalar_bc.scalar.lower()
                try:
                    idx=scalar_names.index( scalar )
                except ValueError:
                    raise Exception("Scalar %s not in known list %s"%(scalar,scalar_names))
                scalar_das[idx]=scalar_bc.data()
        # Source/sink bcs in DFM include salinity and temperature, as well as any tracers
        # from dwaq
        das=[bc.data()] + scalar_das
        # merge data arrays including time
        # write_tim has been updated to transpose time to be the first dimension
        # as needed, so this should be okay
        # But we do need to broadcast before they can be concatenated.
        das=xr.broadcast(*das)
        # 'minimal' here avoids a crash if one of the dataarrays has an
        # extra coordinate that isn't actually used (like a singleton coordinate
        # from an isel() )
        da_combined=xr.concat(das,dim='component',coords='minimal')
        self.write_gen_bc(bc,quantity='source',da=da_combined)
        if (bc.dredge_depth is not None) and (self.restart_from is None):
            # Additionally modify the grid to make sure there is a place for inflow to
            # come in.
            log.info("Dredging grid for source/sink BC %s"%bc.name)
            # These are now class methods using a generic implementation in HydroModel
            # may need some tlc
            self.dredge_discharge(np.array(bc.geom.coords),bc.dredge_depth)
        else:
            log.info("dredging disabled")
    def write_gen_bc(self,bc,quantity,da=None):
        """
        handle the actual work of writing flow and stage BCs.
        quantity: 'stage','flow','source'
        da: override value for bc.data()
        Writes a pli (or pliz for 3D coordinates), appends a QUANTITY
        stanza to the old-style ext file, and writes the tim data file.
        """
        # 2019-09-09 RH: the automatic suffix is a bit annoying. it is necessary
        # when adding scalars, but for any one BC, only one of stage, flow or source
        # would be present. Try dropping the suffix here.
        bc_id=bc.name # +"_" + quantity
        assert isinstance(bc.geom_type,list),"Didn't fully refactor, looks like"
        if (bc.geom is None) and (None not in bc.geom_type):
            raise Exception("BC %s, name=%s has no geometry. Maybe missing from shapefiles?"%(bc,bc.name))
        assert bc.geom.type in bc.geom_type
        coords=np.array(bc.geom.coords)
        ndim=coords.shape[1] # 2D or 3D geometry
        # Special handling when it's a source/sink, with z/z_src specified
        if quantity=='source':
            if ndim==2 and bc.z is not None:
                # construct z
                missing=-9999.
                z_coords=missing*np.ones(coords.shape[0],np.float64)
                # Sink z applies to the last vertex, source z to the first.
                for z_val,idx in [ (bc.z,-1),
                                   (bc.z_src,0) ]:
                    if z_val is None: continue
                    # sentinel strings map to large magnitudes DFM interprets
                    # as bed/surface
                    if z_val=='bed':
                        z_val=-10000
                    elif z_val=='surface':
                        z_val=10000
                    z_coords[idx]=z_val
                if z_coords[0]==missing:
                    z_coords[0]=z_coords[-1]
                # middle coordinates, if any, don't matter
                coords=np.c_[ coords, z_coords ]
                ndim=3
        pli_data=[ (bc_id, coords) ]
        if ndim==2:
            pli_fn=bc_id+'.pli'
        else:
            # 3D coordinates use the pliz extension
            pli_fn=bc_id+'.pliz'
        dio.write_pli(os.path.join(self.run_dir,pli_fn),pli_data)
        with open(self.ext_force_file(),'at') as fp:
            lines=[]
            method=3 # default
            if quantity=='stage':
                lines.append("QUANTITY=waterlevelbnd")
                tim_path=os.path.join(self.run_dir,bc_id+"_0001.tim")
            elif quantity=='flow':
                lines.append("QUANTITY=dischargebnd")
                tim_path=os.path.join(self.run_dir,bc_id+"_0001.tim")
            elif quantity=='source':
                lines.append("QUANTITY=discharge_salinity_temperature_sorsin")
                method=1 # not sure how this is different
                # source/sink tim has no node suffix
                tim_path=os.path.join(self.run_dir,bc_id+".tim")
            else:
                assert False
            lines+=["FILENAME=%s"%pli_fn,
                    "FILETYPE=9",
                    "METHOD=%d"%method,
                    "OPERAND=O",
                    ""]
            fp.write("\n".join(lines))
        if da is None:
            da=bc.data()
        # assert len(da.dims)<=1,"Only ready for dimensions of time or none"
        self.write_tim(da,tim_path)
def write_wind_bc(self,bc):
    """
    Append a spatially uniform wind (windxy) entry to the external forcing
    file and write the matching time series (.tim) file.

    bc: wind boundary condition; bc.geom must be None (only uniform wind
    is supported), and bc.data() supplies the time series.
    """
    assert bc.geom is None,"Spatially limited wind not yet supported"
    tim_fn=bc.name+".tim"
    tim_path=os.path.join(self.run_dir,tim_fn)
    # Build the forcing-file stanza up front, then append it in one write.
    entry=["QUANTITY=windxy",
           "FILENAME=%s"%tim_fn,
           "FILETYPE=2",
           "METHOD=1",
           "OPERAND=O",
           "\n"]
    with open(self.ext_force_file(),'at') as fp:
        fp.write("\n".join(entry))
    self.write_tim(bc.data(),tim_path)
def write_scalar_bc(self,bc):
    """
    Write a scalar (salinity/temperature/tracer) boundary condition:
    a .pli polyline alongside the parent stage/flow BC, an entry in the
    external forcing file, and a .tim time series.

    Scalars attached to a SourceSinkBC are skipped here; they are folded
    into the source/sink entry elsewhere.
    """
    bc_id=bc.name+"_"+bc.scalar
    parent_bc=bc.parent
    if isinstance(parent_bc,hm.SourceSinkBC):
        # source/sink scalars ride along with the discharge entry
        log.debug("BC %s should be handled by SourceSink"%bc_id)
        return
    assert isinstance(parent_bc, (hm.StageBC,hm.FlowBC)),"Haven't implemented point-source scalar yet"
    assert parent_bc.geom.type=='LineString'
    # Reuse the parent BC's polyline geometry for the scalar boundary.
    pli_data=[ (bc_id, np.array(parent_bc.geom.coords)) ]
    pli_fn=bc_id+'.pli'
    dio.write_pli(os.path.join(self.run_dir,pli_fn),pli_data)
    # Map the generic scalar name onto the DFM QUANTITY keyword.
    if isinstance(bc, DelwaqScalarBC):
        quant=f'tracerbnd{bc.scalar}'
    elif bc.scalar=='salinity':
        quant='salinitybnd'
    elif bc.scalar=='temperature':
        quant='temperaturebnd'
    else:
        self.log.info("scalar '%s' will be passed to DFM verbatim"%bc.scalar)
        quant=bc.scalar
    with open(self.ext_force_file(),'at') as fp:
        lines=["QUANTITY=%s"%quant,
               "FILENAME=%s"%pli_fn,
               "FILETYPE=9",
               "METHOD=3",
               "OPERAND=O",
               "\n"
               ]
        fp.write("\n".join(lines))
    da=bc.data()
    # Write tim
    assert len(da.dims)<=1,"Only ready for dimensions of time or none"
    tim_path=os.path.join(self.run_dir,bc_id+"_0001.tim")
    self.write_tim(da,tim_path)
def write_roughness_bc(self,bc):
    """
    Append a spatially varying friction coefficient entry to the external
    forcing file and write the sample points as an .xyz file.

    bc.data() must provide x, y coordinates and roughness values.
    """
    xyz_fn=bc.name+".xyz"
    xyz_path=os.path.join(self.run_dir,xyz_fn)
    # Register the samples file with the external forcing configuration.
    stanza="\n".join(["QUANTITY=frictioncoefficient",
                      "FILENAME=%s"%xyz_fn,
                      "FILETYPE=7",
                      "METHOD=4",
                      "OPERAND=O",
                      "\n"
                      ])
    with open(self.ext_force_file(),'at') as fp:
        fp.write(stanza)
    # Write the x,y,value samples themselves.
    da=bc.data()
    samples=np.c_[ da.x.values,
                   da.y.values,
                   da.values ]
    np.savetxt(xyz_path,samples)
def initial_water_level(self):
    """
    Estimate of the initial free-surface elevation.

    Some BC methods which want a depth need an estimate of the water
    surface elevation; the configured initial water level is as good a
    guess as any.
    """
    wl = self.mdu['geometry','WaterLevIni']
    return float(wl)
def update_initial_water_level(self):
    """
    Automatically set an initial water level based on the first
    StageBC. If no stage BC is found, makes no changes, otherwise
    updates self.mdu. Currently not smart about MultiBCs.
    """
    for bc in self.bcs:
        if isinstance(bc,hm.StageBC):
            # Evaluate the stage BC at the start of the run and adopt it
            # as the uniform initial condition.
            wl=bc.evaluate(t=self.run_start)
            self.mdu['geometry','WaterLevIni']=float(wl)
            self.log.info("Pulling initial water level from BC: %.3f"%wl)
            return
    self.log.info("Could not find BC to get initial water level")
def map_outputs(self):
    """
    Return a sorted list of map (*_map.nc) output files for this run.
    """
    pattern=os.path.join(self.mdu.output_dir(),'*_map.nc')
    return sorted(glob.glob(pattern))
def his_output(self):
    """
    Return the path to the history file output.

    [Sometimes] DFM writes a history file from each processor at the very
    end; the rank 0 file has all time steps while the others hold just a
    single step, so return the first file in sorted order.
    """
    candidates=sorted(glob.glob(os.path.join(self.mdu.output_dir(),'*_his.nc')))
    return candidates[0]
def hyd_output(self):
    """ Path to DWAQ-format hyd file """
    name=self.mdu.name
    waq_dir="DFM_DELWAQ_%s"%name
    return os.path.join( self.run_dir, waq_dir, "%s.hyd"%name )
def restartable_time(self):
    """
    Based on restart files, what is the latest time that restart
    data exists for continuing this run?
    Returns None if no restart data was found.
    """
    # Sorting the filenames orders both processor ranks and timestamps.
    rst_fns=sorted(glob.glob(os.path.join(self.mdu.output_dir(),'*_rst.nc')))
    if not rst_fns:
        return None
    ds=xr.open_dataset(rst_fns[-1])
    t=ds.time.values[0]
    ds.close()
    return t
def create_restart(self,**restart_args):
    """
    Construct a new model instance configured as a restart of this run.

    Instantiates self.__class__ (so subclasses restart as themselves) and
    forwards keyword arguments to set_restart_from().
    """
    restart=self.__class__()
    restart.set_restart_from(self,**restart_args)
    return restart
def set_restart_from(self,model,deep=True,mdu_suffix=""):
    """
    Pull the restart-related settings from model into the current instance.
    This is going to need tweaking. Previously it would re-use the original
    run directory, since outputs would go into a new sub-directory. But
    that's not flexible enough for general use of restarts.
    The default is a 'deep' restart, with a separate run dir.
    If deep is false, then mdu_suffix must be nonempty, a new mdu will be
    written alongside the existing one.
    """
    if not deep:
        assert mdu_suffix,"Shallow restart must provide suffix for new mdu file"
    else:
        # NOTE(review): this assigns run_dir=model.run_dir for deep restarts,
        # yet the `if deep:` assert below requires the two run dirs to
        # differ -- verify which branch is intended.
        self.run_dir=model.run_dir
    # Clone the configuration and rename the mdu with the suffix.
    self.mdu=model.mdu.copy()
    self.mdu_basename=os.path.basename( model.mdu_basename.replace('.mdu',mdu_suffix+".mdu") )
    self.mdu.set_filename( os.path.join(self.run_dir, self.mdu_basename) )
    self.restart=True
    self.restart_model=model
    self.ref_date=model.ref_date
    # The restart picks up where the previous run left restart data.
    self.run_start=model.restartable_time()
    assert self.run_start is not None,"Trying to restart run that has no restart data"
    self.num_procs=model.num_procs
    self.grid=model.grid
    if deep:
        assert self.run_dir != model.run_dir
    # Standard restart-file naming: <name>_<YYYYMMDD_HHMMSS>_rst.nc
    rst_base=os.path.join(model.mdu.output_dir(),
                          (model.mdu.name
                           +'_'+utils.to_datetime(self.run_start).strftime('%Y%m%d_%H%M%S')
                           +'_rst.nc'))
    # That gets rst_base relative to the cwd, but we need it relative
    # to the new runs run_dir
    self.mdu['restart','RestartFile']=os.path.relpath(rst_base,start=self.run_dir)
def restart_inputs(self):
    """
    Return a list of paths to restart data that will be used as the
    initial condition for this run, one path per processor rank.

    Assumes the non-merged style of restart data, where each rank has its
    own file, and that self.mdu['restart','RestartFile'] follows the
    standard naming with a 23-character '_YYYYMMDD_HHMMSS_rst.nc' tail.
    """
    rst_base=self.mdu['restart','RestartFile']
    # The trailing 23 characters hold the '_YYYYMMDD_HHMMSS_rst.nc'
    # portion; the zero-padded rank is inserted just before it.
    # (Removed unused path/base/suffix locals from the original.)
    return [ (rst_base[:-23] + '_%04d'%p + rst_base[-23:])
             for p in range(self.num_procs)]
def modify_restart_data(self,modify_ic):
    """
    Apply the given function to restart data, and copy the restart
    files at the same time.

    Updates self.mdu['restart','RestartFile'] to point to the new
    location, which will be the output folder for this run.

    modify_ic: fn(xr.Dataset, **kw) => None or xr.Dataset
    it should take **kw, to flexibly allow more information to be passed in
    in the future.
    """
    for proc,rst in enumerate(self.restart_inputs()):
        old_dir=os.path.dirname(rst)  # NOTE(review): unused -- candidate for removal
        # Destination keeps the original basename but lives in this run's
        # output folder.
        new_rst=os.path.join(self.mdu.output_dir(),os.path.basename(rst))
        assert rst!=new_rst
        ds=xr.open_dataset(rst)
        new_ds=modify_ic(ds,proc=proc,model=self)
        if new_ds is None:
            new_ds=ds # assume modified in place
        dest_dir=os.path.dirname(new_rst)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        new_ds.to_netcdf(new_rst)
    # Point the configuration at the copied/modified restart files.
    old_rst_base=self.mdu['restart','RestartFile']
    new_rst_base=os.path.join( self.mdu.output_dir(), os.path.basename(old_rst_base))
    self.mdu['restart','RestartFile']=new_rst_base
def extract_section(self,name=None,chain_count=1,refresh=False,
                    xy=None,ll=None,data_vars=None):
    """
    Return xr.Dataset for monitored cross section.
    currently only supports selection by name. may allow for
    xy, ll in the future.
    refresh: force a close/open on the netcdf.
    """
    assert name is not None,"Currently sections can only be pulled by name"
    his=xr.open_dataset(self.his_output())
    if refresh:
        # re-open to pick up data appended while the run is underway
        his.close()
        his=xr.open_dataset(self.his_output())
    names=his.cross_section_name.values
    try:
        # netCDF string variables may come back as bytes; decode to str
        names=[n.decode() for n in names]
    except AttributeError:
        pass
    if name not in names:
        print("section %s not found. Options are:"%name)
        print(", ".join(names))
        return
    idx=names.index(name)
    # this has a bunch of extra cruft -- some other time remove
    # the parts that are not relevant to the cross section.
    ds=his.isel(cross_section=idx)
    return self.translate_vars(ds,requested_vars=data_vars)
def translate_vars(self,ds,requested_vars=None):
    """
    Alias model-specific variable names to generic names.

    Callers may ask a model for 'water_level' or 'flow', while the output
    names them 'waterlevel', 'cross_section_discharge', etc. For each
    requested name not already present in ds, copy the known model
    variable (when it exists) under the generic name, recording the
    source in the new variable's history attribute.

    The interface is going to evolve here...

    ds: xr.Dataset, presumably from model output.
    requested_vars: if present, a list of variable names that the caller
    wants. Otherwise all data variables.

    Returns ds, updated in place.
    """
    aliases={'flow':'cross_section_discharge',
             'water_level':'waterlevel'}
    wanted = ds.data_vars if requested_vars is None else requested_vars
    for generic in wanted:
        if generic in ds:
            continue
        src = aliases.get(generic)
        if src is not None and src in ds:
            ds[generic] = ds[src]
            ds[generic].attrs['history'] = 'Copied from %s' % src
    return ds
def extract_station(self,xy=None,ll=None,name=None,refresh=False,
                    data_vars=None):
    """
    Return xr.Dataset for a monitored station, selected by name.

    xy, ll: placeholders for future coordinate-based selection.
    refresh: force a close/open on the netcdf.
    Returns None if the name is not found.
    """
    his=xr.open_dataset(self.his_output())
    if refresh:
        # re-open to pick up data appended while the run is underway
        his.close()
        his=xr.open_dataset(self.his_output())
    if name is not None:
        names=his.station_name.values
        try:
            # netCDF string variables may come back as bytes; decode to str
            names=[n.decode() for n in names]
        except AttributeError:
            pass
        if name not in names:
            return None
        idx=names.index(name)
    else:
        raise Exception("Only picking by name has been implemented for DFM output")
    # this has a bunch of extra cruft -- some other time remove
    # the parts that are not relevant to the station
    ds=his.isel(stations=idx)
    # When runs are underway, some time values beyond the current point in the
    # run are set to t0. Remove those.
    non_increasing=(ds.time.values[1:] <= ds.time.values[:-1])
    if np.any(non_increasing):
        # e.g. time[1]==time[0]
        # then diff(time)[0]==0
        # nonzero gives us 0, and the correct slice is [:1]
        stop=np.nonzero(non_increasing)[0][0]
        ds=ds.isel(time=slice(None,stop+1))
    return self.translate_vars(ds,requested_vars=data_vars)
class DelwaqScalarBC(hm.ScalarBC):
    """Marker subclass for DWAQ tracer boundary conditions.

    write_scalar_bc() maps instances to a 'tracerbnd<scalar>' quantity.
    """
    # for now just checking if isinstance in write_scalar_bc(), but may want to handle differently than hm.ScalarBC
    pass
import sys
# On Windows the DFM and MPI executables carry '.exe' suffixes; patch the
# class-level defaults so runs launch the correct binaries.
if sys.platform=='win32':
    cls=DFlowModel
    cls.dfm_bin_exe="dflowfm-cli.exe"
    cls.mpi_bin_exe="mpiexec.exe"
# Command-line entry point: currently only supports creating a shallow
# restart of an existing run from its MDU file.
if __name__=='__main__':
    import argparse, sys
    parser=argparse.ArgumentParser(description="Command line manipulation of DFM runs")
    parser.add_argument('--restart', action="store_true", help='restart a run')
    parser.add_argument('--mdu', metavar="file.mdu", default=None,
                        help='existing MDU file')
    #parser.add_argument('--output', metavar="path", default=None, nargs=1,
    #                    help='new output directory')
    args=parser.parse_args()
    if args.restart:
        mdu_fn=args.mdu
        if mdu_fn is None:
            # No MDU given: fall back to the first *.mdu in the cwd.
            mdus=glob.glob("*.mdu")
            mdus.sort()
            mdu_fn=mdus[0]
            print("Will use mdu_fn '%s' for input"%mdu_fn)
        # Super simple approach for the moment
        model=DFlowModel.load(mdu_fn)
        # Update MDU
        t_restart=model.restartable_time()
        if t_restart is None:
            print("Didn't find a restartable time")
            sys.exit(1)
        print("Restartable time is ",t_restart)
        # Should make this configurable.
        # For now, default to a 'shallow' restart, same run dir, same inputs,
        # same run_stop, only changing the start time, and specifying restart file.
        # Also need to add MPI support.
        restart=model.create_restart(deep=False,mdu_suffix="r")
        restart.run_stop=model.run_stop
        restart.update_config()
        assert restart.mdu.filename != mdu_fn
        restart.mdu.write()
        print("Shallow restart: %s to %s, mdu=%s"%(restart.run_start,
                                                   restart.run_stop,
                                                   restart.mdu.filename))
def extract_transect_his(his_ds,pattern):
    """
    Helper method to create a single xr.Dataset compatible with xr_transect
    out of a group of history output locations.

    his_ds: xr.Dataset for history output of a run.
    pattern: regular expression for the station names. For example, if the
    stations are tranA_0000, tranA_0001, ..., tranA_0099
    then pattern='tranA_00..' or just 'tranA.*'

    Station names are assumed to be sorted along the transect. Sorting is by
    python default ordering, so tranA_01 and tranA_1 are not the same.

    TODO: include projected velocities
    """
    import re
    # Gather station indexes for matching names
    names={}
    for i,name in enumerate(his_ds.station_name.values):
        if name in names: continue # on the off chance that names are repeated.
        if re.match(pattern,name.decode()):
            names[name]=i
    # sort names
    roster=list(names.keys())
    order=np.argsort(roster)
    idxs=[ names[roster[i]] for i in order]
    # Drop dimensions unrelated to stations before slicing.
    extra_dims=['cross_section','gategens','general_structures','nFlowLink',
                'nNetLink','nFlowElemWithBnd','station_geom_nNodes']
    ds=his_ds.drop_dims(extra_dims).isel(stations=idxs)
    # Make it look like an xr_transect
    dsxr=ds.rename(stations='sample',station_x_coordinate='x_sample',station_y_coordinate='y_sample',
                   laydim='layer',laydimw='interface',zcoordinate_c='z_ctr',zcoordinate_w='z_int')
    # add distance?
    xy=np.c_[ dsxr.x_sample.values,
              dsxr.y_sample.values ]
    dsxr['d_sample']=('sample',),utils.dist_along(xy)
    return dsxr
| mit |
aosingh/Regularization | Lp/MSE_Lp.py | 1 | 1938 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.datasets.samples_generator import make_regression
from LpTester import start_lp_regression
from mpl_toolkits.mplot3d import Axes3D
# NOTE(review): this script uses Python 2 print statements; run under
# Python 2.x (or port the prints before using Python 3).

# Define synthetic data-set constants. Change this to experiment with different data sets
NUM_OF_SAMPLES = 1000
NUM_OF_FEATURES = 2
NOISE = 10

# Define the number of iterations and learning rate for Linear regression.
NUM_OF_ITERATIONS = 2000
LEARNING_RATE = 0.01
LP_REGULARIZATION_STRENGTH = 1.0

# generate sample data-set using the following function.
training_rec, out = make_regression(n_samples=NUM_OF_SAMPLES,
                                    n_features=NUM_OF_FEATURES,
                                    n_informative=1,
                                    noise=NOISE)

# Add a columns of 1s as bias(intercept) in the training records
training_rec = np.c_[np.ones(training_rec.shape[0]), training_rec]

# Run Lp-regularized regression; returns the weight history per iteration
# and the MSE cost history.
weights_table, MSEcost = start_lp_regression(training_records=training_rec, output=out)

# Collect iteration index and the two non-bias coefficients for plotting.
itr = []
w1 = []
w2 = []
for i in range(0,len(weights_table)-1):
    itr.append(i)
    #if not (i==0):
    print "w1: ", weights_table[i][1]
    w1.append(weights_table[i][1])
    w2.append(weights_table[i][2])
    print "w2: ", weights_table[i][2]
    print MSEcost[i]

#plot of error through each gradient descent iteration
# NOTE(review): itr has len(weights_table)-1 entries while MSEcost is used
# whole -- confirm the two sequences have matching lengths.
x = itr
y = MSEcost
plt.errorbar(x, y, xerr=0, yerr=0)
blue_patch = mpatches.Patch(color='blue', label="MSE Error for Lp Regularizer", )
plt.legend(handles=[blue_patch], )
plt.xlabel('Iterations')
plt.ylabel('MSE cost')
plt.show()

#plot error versus w1 and w2
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
x = w1
y = w2
z = MSEcost
ax.plot(x, y, z, label='MSE Error curve for Lp (p=0.5) Regularizer - In coefficient Space')
ax.legend()
plt.xlabel('Coefficient 1')
plt.ylabel('Coefficient 2')
plt.show()
| mit |
antiface/ThinkStats2 | code/brfss.py | 69 | 4708 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import sys
import pandas
import numpy as np
import thinkstats2
import thinkplot
def Summarize(df, column, title):
    """Print summary statistics male, female and all."""
    subsets = (('all', df),
               ('male', df[df.sex == 1]),
               ('female', df[df.sex == 2]))

    print(title)
    print('key\tn\tmean\tvar\tstd\tcv')
    for key, subset in subsets:
        series = subset[column]
        mean = series.mean()
        var = series.var()
        std = math.sqrt(var)
        # coefficient of variation: std relative to the mean
        cv = std / mean
        print('%s\t%d\t%4.2f\t%4.2f\t%4.2f\t%4.4f' %
              (key, len(series), mean, var, std, cv))
def CleanBrfssFrame(df):
    """Recodes BRFSS variables in place.

    Sentinel codes for missing/refused responses become NaN, and weight
    columns are converted to kilograms.

    df: DataFrame with columns age, htm3, wtkg2, wtyrago
    """
    # Assign cleaned columns back instead of calling replace(inplace=True)
    # on a column view; chained in-place modification is fragile/deprecated
    # in modern pandas, while assignment is equivalent and always safe.
    # clean age: 7 and 9 encode "don't know"/"refused"
    df['age'] = df.age.replace([7, 9], float('NaN'))

    # clean height: 999 is missing
    df['htm3'] = df.htm3.replace([999], float('NaN'))

    # clean weight: stored in hundredths of a kg; 99999 is missing
    df['wtkg2'] = df.wtkg2.replace([99999], float('NaN')) / 100.0

    # clean weight a year ago: 7777/9999 are missing; values below 9000
    # are pounds (2.2 lb/kg), values >= 9000 are kilograms offset by 9000
    wtyrago = df.wtyrago.replace([7777, 9999], float('NaN'))
    df['wtyrago'] = wtyrago.apply(lambda x: x/2.2 if x < 9000 else x-9000)
def ReadBrfss(filename='CDBRFS08.ASC.gz', compression='gzip', nrows=None):
    """Reads the BRFSS data.

    filename: string
    compression: string
    nrows: int number of rows to read, or None for all

    returns: DataFrame
    """
    # (name, first column, last column, type) for each fixed-width field
    var_info = [
        ('age', 101, 102, int),
        ('sex', 143, 143, int),
        ('wtyrago', 127, 130, int),
        ('finalwt', 799, 808, int),
        ('wtkg2', 1254, 1258, int),
        ('htm3', 1251, 1253, int),
    ]
    columns = ['name', 'start', 'end', 'type']
    variables = pandas.DataFrame(var_info, columns=columns)
    # make the end column exclusive (half-open interval)
    variables.end += 1

    dct = thinkstats2.FixedWidthVariables(variables, index_base=1)

    df = dct.ReadFixedWidth(filename, compression=compression, nrows=nrows)
    # recode sentinels and convert units in place
    CleanBrfssFrame(df)
    return df
def MakeNormalModel(weights):
    """Plots a CDF with a Normal model.

    weights: sequence
    """
    cdf = thinkstats2.Cdf(weights, label='weights')

    # Trimmed moments reduce the influence of outliers on the fit.
    mean, var = thinkstats2.TrimmedMeanVar(weights)
    std = math.sqrt(var)
    print('n, mean, std', len(weights), mean, std)

    # Render the model over +/- 4 standard deviations.
    xmin = mean - 4 * std
    xmax = mean + 4 * std

    xs, ps = thinkstats2.RenderNormalCdf(mean, std, xmin, xmax)
    thinkplot.Plot(xs, ps, label='model', linewidth=4, color='0.8')
    thinkplot.Cdf(cdf)


def MakeNormalPlot(weights):
    """Generates a normal probability plot of birth weights.

    weights: sequence
    """
    # Trim 1% from each tail before fitting the reference line.
    mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    std = math.sqrt(var)

    xs = [-5, 5]
    xs, ys = thinkstats2.FitLine(xs, mean, std)
    thinkplot.Plot(xs, ys, color='0.8', label='model')

    xs, ys = thinkstats2.NormalProbability(weights)
    thinkplot.Plot(xs, ys, label='weights')
def MakeFigures(df):
    """Generates CDFs and normal prob plots for weights and log weights."""
    weights = df.wtkg2.dropna()
    log_weights = np.log10(weights)

    # plot weights on linear and log scales
    thinkplot.PrePlot(cols=2)
    MakeNormalModel(weights)
    thinkplot.Config(xlabel='adult weight (kg)', ylabel='CDF')

    thinkplot.SubPlot(2)
    MakeNormalModel(log_weights)
    thinkplot.Config(xlabel='adult weight (log10 kg)')

    thinkplot.Save(root='brfss_weight')

    # make normal probability plots on linear and log scales
    thinkplot.PrePlot(cols=2)
    MakeNormalPlot(weights)
    thinkplot.Config(xlabel='z', ylabel='weights (kg)')

    thinkplot.SubPlot(2)
    MakeNormalPlot(log_weights)
    thinkplot.Config(xlabel='z', ylabel='weights (log10 kg)')

    thinkplot.Save(root='brfss_weight_normal')
def main(script, nrows=1000):
    """Tests the functions in this module.

    script: string script name
    nrows: number of rows of BRFSS data to read
    """
    thinkstats2.RandomSeed(17)

    nrows = int(nrows)
    df = ReadBrfss(nrows=nrows)
    MakeFigures(df)

    Summarize(df, 'htm3', 'Height (cm):')
    Summarize(df, 'wtkg2', 'Weight (kg):')
    Summarize(df, 'wtyrago', 'Weight year ago (kg):')

    # Spot-check known value counts for the default 1000-row sample.
    if nrows == 1000:
        assert(df.age.value_counts()[40] == 28)
        assert(df.sex.value_counts()[2] == 668)
        assert(df.wtkg2.value_counts()[90.91] == 49)
        assert(df.wtyrago.value_counts()[160/2.2] == 49)
        assert(df.htm3.value_counts()[163] == 103)
        assert(df.finalwt.value_counts()[185.870345] == 13)
        print('%s: All tests passed.' % script)
# Script entry point: forward command-line arguments to main().
if __name__ == '__main__':
    main(*sys.argv)
| gpl-3.0 |
glennq/scikit-learn | sklearn/feature_selection/tests/test_mutual_info.py | 56 | 6268 | from __future__ import division
import numpy as np
from numpy.testing import run_module_suite
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_array_equal, assert_almost_equal,
assert_false, assert_raises, assert_equal)
from sklearn.feature_selection.mutual_info_ import (
mutual_info_regression, mutual_info_classif, _compute_mi)
def test_compute_mi_dd():
    """_compute_mi on two discrete vectors matches a hand computation."""
    # In discrete case computations are straightforward and can be done
    # by hand on given vectors.
    x = np.array([0, 1, 1, 0, 0])
    y = np.array([1, 0, 0, 0, 1])

    # Empirical entropies; I(X;Y) = H(X) + H(Y) - H(X,Y)
    H_x = H_y = -(3/5) * np.log(3/5) - (2/5) * np.log(2/5)
    H_xy = -1/5 * np.log(1/5) - 2/5 * np.log(2/5) - 2/5 * np.log(2/5)
    I_xy = H_x + H_y - H_xy

    assert_almost_equal(_compute_mi(x, y, True, True), I_xy)


def test_compute_mi_cc():
    """Continuous/continuous MI approximates the bivariate-normal formula."""
    # For two continuous variables a good approach is to test on bivariate
    # normal distribution, where mutual information is known.

    # Mean of the distribution, irrelevant for mutual information.
    mean = np.zeros(2)

    # Setup covariance matrix with correlation coeff. equal 0.5.
    sigma_1 = 1
    sigma_2 = 10
    corr = 0.5
    cov = np.array([
        [sigma_1**2, corr * sigma_1 * sigma_2],
        [corr * sigma_1 * sigma_2, sigma_2**2]
    ])

    # True theoretical mutual information.
    I_theory = (np.log(sigma_1) + np.log(sigma_2) -
                0.5 * np.log(np.linalg.det(cov)))

    np.random.seed(0)
    Z = np.random.multivariate_normal(mean, cov, size=1000)

    x, y = Z[:, 0], Z[:, 1]

    # Theory and computed values won't be very close, assert that the
    # first figures after decimal point match.
    for n_neighbors in [3, 5, 7]:
        I_computed = _compute_mi(x, y, False, False, n_neighbors)
        assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd():
    """Discrete/continuous MI matches an analytically-known mixed joint."""
    # To test define a joint distribution as follows:
    # p(x, y) = p(x) p(y | x)
    # X ~ Bernoulli(p)
    # (Y | x = 0) ~ Uniform(-1, 1)
    # (Y | x = 1) ~ Uniform(0, 2)

    # Use the following formula for mutual information:
    # I(X; Y) = H(Y) - H(Y | X)
    # Two entropies can be computed by hand:
    # H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
    # H(Y | X) = ln(2)

    # Now we need to implement sampling from out distribution, which is
    # done easily using conditional distribution logic.

    n_samples = 1000
    np.random.seed(0)

    for p in [0.3, 0.5, 0.7]:
        x = np.random.uniform(size=n_samples) > p

        y = np.empty(n_samples)
        mask = x == 0
        y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
        y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))

        I_theory = -0.5 * ((1 - p) * np.log(0.5 * (1 - p)) +
                           p * np.log(0.5 * p) + np.log(0.5)) - np.log(2)

        # Assert the same tolerance.
        for n_neighbors in [3, 5, 7]:
            I_computed = _compute_mi(x, y, True, False, n_neighbors)
            assert_almost_equal(I_computed, I_theory, 1)


def test_compute_mi_cd_unique_label():
    """Appending a unique (label, value) pair must not change the MI."""
    # Test that adding unique label doesn't change MI.
    n_samples = 100
    x = np.random.uniform(size=n_samples) > 0.5

    y = np.empty(n_samples)
    mask = x == 0
    y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
    y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))

    mi_1 = _compute_mi(x, y, True, False)

    x = np.hstack((x, 2))
    y = np.hstack((y, 10))
    mi_2 = _compute_mi(x, y, True, False)

    assert_equal(mi_1, mi_2)
# We are going to test that feature ordering by MI matches our expectations.
def test_mutual_info_classif_discrete():
    """Feature ranking on a small, fully discrete classification problem."""
    X = np.array([[0, 0, 0],
                  [1, 1, 0],
                  [2, 0, 1],
                  [2, 0, 1],
                  [2, 0, 1]])
    y = np.array([0, 1, 2, 2, 1])

    # Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
    # informative.
    mi = mutual_info_classif(X, y, discrete_features=True)
    assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))


def test_mutual_info_regression():
    """Feature ranking on a correlated multivariate-normal regression."""
    # We generate sample from multivariate normal distribution, using
    # transformation from initially uncorrelated variables. The zero
    # variables after transformation is selected as the target vector,
    # it has the strongest correlation with the variable 2, and
    # the weakest correlation with the variable 1.
    T = np.array([
        [1, 0.5, 2, 1],
        [0, 1, 0.1, 0.0],
        [0, 0.1, 1, 0.1],
        [0, 0.1, 0.1, 1]
    ])
    cov = T.dot(T.T)
    mean = np.zeros(4)

    np.random.seed(0)
    Z = np.random.multivariate_normal(mean, cov, size=1000)
    X = Z[:, 1:]
    y = Z[:, 0]

    mi = mutual_info_regression(X, y, random_state=0)
    assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
def test_mutual_info_classif_mixed():
    """Ranking with a mix of continuous and discrete features."""
    # Here the target is discrete and there are two continuous and one
    # discrete feature. The idea of this test is clear from the code.
    np.random.seed(0)
    X = np.random.rand(1000, 3)
    X[:, 1] += X[:, 0]
    y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
    # binarize the third feature so it is genuinely discrete
    X[:, 2] = X[:, 2] > 0.5

    mi = mutual_info_classif(X, y, discrete_features=[2], random_state=0)
    assert_array_equal(np.argsort(-mi), [2, 0, 1])
def test_mutual_info_options():
    """Check discrete_features handling and sparse-input restrictions for
    both estimators."""
    X = np.array([[0, 0, 0],
                  [1, 1, 0],
                  [2, 0, 1],
                  [2, 0, 1],
                  [2, 0, 1]], dtype=float)
    y = np.array([0, 1, 2, 2, 1], dtype=float)
    X_csr = csr_matrix(X)

    for mutual_info in (mutual_info_regression, mutual_info_classif):
        # Bug fix: the original always passed mutual_info_regression here,
        # ignoring the loop variable, so the sparse+discrete_features=False
        # error path of mutual_info_classif was never exercised.
        assert_raises(ValueError, mutual_info, X_csr, y,
                      discrete_features=False)

        mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0)
        mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)

        mi_3 = mutual_info(X_csr, y, discrete_features='auto',
                           random_state=0)
        mi_4 = mutual_info(X_csr, y, discrete_features=True,
                           random_state=0)

        # dense 'auto' falls back to the continuous treatment ...
        assert_array_equal(mi_1, mi_2)
        # ... while sparse 'auto' treats features as discrete.
        assert_array_equal(mi_3, mi_4)

        assert_false(np.allclose(mi_1, mi_3))
# Allow running this test module directly.
if __name__ == '__main__':
    run_module_suite()
| bsd-3-clause |
poryfly/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""

STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""


def raise_build_error(e):
    """Raise a comprehensible ImportError for a broken build.

    Lists the contents of the package directory in the message to help
    debugging on the mailing list.
    """
    local_dir = os.path.split(__file__)[0]
    # The local source checkout only works with an inplace build.
    msg = INPLACE_MSG if local_dir == "sklearn/__check_build" else STANDARD_MSG
    # Tabulate the directory listing, three names per row.
    dir_content = []
    for position, filename in enumerate(os.listdir(local_dir), start=1):
        if position % 3 == 0:
            dir_content.append(filename + '\n')
        else:
            dir_content.append(filename.ljust(26))
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.

If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
    from ._check_build import check_build
except ImportError as e:
    # The compiled extension is missing or broken; raise a helpful
    # explanation of how to build the package.
    raise_build_error(e)
| bsd-3-clause |
dhaase-de/dh-python-dh | dh/thirdparty/tqdm/_tqdm.py | 1 | 43483 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict
# native libraries
import sys
from numbers import Number
from threading import Thread
from time import time
from time import sleep
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmDeprecationWarning']
class TqdmTypeError(TypeError):
    """Raised when an argument of the wrong type is passed to tqdm."""
    pass


class TqdmKeyError(KeyError):
    """Raised when an unsupported keyword argument is passed to tqdm."""
    pass
class TqdmDeprecationWarning(Exception):
    """Deprecation warning that is not suppressed if raised.

    When fp_write is provided, the message is written there (prefixed and
    newline-terminated) instead of being stored on the exception.
    """
    def __init__(self, msg, fp_write=None, *a, **k):
        if fp_write is None:
            super(TqdmDeprecationWarning, self).__init__(msg, *a, **k)
        else:
            fp_write("\nTqdmDeprecationWarning: " + str(msg).rstrip() + '\n')
class TMonitor(Thread):
    """
    Monitoring thread for tqdm bars.
    Monitors if tqdm bars are taking too much time to display
    and readjusts miniters automatically if necessary.

    Parameters
    ----------
    tqdm_cls  : class
        tqdm class to use (can be core tqdm or a submodule).
    sleep_interval  : float
        Time to sleep between monitoring checks.
    """

    # internal vars for unit testing: when set, they override the real
    # time()/sleep() functions for all new instances
    _time = None
    _sleep = None

    def __init__(self, tqdm_cls, sleep_interval):
        # setcheckinterval is deprecated
        getattr(sys, 'setswitchinterval',
                getattr(sys, 'setcheckinterval'))(100)
        Thread.__init__(self)
        self.daemon = True  # kill thread when main killed (KeyboardInterrupt)
        self.was_killed = False
        self.woken = 0  # last time woken up, to sync with monitor
        self.tqdm_cls = tqdm_cls
        self.sleep_interval = sleep_interval
        # pick up test-injected clocks if present, else the real ones
        if TMonitor._time is not None:
            self._time = TMonitor._time
        else:
            self._time = time
        if TMonitor._sleep is not None:
            self._sleep = TMonitor._sleep
        else:
            self._sleep = sleep
        self.start()

    def exit(self):
        """Flag the monitor loop to stop and return the liveness report."""
        self.was_killed = True
        # self.join()  # DO NOT, blocking event, slows down tqdm at closing
        return self.report()

    def run(self):
        """Monitor loop: periodically force slow bars to refresh."""
        cur_t = self._time()
        while True:
            # After processing and before sleeping, notify that we woke
            # Need to be done just before sleeping
            self.woken = cur_t
            # Sleep some time...
            self._sleep(self.sleep_interval)
            # Quit if killed
            # if self.exit_event.is_set():  # TODO: should work but does not...
            if self.was_killed:
                return
            # Then monitor!
            cur_t = self._time()
            # Check for each tqdm instance if one is waiting too long to print
            # NB: copy avoids size change during iteration RuntimeError
            for instance in self.tqdm_cls._instances.copy():
                # Only if mininterval > 1 (else iterations are just slow)
                # and last refresh was longer than maxinterval in this instance
                if instance.miniters > 1 and \
                        (cur_t - instance.last_print_t) >= instance.maxinterval:
                    # We force bypassing miniters on next iteration
                    # dynamic_miniters should adjust mininterval automatically
                    instance.miniters = 1
                    # Refresh now! (works only for manual tqdm)
                    instance.refresh()

    def report(self):
        """Return True while the monitor has not been flagged to stop."""
        # return self.is_alive()  # TODO: does not work...
        return not self.was_killed
class tqdm(object):
    """
    Decorate an iterable object, returning an iterator which acts exactly
    like the original iterable, but prints a dynamically updating
    progressbar every time a value is requested.
    """

    # NOTE(review): presumably the interval handed to TMonitor when the
    # monitoring thread is created elsewhere in the class -- not visible here.
    monitor_interval = 10  # set to 0 to disable the thread
    # Shared monitor thread; None until (presumably) lazily created.
    monitor = None
@staticmethod
def format_sizeof(num, suffix=''):
    """
    Formats a number (greater than unity) with SI Order of Magnitude
    prefixes.

    Parameters
    ----------
    num  : float
        Number ( >= 1) to format.
    suffix  : str, optional
        Post-postfix [default: ''].

    Returns
    -------
    out  : str
        Number with Order of Magnitude SI unit postfix.
    """
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        magnitude = abs(num)
        if magnitude >= 999.95:
            # Too large for this prefix; scale down and try the next one.
            num /= 1000.0
            continue
        # Choose a precision keeping roughly three significant digits.
        if magnitude < 9.995:
            template = '{0:1.2f}'
        elif magnitude < 99.95:
            template = '{0:2.1f}'
        else:
            template = '{0:3.0f}'
        return template.format(num) + prefix + suffix
    return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
    """
    Formats a number of seconds as a clock time, [H:]MM:SS

    Parameters
    ----------
    t  : int
        Number of seconds.

    Returns
    -------
    out  : str
        [H:]MM:SS
    """
    total = int(t)
    h = total // 3600
    m = (total // 60) % 60
    s = total % 60
    if h:
        return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
    return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def status_printer(file):
    """
    Manage the printing and in-place updating of a line of characters.
    Note that if the string is longer than a line, then in-place
    updating may not work (it will print a new line at each refresh).

    Returns a print_status(s) callable that rewrites the current line.
    """
    fp = file
    # Fall back to a no-op when the file object has no flush().
    fp_flush = getattr(fp, 'flush', lambda: None)  # pragma: no cover

    def fp_write(s):
        fp.write(_unicode(s))
        fp_flush()

    # Previous status length, boxed in a list so the closure can rebind
    # it under Python 2 (no `nonlocal`).
    last_len = [0]

    def print_status(s):
        len_s = len(s)
        # '\r' returns to line start; pad with spaces to erase leftovers
        # from a longer previous status.
        fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
        last_len[0] = len_s

    return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='',
                 ascii=False, unit='it', unit_scale=False, rate=None,
                 bar_format=None, postfix=None):
    """
    Return a string-based progress bar given some parameters

    Parameters
    ----------
    n : int
        Number of finished iterations.
    total : int
        The expected total number of iterations. If meaningless (), only
        basic progress statistics are displayed (no ETA).
    elapsed : float
        Number of seconds passed since start.
    ncols : int, optional
        The width of the entire output message. If specified,
        dynamically resizes the progress meter to stay within this bound
        [default: None]. The fallback meter width is 10 for the progress
        bar + no limit for the iterations counter and statistics. If 0,
        will not print any meter (only stats).
    prefix : str, optional
        Prefix message (included in total width) [default: ''].
    ascii : bool, optional
        If not set, use unicode (smooth blocks) to fill the meter
        [default: False]. The fallback is to use ASCII characters
        (1-9 #).
    unit : str, optional
        The iteration unit [default: 'it'].
    unit_scale : bool, optional
        If set, the number of iterations will printed with an
        appropriate SI metric prefix (K = 10^3, M = 10^6, etc.)
        [default: False].
    rate : float, optional
        Manual override for iteration rate.
        If [default: None], uses n/elapsed.
    bar_format : str, optional
        Specify a custom bar string formatting. May impact performance.
        [default: '{l_bar}{bar}{r_bar}'], where l_bar is
        '{desc}{percentage:3.0f}%|' and r_bar is
        '| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]'
        Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
        rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
    postfix : str, optional
        Same as prefix but will be placed at the end as additional stats.

    Returns
    -------
    out : Formatted meter and stats, ready to display.
    """
    # sanity check: total
    # (overflowing the expected total means the total was wrong; drop
    # it and fall back to the stats-only display rather than show >100%)
    if total and n > total:
        total = None
    format_interval = tqdm.format_interval
    elapsed_str = format_interval(elapsed)
    # if unspecified, attempt to use rate = average speed
    # (we allow manual override since predicting time is an arcane art)
    if rate is None and elapsed:
        rate = n / elapsed
    # Rates below 1 unit/s read better inverted, i.e. "seconds per unit".
    inv_rate = 1 / rate if (rate and (rate < 1)) else None
    format_sizeof = tqdm.format_sizeof
    rate_fmt = ((format_sizeof(inv_rate if inv_rate else rate)
                 if unit_scale else
                 '{0:5.2f}'.format(inv_rate if inv_rate else rate))
                if rate else '?') \
        + ('s' if inv_rate else unit) + '/' + (unit if inv_rate else 's')
    if unit_scale:
        n_fmt = format_sizeof(n)
        total_fmt = format_sizeof(total) if total else None
    else:
        n_fmt = str(n)
        total_fmt = str(total)
    # total is known: we can predict some stats
    if total:
        # fractional and percentage progress
        frac = n / total
        percentage = frac * 100
        remaining_str = format_interval((total - n) / rate) \
            if rate else '?'
        # format the stats displayed to the left and right sides of the bar
        l_bar = (prefix if prefix else '') + \
            '{0:3.0f}%|'.format(percentage)
        r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
            n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt,
            ', ' + postfix if postfix else '')
        # ncols == 0 means "stats only": strip the bar delimiters.
        if ncols == 0:
            return l_bar[:-1] + r_bar[1:]
        if bar_format:
            # Custom bar formatting
            # Populate a dict with all available progress indicators
            bar_args = {'n': n,
                        'n_fmt': n_fmt,
                        'total': total,
                        'total_fmt': total_fmt,
                        'percentage': percentage,
                        'rate': rate if inv_rate is None else inv_rate,
                        'rate_noinv': rate,
                        'rate_noinv_fmt': ((format_sizeof(rate)
                                            if unit_scale else
                                            '{0:5.2f}'.format(rate))
                                           if rate else '?') + unit + '/s',
                        'rate_fmt': rate_fmt,
                        'elapsed': elapsed_str,
                        'remaining': remaining_str,
                        'l_bar': l_bar,
                        'r_bar': r_bar,
                        'desc': prefix if prefix else '',
                        'postfix': ', ' + postfix if postfix else '',
                        # 'bar': full_bar  # replaced by procedure below
                        }
            # Interpolate supplied bar format with the dict
            if '{bar}' in bar_format:
                # Format left/right sides of the bar, and format the bar
                # later in the remaining space (avoid breaking display)
                l_bar_user, r_bar_user = bar_format.split('{bar}')
                l_bar = l_bar_user.format(**bar_args)
                r_bar = r_bar_user.format(**bar_args)
            else:
                # Else no progress bar, we can just format and return
                return bar_format.format(**bar_args)
        # Formatting progress bar
        # space available for bar's display
        N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
            else 10
        # format bar depending on availability of unicode/ascii chars
        if ascii:
            # ASCII mode: whole cells are '#', the partial cell is a
            # digit 1-9 giving tenths of a cell.
            bar_length, frac_bar_length = divmod(
                int(frac * N_BARS * 10), 10)
            bar = '#' * bar_length
            frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
                else ' '
        else:
            # Unicode mode: full blocks U+2588, partial cell drawn with
            # the eighth-block glyphs U+2589..U+258F.
            bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
            bar = _unich(0x2588) * bar_length
            frac_bar = _unich(0x2590 - frac_bar_length) \
                if frac_bar_length else ' '
        # whitespace padding
        if bar_length < N_BARS:
            full_bar = bar + frac_bar + \
                ' ' * max(N_BARS - bar_length - 1, 0)
        else:
            full_bar = bar + \
                ' ' * max(N_BARS - bar_length, 0)
        # Piece together the bar parts
        return l_bar + full_bar + r_bar
    # no total: no progressbar, ETA, just progress stats
    else:
        return (prefix if prefix else '') + '{0}{1} [{2}, {3}{4}]'.format(
            n_fmt, unit, elapsed_str, rate_fmt,
            ', ' + postfix if postfix else '')
def __new__(cls, *args, **kwargs):
    """Allocate a bar, register it, and (re)start the monitor thread.

    Registration uses a WeakSet so that bars which are closed or
    garbage-collected drop out of `_instances` automatically. A new
    TMonitor is spawned whenever monitoring is enabled
    (monitor_interval truthy) and there is no live monitor
    (monitor.report() returns False once the thread was killed).
    """
    # Create a new instance
    instance = object.__new__(cls)
    # Add to the list of instances
    # NB: checked via cls.__dict__ (not hasattr) so each subclass gets
    # its own registry instead of mutating the parent's.
    if "_instances" not in cls.__dict__:
        cls._instances = WeakSet()
    cls._instances.add(instance)
    # Create the monitoring thread
    if cls.monitor_interval and (cls.monitor is None or
                                 not cls.monitor.report()):
        cls.monitor = TMonitor(cls, cls.monitor_interval)
    # Return the instance
    return instance
@classmethod
def _get_free_pos(cls, instance=None):
""" Skips specified instance """
try:
return max(inst.pos for inst in cls._instances
if inst is not instance) + 1
except ValueError as e:
if "arg is an empty sequence" in str(e):
return 0
raise # pragma: no cover
@classmethod
def _decr_instances(cls, instance):
    """
    Remove from list and reposition other bars
    so that newer bars won't overlap previous bars
    """
    try:  # in case instance was explicitly positioned, it won't be in set
        cls._instances.remove(instance)
        # Shift every bar that was displayed below this one up one line.
        for inst in cls._instances:
            if inst.pos > instance.pos:
                inst.pos -= 1
        # Kill monitor if no instances are left
        if not cls._instances and cls.monitor:
            cls.monitor.exit()
            # del may raise if `monitor` lives on a parent class rather
            # than cls itself -- presumably why AttributeError is
            # tolerated here; TODO confirm.
            try:
                del cls.monitor
            except AttributeError:
                pass
            cls.monitor = None
    except KeyError:
        # Not registered (explicit position) -- nothing to reposition.
        pass
@classmethod
def write(cls, s, file=None, end="\n"):
    """
    Print a message via tqdm (without overlap with bars)

    All bars writing to the same stream (or sharing the terminal via
    stdout/stderr) are cleared first, the message is written, then the
    cleared bars are redrawn below it.
    """
    fp = file if file is not None else sys.stdout
    # Clear all bars
    inst_cleared = []
    for inst in getattr(cls, '_instances', []):
        # Clear instance if in the target output file
        # or if write output + tqdm output are both either
        # sys.stdout or sys.stderr (because both are mixed in terminal)
        if inst.fp == fp or all(f in (sys.stdout, sys.stderr)
                                for f in (fp, inst.fp)):
            inst.clear()
            inst_cleared.append(inst)
    # Write the message
    fp.write(s)
    fp.write(end)
    # Force refresh display of bars we cleared
    for inst in inst_cleared:
        # Avoid race conditions by checking that the instance started
        if hasattr(inst, 'start_t'):  # pragma: nocover
            inst.refresh()
# TODO: make list of all instances incl. absolutely positioned ones?
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
from pandas import Panel
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
*args, *kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = getattr(df, 'ngroups', None)
if total is None: # not grouped
total = len(df) if isinstance(df, Series) \
else df.size // len(df)
else:
total += 1 # pandas calls update once too many
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
t.update()
return func(*args, **kwargs)
# Apply the provided function (in *args and **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, *args, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1,
maxinterval=10.0, miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, dynamic_ncols=False,
smoothing=0.3, bar_format=None, initial=0, position=None,
postfix=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress display update interval, in seconds [default: 10].
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict, optional
Specify additional stats to display at the end of the bar.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
return
if file is None:
file = sys.stderr
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ': ' if desc else ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
self.set_postfix(**postfix)
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
if position is None:
self.pos = self._get_free_pos(self)
else:
self.pos = position
self._instances.remove(self)
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if self.pos:
self.moveto(self.pos)
self.sp(self.format_meter(self.n, total, 0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc, ascii, unit, unit_scale, None, bar_format,
self.postfix))
if self.pos:
self.moveto(-self.pos)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else self.total)
def __enter__(self):
    # Context-manager support: ``with tqdm(...) as t:``.
    return self

def __exit__(self, *exc):
    self.close()
    # Returning False never suppresses exceptions from the with-block.
    return False

def __del__(self):
    # Best-effort cleanup if the user never called close().
    self.close()
def __repr__(self):
    # Render the bar's current state as a string; this is also what
    # update()/refresh() print, so repr(bar) == what is on screen.
    return self.format_meter(self.n, self.total,
                             self._time() - self.start_t,
                             self.dynamic_ncols(self.fp)
                             if self.dynamic_ncols else self.ncols,
                             self.desc, self.ascii, self.unit,
                             self.unit_scale, 1 / self.avg_time
                             if self.avg_time else None, self.bar_format,
                             self.postfix)
def __lt__(self, other):
    # Bars are ordered by their screen position (line offset).
    return self.pos < other.pos

def __le__(self, other):
    return (self < other) or (self == other)

def __eq__(self, other):
    # NB: equality is position-based, not identity-based.
    return self.pos == other.pos

def __ne__(self, other):
    return not (self == other)

def __gt__(self, other):
    return not (self <= other)

def __ge__(self, other):
    return not (self < other)

def __hash__(self):
    # Hash by identity (not by pos) so distinct bars remain distinct in
    # the class-level _instances WeakSet even if positions collide.
    return id(self)
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
_time = self._time
format_meter = self.format_meter
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check the counter first (avoid calls to time())
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
elapsed = cur_t - start_t # optimised if in inner loop
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
if self.pos:
self.moveto(self.pos)
# Printing the bar's update
sp(format_meter(
n, self.total, elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols
else ncols),
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format,
self.postfix))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * \
(mininterval / delta_t
if mininterval and delta_t
else 1) + \
(1 - smoothing) * miniters
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
if self.n - self.last_print_n >= self.miniters:
# We check the counter first, to reduce the overhead of time()
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # should be n?
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
if self.pos:
self.moveto(self.pos)
# Print bar's update
self.sp(self.__repr__())
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
    """
    Cleanup and (if leave=False) close the progressbar.

    Idempotent: sets `disable` so subsequent calls (and __del__) are
    no-ops. Deregisters the bar, then either prints a final state and
    a newline (leave=True) or erases the bar line (leave=False).
    """
    if self.disable:
        return
    # Prevent multiple closures
    self.disable = True
    # decrement instance pos and remove from internal set
    pos = self.pos
    self._decr_instances(self)
    # GUI mode
    if not hasattr(self, "sp"):
        return
    # annoyingly, _supports_unicode isn't good enough
    def fp_write(s):
        self.fp.write(_unicode(s))
    # Probe the stream: writing to a closed file raises ValueError.
    try:
        fp_write('')
    except ValueError as e:
        if 'closed' in str(e):
            return
        raise  # pragma: no cover
    if pos:
        self.moveto(pos)
    if self.leave:
        # Print one final, fully up-to-date bar before leaving it.
        if self.last_print_n < self.n:
            cur_t = self._time()
            # stats for overall rate (no weighted average)
            self.sp(self.format_meter(
                self.n, self.total, cur_t - self.start_t,
                (self.dynamic_ncols(self.fp) if self.dynamic_ncols
                 else self.ncols),
                self.desc, self.ascii, self.unit, self.unit_scale, None,
                self.bar_format, self.postfix))
        if pos:
            self.moveto(-pos)
        else:
            fp_write('\n')
    else:
        self.sp('')  # clear up last bar
        if pos:
            self.moveto(-pos)
        else:
            fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None):
"""
Set/modify description of the progress bar.
"""
self.desc = desc + ': ' if desc else ''
def set_postfix(self, ordered_dict=None, **kwargs):
    """
    Set/modify postfix (additional stats)
    with automatic formatting based on datatype.

    Parameters
    ----------
    ordered_dict : dict or sequence of pairs, optional
        Stats whose insertion order is preserved in the output.
    **kwargs : optional
        Additional stats, appended in sorted key order.

    The result is stored on ``self.postfix`` as a single
    'key=value, key=value' string consumed by format_meter.
    """
    # Sort in alphabetical order to be more deterministic
    # (_OrderedDict is presumably collections.OrderedDict aliased at
    # module level -- TODO confirm.)
    postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
    for key in sorted(kwargs.keys()):
        postfix[key] = kwargs[key]
    # Preprocess stats according to datatype
    for key in postfix.keys():
        # Number: limit the length of the string
        if isinstance(postfix[key], Number):
            postfix[key] = '{0:2.3g}'.format(postfix[key])
        # Else for any other type, try to get the string conversion
        elif not isinstance(postfix[key], _basestring):
            postfix[key] = str(postfix[key])
        # Else if it's a string, don't need to preprocess anything
    # Stitch together to get the final postfix
    self.postfix = ', '.join(key + '=' + postfix[key].strip()
                             for key in postfix.keys())
def moveto(self, n):
    # Move the cursor n lines down (n > 0) or up (n < 0) using
    # newlines resp. the terminal move-up escape from _term_move_up();
    # exactly one of the two operands is a non-empty string.
    self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
def clear(self, nomove=False):
    """
    Clear current bar display

    Parameters
    ----------
    nomove : bool, optional
        If True, assume the cursor is already on this bar's line and
        leave it there afterwards [default: False].
    """
    if self.disable:
        return
    if not nomove:
        self.moveto(self.pos)
    # clear up the bar (can't rely on sp(''))
    self.fp.write('\r')
    # Overwrite the line with spaces; 10 is the fallback meter width
    # when ncols is unknown.
    self.fp.write(' ' * (self.ncols if self.ncols else 10))
    self.fp.write('\r')  # place cursor back at the beginning of line
    if not nomove:
        self.moveto(-self.pos)
def refresh(self):
    """
    Force refresh the display of this bar
    """
    if self.disable:
        return
    self.moveto(self.pos)
    # clear up this line's content (whatever there was)
    self.clear(nomove=True)
    # Print current/last bar state (repr renders via format_meter)
    self.fp.write(self.__repr__())
    self.moveto(-self.pos)
def trange(*args, **kwargs):
    """
    A shortcut for tqdm(xrange(*args), **kwargs).
    On Python3+ range is used instead of xrange.
    """
    # _range is presumably the py2/py3 xrange/range compatibility alias
    # from the utils module -- TODO confirm.
    return tqdm(_range(*args), **kwargs)
| mit |
linebp/pandas | pandas/tests/series/test_period.py | 7 | 8836 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas import Series, period_range, DataFrame, Period
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesPeriod(object):
def setup_method(self, method):
    # Fresh fixture per test: a 10-element Series of daily Periods
    # starting 2000-01-01 (stored as object dtype, see
    # test_auto_conversion).
    self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_auto_conversion(self):
series = Series(list(period_range('2000-01-01', periods=10, freq='D')))
assert series.dtype == 'object'
series = pd.Series([pd.Period('2011-01-01', freq='D'),
pd.Period('2011-02-01', freq='D')])
assert series.dtype == 'object'
def test_getitem(self):
assert self.series[1] == pd.Period('2000-01-02', freq='D')
result = self.series[[2, 4]]
exp = pd.Series([pd.Period('2000-01-03', freq='D'),
pd.Period('2000-01-05', freq='D')],
index=[2, 4])
tm.assert_series_equal(result, exp)
assert result.dtype == 'object'
def test_isnull(self):
# GH 13737
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('NaT', freq='M')])
tm.assert_series_equal(s.isnull(), Series([False, True]))
tm.assert_series_equal(s.notnull(), Series([True, False]))
def test_fillna(self):
# GH 13737
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('NaT', freq='M')])
res = s.fillna(pd.Period('2012-01', freq='M'))
exp = Series([pd.Period('2011-01', freq='M'),
pd.Period('2012-01', freq='M')])
tm.assert_series_equal(res, exp)
assert res.dtype == 'object'
res = s.fillna('XXX')
exp = Series([pd.Period('2011-01', freq='M'), 'XXX'])
tm.assert_series_equal(res, exp)
assert res.dtype == 'object'
def test_dropna(self):
# GH 13737
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('NaT', freq='M')])
tm.assert_series_equal(s.dropna(),
Series([pd.Period('2011-01', freq='M')]))
def test_series_comparison_scalars(self):
val = pd.Period('2000-01-04', freq='D')
result = self.series > val
expected = pd.Series([x > val for x in self.series])
tm.assert_series_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = pd.Series([x > val for x in self.series])
tm.assert_series_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# NaT support
"""
# ToDo: Enable when support period dtype
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='period[D]')
val = series[3]
assert isnull(val)
series[2] = val
assert isnull(series[2])
def test_NaT_cast(self):
result = Series([np.nan]).astype('period[D]')
expected = Series([NaT])
tm.assert_series_equal(result, expected)
"""
def test_set_none_nan(self):
# currently Period is stored as object dtype, not as NaT
self.series[3] = None
assert self.series[3] is None
self.series[3:5] = None
assert self.series[4] is None
self.series[5] = np.nan
assert np.isnan(self.series[5])
self.series[5:7] = np.nan
assert np.isnan(self.series[6])
def test_intercept_astype_object(self):
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
df = DataFrame({'a': self.series, 'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
def test_comp_series_period_scalar(self):
# GH 13200
for freq in ['M', '2M', '3M']:
base = Series([Period(x, freq=freq) for x in
['2011-01', '2011-02', '2011-03', '2011-04']])
p = Period('2011-02', freq=freq)
exp = pd.Series([False, True, False, False])
tm.assert_series_equal(base == p, exp)
tm.assert_series_equal(p == base, exp)
exp = pd.Series([True, False, True, True])
tm.assert_series_equal(base != p, exp)
tm.assert_series_equal(p != base, exp)
exp = pd.Series([False, False, True, True])
tm.assert_series_equal(base > p, exp)
tm.assert_series_equal(p < base, exp)
exp = pd.Series([True, False, False, False])
tm.assert_series_equal(base < p, exp)
tm.assert_series_equal(p > base, exp)
exp = pd.Series([False, True, True, True])
tm.assert_series_equal(base >= p, exp)
tm.assert_series_equal(p <= base, exp)
exp = pd.Series([True, True, False, False])
tm.assert_series_equal(base <= p, exp)
tm.assert_series_equal(p >= base, exp)
# different base freq
msg = "Input has different freq=A-DEC from Period"
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
base <= Period('2011', freq='A')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
Period('2011', freq='A') >= base
def test_comp_series_period_series(self):
# GH 13200
for freq in ['M', '2M', '3M']:
base = Series([Period(x, freq=freq) for x in
['2011-01', '2011-02', '2011-03', '2011-04']])
s = Series([Period(x, freq=freq) for x in
['2011-02', '2011-01', '2011-03', '2011-05']])
exp = Series([False, False, True, False])
tm.assert_series_equal(base == s, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != s, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > s, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < s, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= s, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= s, exp)
s2 = Series([Period(x, freq='A') for x in
['2011', '2011', '2011', '2011']])
# different base freq
msg = "Input has different freq=A-DEC from Period"
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
base <= s2
    def test_comp_series_period_object(self):
        # GH 13200
        # Object-dtype Series of Periods with *mixed* frequencies: comparisons
        # are still element-wise, and each pair of elements here shares the
        # same frequency, so no IncompatibleFrequency is raised.
        base = Series([Period('2011', freq='A'), Period('2011-02', freq='M'),
                       Period('2013', freq='A'), Period('2011-04', freq='M')])

        s = Series([Period('2012', freq='A'), Period('2011-01', freq='M'),
                    Period('2013', freq='A'), Period('2011-05', freq='M')])

        exp = Series([False, False, True, False])
        tm.assert_series_equal(base == s, exp)

        exp = Series([True, True, False, True])
        tm.assert_series_equal(base != s, exp)

        exp = Series([False, True, False, False])
        tm.assert_series_equal(base > s, exp)

        exp = Series([True, False, False, True])
        tm.assert_series_equal(base < s, exp)

        exp = Series([False, True, True, False])
        tm.assert_series_equal(base >= s, exp)

        exp = Series([True, False, True, True])
        tm.assert_series_equal(base <= s, exp)
    def test_align_series(self):
        """Arithmetic between Period-indexed Series aligns on the index."""
        rng = period_range('1/1/2000', '1/1/2010', freq='A')
        ts = Series(np.random.randn(len(rng)), index=rng)

        # Adding a subsampled series leaves NaN at the unmatched labels.
        result = ts + ts[::2]
        expected = ts + ts
        expected[1::2] = np.nan
        tm.assert_series_equal(result, expected)

        # Alignment is by label, so a permuted index gives the same result.
        result = ts + _permute(ts[::2])
        tm.assert_series_equal(result, expected)

        # it works!
        for kind in ['inner', 'outer', 'left', 'right']:
            ts.align(ts[::2], join=kind)
        # Aligning across different frequencies is an error.
        msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
        with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
            ts + ts.asfreq('D', how="end")
| bsd-3-clause |
google/audio-to-tactile | extras/python/phonetics/phone_model.py | 1 | 22911 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Train and eval a network for mapping audio to 2D vowel space coordinate."""
import datetime
import functools
import os
import os.path
import random
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple
from absl import flags
import dataclasses
import haiku as hk
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.figure
import numpy as np
import optax
import scipy.ndimage
from extras.python.phonetics import hk_util
from extras.python.phonetics import phone_util
from extras.python.phonetics import plot
from extras.python.phonetics import stats
# By default, train to classify these monophthong vowel classes.
# Additionally, TIMIT has these consonant classes (after merging several very
# similar classes):
# r,z,n,f,dh,s,v,m,l,sh,hh,ng,w,q,y,k,th,el,p,ch,t,en,jh,g,b,zh,d,em,dx,nx,eng
# and there is a "sil" (silence) class for pauses between speech.
DEFAULT_CLASSES = 'aa,uw,ih,iy,eh,ae,ah,er'
FLAGS = flags.FLAGS
# Model hyperparameters.
flags.DEFINE_list('classes', DEFAULT_CLASSES,
'The model is trained to classify these phoneme classes.')
flags.DEFINE_list('hidden_units', ['16', '16'],
'List where the ith element represents the number of units '
'in the ith hidden layer.')
flags.DEFINE_float('h1_penalty', 1e-4,
'h1 regularizer penalty weight on the first layer.')
flags.DEFINE_float('l1_penalty', 1e-4,
'L1 regularizer penalty weight on the other layers.')
flags.DEFINE_float('disperse_penalty', 0.75,
'Penalty to disperse embedded points of different labels.')
flags.DEFINE_float('disperse_separation', 0.3,
'Parameter of disperse penalty.')
flags.DEFINE_float('mapping_penalty', 400.0,
'Penalty to encourage matching MAPPING_TARGETS.')
flags.DEFINE_float('mapping_delta', 0.1,
'Charbonnier delta parameter in mapping penalty.')
# Training flags.
flags.DEFINE_float('validation_fraction', 0.05,
'Fraction of training dataset to use for validation.')
flags.DEFINE_integer('num_epochs', 10,
'Number of training epochs.')
flags.DEFINE_integer('batch_size', 512,
'Number of training examples per batch.')
def angle2cart(angle_deg: float, mag: float = 1.0) -> Tuple[float, float]:
  """Converts a polar coordinate (angle in degrees, magnitude) to Cartesian."""
  theta = angle_deg * np.pi / 180.0
  x = mag * np.cos(theta)
  y = mag * np.sin(theta)
  return (x, y)
# 2D target mapping coordinates for each class.
MAPPING_TARGETS = {
'aa': angle2cart(-90),
'uw': angle2cart(-30),
'ih': angle2cart(30),
'iy': angle2cart(90),
'eh': angle2cart(150),
'ae': angle2cart(-150),
'ah': (0.0, 0.0),
'er': angle2cart(0),
'uh': angle2cart(-90, 0.5),
}
@dataclasses.dataclass
class Metadata:
  """Hyperparameters and training configuration for the phone model."""

  # Phoneme class names the model is trained to classify.
  classes: Sequence[str]
  # Number of units in each encoder hidden layer, in order.
  hidden_units: Sequence[int]
  # H1 (smoothness) regularizer weight for the first encoder layer.
  h1_penalty: float
  # L1 regularizer weight for the remaining layers.
  l1_penalty: float
  # Weight of the penalty pushing apart embeddings of different labels.
  disperse_penalty: float
  # Distance scale parameter of the disperse penalty.
  disperse_separation: float
  # Weight of the penalty pulling embeddings toward MAPPING_TARGETS.
  mapping_penalty: float
  # Charbonnier delta parameter used in the mapping penalty.
  mapping_delta: float
  # Fraction of the training data held out for validation.
  validation_fraction: float
  # Number of passes over the training data.
  num_epochs: int
  # Number of examples per training batch.
  batch_size: int
  # Dataset metadata (e.g. num_channels, num_frames_left_context);
  # filled in externally before building the model.
  dataset_metadata: Optional[Mapping[str, Any]] = None

  @staticmethod
  def from_flags() -> 'Metadata':
    """Construct Metadata from flags."""
    return Metadata(
        classes=FLAGS.classes,
        hidden_units=tuple(int(units) for units in FLAGS.hidden_units),
        h1_penalty=FLAGS.h1_penalty,
        l1_penalty=FLAGS.l1_penalty,
        disperse_penalty=FLAGS.disperse_penalty,
        disperse_separation=FLAGS.disperse_separation,
        mapping_penalty=FLAGS.mapping_penalty,
        mapping_delta=FLAGS.mapping_delta,
        validation_fraction=FLAGS.validation_fraction,
        num_epochs=FLAGS.num_epochs,
        batch_size=FLAGS.batch_size,
    )
def load_dataset(npz_file: str,
                 classes: Sequence[str],
                 class_weights: Optional[Dict[str, float]] = None,
                 ) -> phone_util.Dataset:
  """Loads training or testing data from a numpy .npz file.

  The .npz file holds 3D arrays of examples. The arrays are named according to
  which class they represent, e.g. an array named 'ae' represents examples with
  ground truth label 'ae'.

  Args:
    npz_file: String, npz filename.
    classes: List of phoneme class names to train the model to classify.
    class_weights: Dict, class weights for randomly subsampling the data. The
      fraction of examples retained for class `phone` is
      `class_weights.get(phone, 1.0) / max(class_weights.value())`
  Returns:
    Dataset.
  """
  if class_weights is None:
    class_weights = {}
  # Normalize weights so that the most-weighted class keeps all its examples
  # and every other class keeps a proportional fraction.
  max_weight = max(class_weights.values()) if class_weights else 1.0
  class_weights = {phone: class_weights.get(phone, 1.0) / max_weight
                   for phone in classes}
  dataset = phone_util.read_dataset_npz(npz_file)
  # Randomly drop examples per class according to the normalized weights.
  dataset.subsample(class_weights)
  return dataset
def embedding_regularizer(embedded: jnp.ndarray,
                          labels: jnp.ndarray,
                          meta: Metadata) -> jnp.ndarray:
  """Penalty to encourage good distribution in the embedding space.

  Args:
    embedded: Array of shape (batch, num_frames, 2) of embedded frames.
    labels: Integer array of shape (batch,) of class indices.
    meta: Metadata providing the penalty hyperparameters.

  Returns:
    Scalar penalty, averaged over the batch.
  """
  # Only the last (most recent) frame of each example is penalized.
  embedded = embedded[:, -1, :]
  batch_size = embedded.shape[0]

  # Penalize close points of different labels according to
  #   1 / (1 + (min(distance, separation) / separation)^2)
  # Comparing all pairs of points would cost quadratically with batch size. To
  # reduce the cost to linear, we compare only the first point to the rest of
  # the batch.
  p0 = embedded[0]
  others = embedded[1:]
  separation_sqr = meta.disperse_separation**2
  dist_sqr = (p0[0] - others[:, 0])**2 + (p0[1] - others[:, 1])**2
  penalties = meta.disperse_penalty * jnp.sum(
      jnp.where(labels[0] != labels[1:], 1, 0.0) *
      1 / (1 + jnp.minimum(dist_sqr, separation_sqr) / separation_sqr))

  # For phones in MAPPING_TARGETS, use a Charbonnier loss to encourage points of
  # those labels to be close to the target.
  for i, phone in enumerate(meta.classes):
    if phone in MAPPING_TARGETS:
      tx, ty = MAPPING_TARGETS[phone]
      # The dot product with the boolean mask sums squared distances over
      # just the examples whose label is class i.
      penalties += meta.mapping_penalty * hk_util.charbonnier_loss_on_squared(
          jnp.dot(labels == i,
                  (embedded[:, 0] - tx)**2 + (embedded[:, 1] - ty)**2),
          meta.mapping_delta)

  return penalties / batch_size
def model_fun(batch, meta: Metadata) -> Dict[str, jnp.ndarray]:
  """Builds model for phone mapping.

  Args:
    batch: Dict with key 'observed' holding a (batch, num_frames,
      num_channels) array of input features.
    meta: Model hyperparameters. Regularizer weights are read from `meta`
      (not FLAGS) so the model is fully described by its Metadata.

  Returns:
    Dict with keys:
      'embedded': (batch, num_frames, 2) embedding coordinates in the hexagon.
      'scores': (batch, num_classes) classification scores.
      'penalties': scalar sum of regularization penalty terms.
  """
  num_frames = 1 + meta.dataset_metadata['num_frames_left_context']
  num_channels = meta.dataset_metadata['num_channels']
  penalties = 0.0

  # The network input x has shape (batch, num_frames, num_channels), where
  # typically batch=512, num_frames=3, num_channels=56.
  x = batch['observed'].astype(jnp.float32)
  # Compute mean PCEN power of the frame.
  mean_power = jnp.mean(x, axis=-1, keepdims=True)

  #### Encoder. ####
  # The first few layers of the network process each frame independently. We
  # temporarily reshape to (batch * num_frames, num_channels), flattening the
  # frames dimension into the batch dimension.
  x = jnp.reshape(x, (-1, num_channels))

  # Read penalty weights from `meta` rather than FLAGS, consistent with how
  # every other hyperparameter in this function is obtained. (Metadata is
  # constructed from the same flags, so behavior is unchanged.)
  h1_regularizer = lambda w: meta.h1_penalty * hk_util.h1_loss(w)
  l1_regularizer = lambda w: meta.l1_penalty * hk_util.l1_loss(w)

  # Apply several fully-connected layers. Use H1 regularization on the first
  # layer to encourage smoothness along the channel dimension.
  for i, units in enumerate(meta.hidden_units):
    w_regularizer = h1_regularizer if i == 0 else l1_regularizer
    x, penalty_term = hk_util.Linear(units, w_regularizer=w_regularizer)(x)
    penalties += penalty_term
    x = jax.nn.relu(x)

  # Bottleneck layer, mapping the frame down to a 2D embedding space. We use
  # tanh activation to restrict embedding to the square [-1, 1] x [-1, 1].
  x, penalty_term = hk_util.Linear(2, w_regularizer=None)(x)
  penalties += penalty_term
  # Constrain embedded point to the hexagon. The 1e-4 floor avoids a divide
  # by zero at the origin.
  embed_r = 1e-4 + hk_util.hexagon_norm(x[:, 0], x[:, 1])
  x *= (jax.lax.tanh(embed_r) / embed_r).reshape(-1, 1)

  # Now we reshape the frame dimension back out of the batch dimension. The next
  # steps will process the embedded frames jointly.
  embedded = x = jnp.reshape(x, (-1, num_frames, x.shape[-1]))
  # Concatenate with mean_power to make a 3D embedding space. This extra
  # dimension is meant as a proxy for the information in the energy envelope.
  x = jnp.concatenate((x, mean_power), axis=-1)

  #### Decoder. ####
  # Decoder with a fixed architecture of 16-unit hidden layer.
  x = hk.Flatten()(x)
  x, penalty_term = hk_util.Linear(16, w_regularizer=l1_regularizer)(x)
  penalties += penalty_term
  x = jax.nn.relu(x)

  # Final layer producing a score for each phone class.
  scores, penalty_term = hk_util.Linear(len(meta.classes),
                                        w_regularizer=l1_regularizer)(x)
  penalties += penalty_term
  return {'embedded': embedded,
          'scores': scores,
          'penalties': penalties}
def train_model(meta: Metadata,
                dataset: phone_util.Dataset) -> hk_util.TrainedModel:
  """Train the model.

  Args:
    meta: Model hyperparameters and training configuration.
    dataset: Training data; a validation split is carved off internally
      according to `meta.validation_fraction`.

  Returns:
    hk_util.TrainedModel bundling the Haiku model, metadata, and params.
  """
  model = hk_util.transform(functools.partial(model_fun, meta=meta))

  # Split off a separate validation dataset.
  dataset_val, dataset_train = dataset.split(meta.validation_fraction)

  def generate_batches(dataset: phone_util.Dataset, batch_size: int):
    """Partition into batches. Examples in any partial batch are dropped."""
    x, y = dataset.get_xy_arrays(meta.classes, shuffle=True)
    batch_size = min(batch_size, len(x))
    num_batches = len(x) // batch_size
    batches_x = x[:num_batches * batch_size].reshape(
        num_batches, batch_size, *x.shape[1:])
    batches_y = y[:num_batches * batch_size].reshape(
        num_batches, batch_size)
    return batches_x, batches_y

  train_x, train_y = generate_batches(dataset_train, batch_size=meta.batch_size)
  # Accuracy evaluation uses a single large batch (up to 10000 examples)
  # drawn from each split; only the first batch is used.
  t_eval_x, t_eval_y = generate_batches(dataset_train, batch_size=10000)
  t_eval_batch = {'observed': t_eval_x[0], 'label': t_eval_y[0]}
  v_eval_x, v_eval_y = generate_batches(dataset_val, batch_size=10000)
  v_eval_batch = {'observed': v_eval_x[0], 'label': v_eval_y[0]}

  # Initialize network and optimizer.
  seed = np.uint64(random.getrandbits(64))
  params = model.init(jax.random.PRNGKey(seed),
                      {'observed': train_x[0], 'label': train_y[0]})
  optimizer = optax.adam(1e-3)
  opt_state = optimizer.init(params)

  # Print model summary.
  print(hk_util.summarize_model(params))

  def loss_fun(params, batch):
    """Training loss to optimize."""
    outputs = model.apply(params, None, batch)
    labels = hk.one_hot(batch['label'], len(meta.classes))
    # Softmax cross entropy, averaged over the batch.
    softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(outputs['scores']))
    softmax_xent /= labels.shape[0]
    disperse = embedding_regularizer(outputs['embedded'], batch['label'], meta)
    return softmax_xent + disperse + outputs['penalties']

  @jax.jit
  def train_step(params, opt_state, batch):
    """Learning update rule."""
    grads = jax.grad(loss_fun)(params, batch)
    updates, opt_state = optimizer.update(grads, opt_state)
    new_params = optax.apply_updates(params, updates)
    return new_params, opt_state

  @jax.jit
  def accuracy(params, batch):
    """Evaluate classification accuracy."""
    scores = model.apply(params, None, batch)['scores']
    return jnp.mean(jnp.argmax(scores, axis=-1) == batch['label'])

  # Training loop.
  num_steps = len(train_x) * meta.num_epochs
  step_digits = len(str(num_steps))
  step = 0
  for _ in range(meta.num_epochs):
    for batch_x, batch_y in zip(train_x, train_y):
      step += 1
      train_batch = {'observed': batch_x, 'label': batch_y}
      final_step = (step == num_steps)
      if final_step or step % 500 == 0:
        # Periodically evaluate classification accuracy on train & test sets.
        train_accuracy = accuracy(params, t_eval_batch)
        val_accuracy = accuracy(params, v_eval_batch)
        train_accuracy, val_accuracy = jax.device_get(
            (train_accuracy, val_accuracy))
        print(f'[{step:-{step_digits}d}/{num_steps}] train acc = '
              f'{train_accuracy:.4f}, val acc = {val_accuracy:.4f}')
      params, opt_state = train_step(params, opt_state, train_batch)

  return hk_util.TrainedModel(model, meta=meta, params=params)
def compute_2d_hists(labels: np.ndarray,
                     coords: np.ndarray,
                     num_classes: int,
                     num_bins: int = 50,
                     hist_smoothing_stddev=0.05) -> np.ndarray:
  """Computes one smoothed, normalized 2D histogram per class.

  Each histogram covers [-1, 1] x [-1, 1], is smoothed with a Gaussian of
  stddev `hist_smoothing_stddev` (in coordinate units), and is normalized so
  that it integrates to one.

  Args:
    labels: Integer array of class indices, one per point.
    coords: Array of 2D points, one per label.
    num_classes: Number of classes (histograms) to produce.
    num_bins: Number of bins along each axis.
    hist_smoothing_stddev: Gaussian smoothing stddev in coordinate units.

  Returns:
    Array of shape (num_classes, num_bins, num_bins).
  """
  bin_width = 2 / num_bins
  # Convert the smoothing stddev from coordinate units to bins.
  sigma = hist_smoothing_stddev / bin_width
  hist_range = ([-1, 1], [-1, 1])
  hist = np.empty((num_classes, num_bins, num_bins))
  for label in range(num_classes):
    points = coords[labels == label].reshape(-1, 2)
    # Note the (y, x) argument order so rows index the vertical axis.
    counts = np.histogram2d(points[:, 1], points[:, 0], bins=num_bins,
                            range=hist_range)[0]
    smoothed = scipy.ndimage.gaussian_filter(counts, sigma)
    # Normalize density to integrate to one (1e-12 guards empty classes).
    hist[label] = smoothed / (1e-12 + bin_width**2 * smoothed.sum())
  return hist
def get_subplot_shape(num_subplots: int) -> Tuple[int, int]:
  """Chooses a (rows, cols) grid just large enough for `num_subplots` axes."""
  rows = max(1, int(np.sqrt(num_subplots)))
  cols = (num_subplots + rows - 1) // rows  # Ceiling division.
  return rows, cols
def plot_spatial_hists(labels: np.ndarray,
                       logits: np.ndarray,
                       classes: Sequence[str]) -> matplotlib.figure.Figure:
  """Plot histograms of how each phoneme class maps spatially to tactors.

  Args:
    labels: Integer array of ground-truth class indices.
    logits: Array of shape (num_examples, num_classes) of model scores.
    classes: Class names corresponding to the columns of `logits`.

  Returns:
    Matplotlib figure with one spatial histogram subplot per class.
  """
  # Get the classes that are in both `classes` and MAPPING_TARGETS.
  targeted_classes = [phone for phone in classes if phone in MAPPING_TARGETS]
  logits = np.compress([phone in targeted_classes for phone in classes],
                       logits, axis=1)
  targets = np.vstack([MAPPING_TARGETS[phone] for phone in targeted_classes])
  # Map logits down to 2D coordinates by weighted average. The factor 4.0
  # sharpens the softmax so the top-scoring class dominates.
  softmax_logits = np.exp2(4.0 * logits)
  softmax_logits /= np.sum(softmax_logits, axis=1, keepdims=True)
  coords = np.dot(softmax_logits, targets)

  hist = compute_2d_hists(labels, coords, len(classes))

  # Line segment data for plotting Voronoi cell boundaries.
  # NOTE(review): these constants appear to be the precomputed Voronoi
  # boundaries of the MAPPING_TARGETS layout — confirm if targets change.
  voronoi_x = [[0.2454, 0.4907, 0.2454, -0.2454, -0.4907, -0.2454, 0.2454,
                0.4907, 0.2454, -0.2454, -0.4907, -0.2454],
               [0.4907, 0.2454, -0.2454, -0.4907, -0.2454, 0.2454, 0.5774, 1.0,
                0.5774, -0.5774, -1.0, -0.5774]]
  voronoi_y = [[-0.425, 0.0, 0.425, 0.425, 0.0, -0.425, -0.425, 0.0, 0.425,
                0.425, 0.0, -0.425],
               [0.0, 0.425, 0.425, 0.0, -0.425, -0.425, -1.0, 0.0, 1.0, 1.0,
                0.0, -1.0]]

  # For more plot contrast, allow a small number of pixels to clip.
  vmax = np.percentile(hist, 99.7)
  fig = matplotlib.figure.Figure(figsize=(9, 6))
  subplot_rows, subplot_cols = get_subplot_shape(len(classes))
  for i in range(len(classes)):
    ax = fig.add_subplot(subplot_rows, subplot_cols, i + 1)
    ax.imshow(hist[i], origin='lower', aspect='equal', interpolation='bicubic',
              cmap='density', vmin=0.0, vmax=vmax, extent=(-1, 1, -1, 1))
    ax.plot(voronoi_x, voronoi_y, 'w-', linewidth=0.7, alpha=0.9)
    # Mark the target location of each class (slightly shrunk toward center).
    ax.plot(0.98 * targets[:, 0], 0.98 * targets[:, 1], 'ko', alpha=0.3)
    ax.set_title(classes[i], fontsize=14)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
    ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
  fig.suptitle('Spatial histograms', fontsize=15)
  return fig
def draw_hexagon(ax, **kw) -> None:
  """Draws a unit hexagon centered at (0, 0) on axis `ax`.

  Keyword arguments are forwarded to `ax.plot` (color, linewidth, etc.).
  """
  # Seven vertex angles at 60-degree steps; the last repeats the first so
  # the outline closes.
  angles = (np.pi / 3) * np.arange(7)
  ax.plot(np.sin(angles), np.cos(angles), **kw)
def plot_embedded_hists(
    labels: np.ndarray,
    embedded: np.ndarray,
    classes: Sequence[str],
) -> Tuple[matplotlib.figure.Figure, matplotlib.figure.Figure]:
  """Plot histograms of how each phoneme class maps in the embedding space.

  Args:
    labels: Integer array of ground-truth class indices.
    embedded: Array of 2D embedding coordinates, one row per example.
    classes: Class names corresponding to the label indices.

  Returns:
    Tuple (fig_merged, fig_separate): one figure overlaying all class
    contours, and one with a separate density subplot per class.
  """
  hist = compute_2d_hists(labels, embedded, len(classes))

  # Make a figure that shows all classes together on the same plot.
  fig_merged = matplotlib.figure.Figure(figsize=(6, 6))
  ax = fig_merged.add_subplot(1, 1, 1)

  # Get a set of distinct colors by sampling from the 'rainbow' colormap.
  cmap = matplotlib.cm.get_cmap('rainbow')
  n = np.arange(len(classes))
  n = (11 * n) % 37  # Scramble order so that adjacent colors are dissimilar.
  x = n / 37.0
  colors = ['#%02X%02X%02X' % (r, g, b)
            for r, g, b in (200 * cmap(x)[:, :3]).astype(int)]

  for i in range(len(classes)):
    # Contour each class's density at 50% of its own peak.
    level_thresholds = np.max(hist[i]) * np.array([0.5, 1])
    kwargs = {'colors': colors[i], 'origin': 'lower',
              'extent': (-1, 1, -1, 1)}
    contour = ax.contour(hist[i], levels=level_thresholds, **kwargs)
    ax.clabel(contour, fmt=classes[i], colors=colors[i])
    ax.contourf(hist[i], levels=level_thresholds, alpha=0.2, **kwargs)
  draw_hexagon(ax, color='k', linewidth=0.7)

  ax.set_aspect('equal', 'datalim')
  ax.set_xlim(-1, 1)
  ax.set_ylim(-1, 1)
  ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
  ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
  fig_merged.suptitle('Embedding histograms merged', fontsize=15)

  # Make another figure with a separate plot for each class.
  # For more plot contrast, allow a small number of pixels to clip.
  vmax = np.percentile(hist, 99.7)
  fig_separate = matplotlib.figure.Figure(figsize=(9, 6))
  subplot_rows, subplot_cols = get_subplot_shape(len(classes))
  for i in range(len(classes)):
    ax = fig_separate.add_subplot(subplot_rows, subplot_cols, i + 1)
    ax.imshow(hist[i], origin='lower', aspect='equal', interpolation='bicubic',
              cmap='density', vmin=0.0, vmax=vmax, extent=(-1, 1, -1, 1))
    draw_hexagon(ax, color='w', linewidth=0.7, alpha=0.9)
    ax.set_title(classes[i], fontsize=14)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
    ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
  fig_separate.suptitle('Embedding histograms', fontsize=15)
  return fig_merged, fig_separate
def plot_kernels(params: hk.Params,
                 layer_name: str,
                 max_num: int = 9) -> matplotlib.figure.Figure:
  """Plot kernels from layer `layer_name`.

  Args:
    params: Model params dict.
    layer_name: String.
    max_num: Integer, max number of kernels to plot. The kernels with the most
      energy are plotted.
  Returns:
    Matplotlib figure.
  """
  kernel = np.asarray(params[layer_name]['w'])
  # Rank kernel columns by L2 energy and keep the `max_num` strongest.
  top_index = np.argsort(np.sum(kernel**2, axis=0))[::-1][:max_num]
  num_taps = kernel.shape[0]
  fig = matplotlib.figure.Figure(figsize=(9, 6))
  subplot_rows, subplot_cols = get_subplot_shape(len(top_index))
  for i in range(len(top_index)):
    ax = fig.add_subplot(subplot_rows, subplot_cols, i + 1)
    # Use point markers only when there are few taps to keep plots readable.
    ax.plot(kernel[:, top_index[i]], '.-' if num_taps < 60 else '-')
    ax.axhline(y=0, color='k')
  fig.suptitle(f'{layer_name} kernels', fontsize=15)
  return fig
def eval_model(model: hk_util.TrainedModel,
               dataset: phone_util.Dataset,
               output_dir: str) -> None:
  """Evaluate model and write HTML report to output directory.

  Args:
    model: Trained model to evaluate.
    dataset: Test dataset.
    output_dir: Directory where the report, CSV, and images are written.
  """
  classes = model.meta.classes
  x_test, y_test = dataset.get_xy_arrays(classes)
  outputs = model(None, {'observed': x_test})
  scores = np.asarray(outputs['scores'])

  # Accumulate per-class statistics and the confusion matrix.
  s = stats.MulticlassClassifierStats(len(classes))
  s.accum(y_test, scores)
  d_primes = s.d_prime
  confusion = stats.Confusion(s.confusion_matrix, classes)
  information_transfer = confusion.transfer_bits
  mean_per_class_accuracy = np.mean(np.diag(confusion.normalized_matrix))

  print('mean d-prime: %.4f' % d_primes.mean())
  print('information transfer: %.2f' % information_transfer)
  print('mean per class accuracy: %.4f' % mean_per_class_accuracy)

  # Write HTML report.
  def output_file(*args):
    return os.path.join(output_dir, *args)

  confusion.save_csv(output_file('confusion.csv'))
  os.makedirs(os.path.join(output_dir, 'images'), exist_ok=True)
  report = plot.HtmlReport(output_file('report.html'), 'Eval')
  report.write('<p>training completed: %s</p>'
               % datetime.datetime.now().strftime('%Y-%m-%d %H:%M'))
  report.write('<p>mean d-prime: %.4f</p>' % d_primes.mean())
  report.write('<p>information transfer: %.2f</p>'
               % information_transfer)
  report.write('<p>mean per class accuracy: %.4f</p>'
               % mean_per_class_accuracy)
  report.write('<pre>')
  report.write(hk_util.summarize_model(model.params))
  report.write('</pre>')
  report.write('<table><tr><th>phone</th><th>d-prime</th></tr>')
  for phone, d_prime in zip(classes, d_primes):
    report.write(f'<tr><td>{phone}</td><td>{d_prime:.4f}</td></tr>')
  report.write('</table>')

  # Plot confusion matrix.
  fig = plot.plot_matrix_figure(
      confusion.normalized_matrix, classes, title='Normalized confusion matrix',
      row_label='True phone', col_label='Predicted phone')
  report.save_figure(output_file('images', 'confusion.png'), fig)
  del fig

  # Plot histograms of how each phoneme class maps spatially to tactors.
  fig = plot_spatial_hists(y_test, scores, classes)
  report.save_figure(output_file('images', 'spatial_hists.png'), fig)
  del fig

  if 'embedded' in outputs:
    embedded = np.asarray(outputs['embedded'])
    fig_merged, fig_separate = plot_embedded_hists(y_test, embedded, classes)
    report.save_figure(output_file('images', 'embedded_hists_merged.png'),
                       fig_merged)
    report.save_figure(output_file('images', 'embedded_hists.png'),
                       fig_separate)
    del fig_merged
    del fig_separate

  # Plot kernels for each layer. This is useful for tuning regularization.
  layer_names = sorted(model.params.keys())
  for i, layer_name in enumerate(layer_names):
    if layer_name.startswith('linear'):
      fig = plot_kernels(model.params, layer_name)
      report.save_figure(output_file('images', 'kernels%d.png' % i), fig)
      del fig

  report.close()
  print('\nfile://' + os.path.abspath(os.path.join(output_dir, 'report.html')))
| apache-2.0 |
sem-geologist/hyperspy | hyperspy/tests/io/test_emd.py | 3 | 26144 | # -*- coding: utf-8 -*-
# Copyright 2007-2015 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# The EMD format is a hdf5 standard proposed at Lawrence Berkeley
# National Lab (see http://emdatasets.com/ for more information).
# NOT to be confused with the FEI EMD format which was developed later.
import os.path
from os import remove
import shutil
import tempfile
import gc
from numpy.testing import assert_allclose
import numpy as np
import h5py
from dateutil import tz
from datetime import datetime
import pytest
from hyperspy.io import load
from hyperspy.signals import BaseSignal, Signal2D, Signal1D, EDSTEMSpectrum
from hyperspy.misc.test_utils import assert_deep_almost_equal
my_path = os.path.dirname(__file__)
# Reference data:
data_signal = np.arange(27).reshape((3, 3, 3))
data_image = np.arange(9).reshape((3, 3))
data_spectrum = np.arange(3)
data_save = np.arange(24).reshape((2, 3, 4))
sig_metadata = {'a': 1, 'b': 2}
user = {'name': 'John Doe', 'institution': 'TestUniversity',
'department': 'Microscopy', 'email': 'johndoe@web.de'}
microscope = {'name': 'Titan', 'voltage': '300kV'}
sample = {'material': 'TiO2', 'preparation': 'FIB'}
comments = {'comment': 'Test'}
test_title = '/signals/This is a test!'
def test_signal_3d_loading():
    # A generic 3D EMD dataset loads as a BaseSignal with the reference data.
    signal = load(os.path.join(my_path, 'emd_files', 'example_signal.emd'))
    np.testing.assert_equal(signal.data, data_signal)
    assert isinstance(signal, BaseSignal)
def test_image_2d_loading():
    # A 2D EMD dataset loads as a Signal2D (image) with the reference data.
    signal = load(os.path.join(my_path, 'emd_files', 'example_image.emd'))
    np.testing.assert_equal(signal.data, data_image)
    assert isinstance(signal, Signal2D)
def test_spectrum_1d_loading():
    # A 1D EMD dataset loads as a Signal1D (spectrum) with the reference data.
    signal = load(os.path.join(my_path, 'emd_files', 'example_spectrum.emd'))
    np.testing.assert_equal(signal.data, data_spectrum)
    assert isinstance(signal, Signal1D)
def test_metadata():
    """EMD metadata groups (user/microscope/sample/comments) round-trip."""
    signal = load(os.path.join(my_path, 'emd_files', 'example_metadata.emd'))
    np.testing.assert_equal(signal.data, data_image)
    np.testing.assert_equal(signal.metadata.General.title, test_title)
    # Each metadata group must match the module-level reference dicts.
    np.testing.assert_equal(signal.metadata.General.user.as_dictionary(), user)
    np.testing.assert_equal(
        signal.metadata.General.microscope.as_dictionary(),
        microscope)
    np.testing.assert_equal(
        signal.metadata.General.sample.as_dictionary(), sample)
    np.testing.assert_equal(
        signal.metadata.General.comments.as_dictionary(),
        comments)
    # Signal-level metadata entries are checked key by key.
    for key, ref_value in sig_metadata.items():
        np.testing.assert_equal(
            signal.metadata.Signal.as_dictionary().get(key), ref_value)
    assert isinstance(signal, Signal2D)
def test_metadata_with_bytes_string():
    """Bytes-typed HDF5 attribute strings should not break EMD loading."""
    pytest.importorskip("natsort", minversion="5.1.0")
    filename = os.path.join(
        my_path, 'emd_files', 'example_bytes_string_metadata.emd')
    # Confirm the fixture really stores the axis attributes as bytes; use a
    # context manager so the file handle is closed even on assertion failure.
    with h5py.File(filename, 'r') as f:
        dim1 = f['test_group']['data_group']['dim1']
        dim1_name = dim1.attrs['name']
        dim1_units = dim1.attrs['units']
    assert isinstance(dim1_name, np.bytes_)
    assert isinstance(dim1_units, np.bytes_)
    # `filename` is already the full path; re-joining it onto
    # my_path/'emd_files' (as the original code did) duplicates the
    # directory components. Loading must simply not raise.
    load(filename)
def test_data_numpy_object_dtype():
    # A dataset stored with numpy object dtype is not loadable as a signal;
    # the loader is expected to skip it and return an empty list.
    filename = os.path.join(
        my_path, 'emd_files', 'example_object_dtype_data.emd')
    signal = load(filename)
    assert len(signal) == 0
def test_data_axis_length_1():
    # An axis of length 1 must be preserved, not squeezed out of the shape.
    filename = os.path.join(
        my_path, 'emd_files', 'example_axis_len_1.emd')
    signal = load(filename)
    assert signal.data.shape == (5, 1, 5)
class TestDatasetName:
    """Tests for selecting EMD groups by name via the `dataset_name` kwarg."""

    def setup_method(self):
        # Build a temporary EMD (HDF5) file containing three EMD groups of
        # different shapes so tests can select them individually by name.
        tmpdir = tempfile.TemporaryDirectory()
        hdf5_dataset_path = os.path.join(tmpdir.name, "test_dataset.emd")
        f = h5py.File(hdf5_dataset_path, mode="w")
        f.attrs.create('version_major', 0)
        f.attrs.create('version_minor', 2)

        dataset_name_list = [
            '/experimental/science_data_0',
            '/experimental/science_data_1',
            '/processed/science_data_0']
        data_size_list = [(50, 50), (20, 10), (16, 32)]

        for dataset_name, data_size in zip(dataset_name_list, data_size_list):
            group = f.create_group(dataset_name)
            # emd_group_type == 1 marks the group as an EMD dataset.
            group.attrs.create('emd_group_type', 1)
            group.create_dataset(name='data', data=np.random.random(data_size))
            group.create_dataset(name='dim1', data=range(data_size[0]))
            group.create_dataset(name='dim2', data=range(data_size[1]))
        f.close()

        self.hdf5_dataset_path = hdf5_dataset_path
        self.tmpdir = tmpdir
        self.dataset_name_list = dataset_name_list
        self.data_size_list = data_size_list

    def teardown_method(self):
        self.tmpdir.cleanup()

    def test_load_with_dataset_name(self):
        # Without `dataset_name`, all groups load; with a single name, only
        # that group loads and its title matches the group path.
        s = load(self.hdf5_dataset_path)
        assert len(s) == len(self.dataset_name_list)
        for dataset_name, data_size in zip(
                self.dataset_name_list, self.data_size_list):
            s = load(self.hdf5_dataset_path, dataset_name=dataset_name)
            assert s.metadata.General.title == dataset_name
            assert s.data.shape == data_size

    def test_load_with_dataset_name_several(self):
        # A list of names loads exactly those datasets.
        dataset_name = self.dataset_name_list[0:2]
        s = load(self.hdf5_dataset_path, dataset_name=dataset_name)
        assert len(s) == len(dataset_name)
        assert s[0].metadata.General.title in dataset_name
        assert s[1].metadata.General.title in dataset_name

    def test_wrong_dataset_name(self):
        # An unknown name raises IOError, even when mixed with a valid one.
        with pytest.raises(IOError):
            load(self.hdf5_dataset_path, dataset_name='a_wrong_name')
        with pytest.raises(IOError):
            load(self.hdf5_dataset_path,
                 dataset_name=[self.dataset_name_list[0], 'a_wrong_name'])
class TestMinimalSave():
    """Smoke test: a signal saves to .emd with only default arguments."""

    def test_minimal_save(self):
        self.signal = Signal1D([0, 1])
        with tempfile.TemporaryDirectory() as tmp:
            self.signal.save(os.path.join(tmp, 'testfile.emd'))
class TestReadSeveralDatasets:
    """Loading a file with several EMD groups returns one signal per group."""

    def setup_method(self):
        # Create a temporary EMD file with three 128x128 random datasets.
        tmpdir = tempfile.TemporaryDirectory()
        hdf5_dataset_path = os.path.join(tmpdir.name, "test_dataset.emd")
        f = h5py.File(hdf5_dataset_path, mode="w")
        f.attrs.create('version_major', 0)
        f.attrs.create('version_minor', 2)

        group_path_list = ['/exp/data_0', '/exp/data_1', '/calc/data_0']

        for group_path in group_path_list:
            group = f.create_group(group_path)
            # emd_group_type == 1 marks the group as an EMD dataset.
            group.attrs.create('emd_group_type', 1)
            data = np.random.random((128, 128))
            group.create_dataset(name='data', data=data)
            group.create_dataset(name='dim1', data=range(128))
            group.create_dataset(name='dim2', data=range(128))
        f.close()

        self.group_path_list = group_path_list
        self.hdf5_dataset_path = hdf5_dataset_path
        self.tmpdir = tmpdir

    def teardown_method(self):
        self.tmpdir.cleanup()

    def test_load_file(self):
        # Each group path becomes the title of the corresponding signal.
        s = load(self.hdf5_dataset_path)
        assert len(s) == len(self.group_path_list)
        title_list = [s_temp.metadata.General.title for s_temp in s]
        assert sorted(self.group_path_list) == sorted(title_list)
class TestCaseSaveAndRead():
    """Round-trip test: data, axes, and metadata survive save then load."""

    def test_save_and_read(self):
        # Build a reference signal with fully specified axes and metadata.
        signal_ref = BaseSignal(data_save)
        signal_ref.metadata.General.title = test_title
        signal_ref.axes_manager[0].name = 'x'
        signal_ref.axes_manager[1].name = 'y'
        signal_ref.axes_manager[2].name = 'z'
        signal_ref.axes_manager[0].scale = 2
        signal_ref.axes_manager[1].scale = 3
        signal_ref.axes_manager[2].scale = 4
        signal_ref.axes_manager[0].offset = 10
        signal_ref.axes_manager[1].offset = 20
        signal_ref.axes_manager[2].offset = 30
        signal_ref.axes_manager[0].units = 'nm'
        signal_ref.axes_manager[1].units = 'µm'
        signal_ref.axes_manager[2].units = 'mm'
        signal_ref.save(os.path.join(my_path, 'emd_files', 'example_temp.emd'),
                        overwrite=True, signal_metadata=sig_metadata,
                        user=user, microscope=microscope, sample=sample,
                        comments=comments)
        signal = load(os.path.join(my_path, 'emd_files', 'example_temp.emd'))
        # Every property written above must read back unchanged.
        np.testing.assert_equal(signal.data, signal_ref.data)
        np.testing.assert_equal(signal.axes_manager[0].name, 'x')
        np.testing.assert_equal(signal.axes_manager[1].name, 'y')
        np.testing.assert_equal(signal.axes_manager[2].name, 'z')
        np.testing.assert_equal(signal.axes_manager[0].scale, 2)
        np.testing.assert_almost_equal(signal.axes_manager[1].scale, 3.0)
        np.testing.assert_almost_equal(signal.axes_manager[2].scale, 4.0)
        np.testing.assert_equal(signal.axes_manager[0].offset, 10)
        np.testing.assert_almost_equal(signal.axes_manager[1].offset, 20.0)
        np.testing.assert_almost_equal(signal.axes_manager[2].offset, 30.0)
        np.testing.assert_equal(signal.axes_manager[0].units, 'nm')
        np.testing.assert_equal(signal.axes_manager[1].units, 'µm')
        np.testing.assert_equal(signal.axes_manager[2].units, 'mm')
        np.testing.assert_equal(signal.metadata.General.title, test_title)
        np.testing.assert_equal(
            signal.metadata.General.user.as_dictionary(), user)
        np.testing.assert_equal(
            signal.metadata.General.microscope.as_dictionary(),
            microscope)
        np.testing.assert_equal(
            signal.metadata.General.sample.as_dictionary(), sample)
        np.testing.assert_equal(
            signal.metadata.General.comments.as_dictionary(), comments)
        for key, ref_value in sig_metadata.items():
            np.testing.assert_equal(
                signal.metadata.Signal.as_dictionary().get(key), ref_value)
        assert isinstance(signal, BaseSignal)

    def teardown_method(self, method):
        # Remove the temporary file written by test_save_and_read.
        remove(os.path.join(my_path, 'emd_files', 'example_temp.emd'))
def _generate_parameters():
parameters = []
for lazy in [True, False]:
for sum_EDS_detectors in [True, False]:
parameters.append([lazy, sum_EDS_detectors])
return parameters
class TestFeiEMD():
fei_files_path = os.path.join(my_path, "emd_files", "fei_emd_files")
@classmethod
def setup_class(cls):
import zipfile
zipf = os.path.join(my_path, "emd_files", "fei_emd_files.zip")
with zipfile.ZipFile(zipf, 'r') as zipped:
zipped.extractall(cls.fei_files_path)
@classmethod
def teardown_class(cls):
gc.collect()
shutil.rmtree(cls.fei_files_path)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_image(self, lazy):
stage = {'tilt_alpha': 0.006,
'tilt_beta': 0.000,
'x': -0.000009,
'y': 0.000144,
'z': 0.000029}
md = {'Acquisition_instrument': {'TEM': {'beam_energy': 200.0,
'camera_length': 98.0,
'magnification': 40000.0,
'microscope': 'Talos',
'Stage': stage}},
'General': {'original_filename': 'fei_emd_image.emd',
'date': '2017-03-06',
'time': '09:56:41',
'time_zone': 'BST',
'title': 'HAADF'},
'Signal': {'binned': False, 'signal_type': 'image'},
'_HyperSpy': {'Folding': {'original_axes_manager': None,
'original_shape': None,
'signal_unfolded': False,
'unfolded': False}}}
# Update time and time_zone to local ones
md['General']['time_zone'] = tz.tzlocal().tzname(datetime.today())
dt = datetime.fromtimestamp(1488794201, tz=tz.tzutc())
date, time = dt.astimezone(
tz.tzlocal()).isoformat().split('+')[0].split('T')
md['General']['date'] = date
md['General']['time'] = time
signal = load(os.path.join(self.fei_files_path, 'fei_emd_image.emd'),
lazy=lazy)
if lazy:
assert signal._lazy
signal.compute(close_file=True)
fei_image = np.load(os.path.join(self.fei_files_path,
'fei_emd_image.npy'))
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].units == 'um'
assert_allclose(signal.axes_manager[0].scale, 0.00530241, rtol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].units == 'um'
assert_allclose(signal.axes_manager[1].scale, 0.00530241, rtol=1E-5)
assert_allclose(signal.data, fei_image)
assert_deep_almost_equal(signal.metadata.as_dictionary(), md)
assert isinstance(signal, Signal2D)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_spectrum(self, lazy):
signal = load(os.path.join(
self.fei_files_path, 'fei_emd_spectrum.emd'), lazy=lazy)
if lazy:
assert signal._lazy
signal.compute(close_file=True)
fei_spectrum = np.load(os.path.join(self.fei_files_path,
'fei_emd_spectrum.npy'))
np.testing.assert_equal(signal.data, fei_spectrum)
assert isinstance(signal, Signal1D)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_si(self, lazy):
signal = load(os.path.join(self.fei_files_path, 'fei_emd_si.emd'),
lazy=lazy)
if lazy:
assert signal[1]._lazy
signal[1].compute(close_file=True)
fei_si = np.load(os.path.join(self.fei_files_path, 'fei_emd_si.npy'))
np.testing.assert_equal(signal[1].data, fei_si)
assert isinstance(signal[1], Signal1D)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_si_non_square_10frames(self, lazy):
s = load(os.path.join(
self.fei_files_path, 'fei_SI_SuperX-HAADF_10frames_10x50.emd'),
lazy=lazy)
signal = s[1]
if lazy:
assert signal._lazy
signal.compute(close_file=True)
assert isinstance(signal, EDSTEMSpectrum)
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].size == 10
assert signal.axes_manager[0].units == 'nm'
assert_allclose(signal.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].size == 50
assert signal.axes_manager[1].units == 'nm'
assert_allclose(signal.axes_manager[1].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[2].name == 'X-ray energy'
assert signal.axes_manager[2].size == 4096
assert signal.axes_manager[2].units == 'keV'
assert_allclose(signal.axes_manager[2].scale, 0.005, atol=1E-5)
signal0 = s[0]
if lazy:
assert signal0._lazy
signal0.compute(close_file=True)
assert isinstance(signal0, Signal2D)
assert signal0.axes_manager[0].name == 'x'
assert signal0.axes_manager[0].size == 10
assert signal0.axes_manager[0].units == 'nm'
assert_allclose(signal0.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal0.axes_manager[1].name == 'y'
assert signal0.axes_manager[1].size == 50
assert signal0.axes_manager[1].units == 'nm'
s = load(os.path.join(self.fei_files_path,
'fei_SI_SuperX-HAADF_10frames_10x50.emd'),
sum_frames=False,
SI_dtype=np.uint8,
rebin_energy=256,
lazy=lazy)
signal = s[1]
if lazy:
assert signal._lazy
signal.compute(close_file=True)
assert isinstance(signal, EDSTEMSpectrum)
assert signal.axes_manager.navigation_shape == (10, 50, 10)
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].size == 10
assert signal.axes_manager[0].units == 'nm'
assert_allclose(signal.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].size == 50
assert signal.axes_manager[1].units == 'nm'
assert_allclose(signal.axes_manager[1].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[2].name == 'Time'
assert signal.axes_manager[2].size == 10
assert signal.axes_manager[2].units == 's'
assert_allclose(signal.axes_manager[2].scale, 0.76800, atol=1E-5)
assert signal.axes_manager[3].name == 'X-ray energy'
assert signal.axes_manager[3].size == 16
assert signal.axes_manager[3].units == 'keV'
assert_allclose(signal.axes_manager[3].scale, 1.28, atol=1E-5)
s = load(os.path.join(self.fei_files_path,
'fei_SI_SuperX-HAADF_10frames_10x50.emd'),
sum_frames=False,
last_frame=5,
SI_dtype=np.uint8,
rebin_energy=256,
lazy=lazy)
signal = s[1]
if lazy:
assert signal._lazy
signal.compute(close_file=True)
assert isinstance(signal, EDSTEMSpectrum)
assert signal.axes_manager.navigation_shape == (10, 50, 5)
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].size == 10
assert signal.axes_manager[0].units == 'nm'
assert_allclose(signal.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].size == 50
assert signal.axes_manager[1].units == 'nm'
assert_allclose(signal.axes_manager[1].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[2].name == 'Time'
assert signal.axes_manager[2].size == 5
assert signal.axes_manager[2].units == 's'
assert_allclose(signal.axes_manager[2].scale, 0.76800, atol=1E-5)
assert signal.axes_manager[3].name == 'X-ray energy'
assert signal.axes_manager[3].size == 16
assert signal.axes_manager[3].units == 'keV'
assert_allclose(signal.axes_manager[3].scale, 1.28, atol=1E-5)
s = load(os.path.join(self.fei_files_path,
'fei_SI_SuperX-HAADF_10frames_10x50.emd'),
sum_frames=False,
first_frame=4,
SI_dtype=np.uint8,
rebin_energy=256,
lazy=lazy)
signal = s[1]
if lazy:
assert signal._lazy
signal.compute(close_file=True)
assert isinstance(signal, EDSTEMSpectrum)
assert signal.axes_manager.navigation_shape == (10, 50, 6)
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].size == 10
assert signal.axes_manager[0].units == 'nm'
assert_allclose(signal.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].size == 50
assert signal.axes_manager[1].units == 'nm'
assert_allclose(signal.axes_manager[1].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[2].name == 'Time'
assert signal.axes_manager[2].size == 6
assert signal.axes_manager[2].units == 's'
assert_allclose(signal.axes_manager[2].scale, 0.76800, atol=1E-5)
assert signal.axes_manager[3].name == 'X-ray energy'
assert signal.axes_manager[3].size == 16
assert signal.axes_manager[3].units == 'keV'
assert_allclose(signal.axes_manager[3].scale, 1.28, atol=1E-5)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_si_non_square_20frames(self, lazy):
s = load(os.path.join(
self.fei_files_path,
'fei_SI_SuperX-HAADF_20frames_10x50.emd'),
lazy=lazy)
signal = s[1]
if lazy:
assert signal._lazy
signal.compute(close_file=True)
assert isinstance(signal, EDSTEMSpectrum)
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].size == 10
assert signal.axes_manager[0].units == 'nm'
assert_allclose(signal.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].size == 50
assert signal.axes_manager[1].units == 'nm'
assert_allclose(signal.axes_manager[1].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[2].name == 'X-ray energy'
assert signal.axes_manager[2].size == 4096
assert signal.axes_manager[2].units == 'keV'
assert_allclose(signal.axes_manager[2].scale, 0.005, atol=1E-5)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_si_non_square_20frames_2eV(self, lazy):
s = load(os.path.join(
self.fei_files_path,
'fei_SI_SuperX-HAADF_20frames_10x50_2ev.emd'),
lazy=lazy)
signal = s[1]
if lazy:
assert signal._lazy
signal.compute(close_file=True)
assert isinstance(signal, EDSTEMSpectrum)
assert signal.axes_manager[0].name == 'x'
assert signal.axes_manager[0].size == 10
assert signal.axes_manager[0].units == 'nm'
assert_allclose(signal.axes_manager[0].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[1].name == 'y'
assert signal.axes_manager[1].size == 50
assert signal.axes_manager[1].units == 'nm'
assert_allclose(signal.axes_manager[1].scale, 1.234009, atol=1E-5)
assert signal.axes_manager[2].name == 'X-ray energy'
assert signal.axes_manager[2].size == 4096
assert signal.axes_manager[2].units == 'keV'
assert_allclose(signal.axes_manager[2].scale, 0.002, atol=1E-5)
@pytest.mark.parametrize("lazy", (True, False))
def test_fei_emd_si_frame_range(self, lazy):
signal = load(os.path.join(self.fei_files_path, 'fei_emd_si.emd'),
first_frame=2, last_frame=4, lazy=lazy)
fei_si = np.load(os.path.join(self.fei_files_path,
'fei_emd_si_frame.npy'))
if lazy:
assert signal[1]._lazy
signal[1].compute(close_file=True)
np.testing.assert_equal(signal[1].data, fei_si)
assert isinstance(signal[1], Signal1D)
@pytest.mark.parametrize(["lazy", "sum_EDS_detectors"],
_generate_parameters())
def test_fei_si_4detectors(self, lazy, sum_EDS_detectors):
fname = os.path.join(self.fei_files_path,
'fei_SI_EDS-HAADF-4detectors_2frames.emd')
signal = load(fname, sum_EDS_detectors=sum_EDS_detectors, lazy=lazy)
if lazy:
assert signal[1]._lazy
signal[1].compute(close_file=True)
length = 6
if not sum_EDS_detectors:
length += 3
assert len(signal) == length
# TODO: add parsing azimuth_angle
def test_fei_emd_ceta_camera(self):
signal = load(
os.path.join(
self.fei_files_path,
'1532 Camera Ceta.emd'))
assert_allclose(signal.data, np.zeros((64, 64)))
assert isinstance(signal, Signal2D)
date, time = self._convert_datetime(1512055942.914275).split('T')
assert signal.metadata.General.date == date
assert signal.metadata.General.time == time
assert signal.metadata.General.time_zone == self._get_local_time_zone()
signal = load(
os.path.join(
self.fei_files_path,
'1854 Camera Ceta.emd'))
assert_allclose(signal.data, np.zeros((64, 64)))
assert isinstance(signal, Signal2D)
def _convert_datetime(self, unix_time):
# Since we don't know the actual time zone of where the data have been
# acquired, we convert the datetime to the local time for convenience
dt = datetime.fromtimestamp(float(unix_time), tz=tz.tzutc())
return dt.astimezone(tz.tzlocal()).isoformat().split('+')[0]
def _get_local_time_zone(self):
return tz.tzlocal().tzname(datetime.today())
def time_loading_frame(self):
# Run this function to check the loading time when loading EDS data
import time
frame_number = 100
point_measurement = 15
frame_offsets = np.arange(0, point_measurement * frame_number,
frame_number)
time_data = np.zeros_like(frame_offsets)
path = 'path to large dataset'
for i, frame_offset in enumerate(frame_offsets):
print(frame_offset + frame_number)
t0 = time.time()
load(os.path.join(path, 'large dataset.emd'),
first_frame=frame_offset, last_frame=frame_offset + frame_number)
t1 = time.time()
time_data[i] = t1 - t0
import matplotlib.pyplot as plt
plt.plot(frame_offsets, time_data)
plt.xlabel('Frame offset')
plt.xlabel('Loading time (s)')
| gpl-3.0 |
Leberwurscht/Python-Guitar-Transcription-Aid | Analyze.py | 1 | 1678 | #!/usr/bin/env python
import gtk, numpy, scipy.ndimage
import matplotlib
import matplotlib.backends.backend_gtkcairo as mpl_backend
def get_power(data):
# apply window
window = numpy.hanning(len(data))
data *= window
# fft
power = numpy.abs(numpy.fft.rfft(data))**2.
return power
def smooth(array, window=3):
smoothed = numpy.convolve(array, numpy.hanning(window), "same")
return smoothed
def find_peaks(frq,power,max_window=3,min_window=3,height=0.0001):
max_filtered = scipy.ndimage.maximum_filter1d(power,size=max_window)
min_filtered = scipy.ndimage.minimum_filter1d(power,size=min_window)
maxima = numpy.logical_and(max_filtered==power, max_filtered-min_filtered>height)
maxima_indices = numpy.nonzero(maxima)[0]
return maxima_indices
class Analyze(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
fig = matplotlib.figure.Figure(figsize=(5,4))
self.ax = fig.add_subplot(111)
vbox = gtk.VBox()
self.add(vbox)
self.figure = mpl_backend.FigureCanvasGTK(fig)
self.figure.set_size_request(500,400)
self.navbar = mpl_backend.NavigationToolbar2Cairo(self.figure, self)
vbox.pack_start(self.figure)
vbox.pack_start(self.navbar, False, False)
def simple_plot(self, x, y, **kwargs):
self.ax.plot(x, y, **kwargs)
def add_line(self, pos, **kwargs):
self.ax.axvline(pos, **kwargs)
def plot_spectrum(self, frq, power):
self.simple_plot(frq, power, color="g")
# self.ax.plot(frq, 10*numpy.log10(power), color="r")
for semitone in xrange(-29,50):
f = 440. * ( 2.**(1./12.) )**semitone
self.ax.axvline(f, color="r")
for maximum in find_peaks(frq, power, 3, 3, 10):
self.ax.axvline(frq[maximum], color="k")
| gpl-3.0 |
ChanderG/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
zuku1985/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts it number of
state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| bsd-3-clause |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/integrate/quadrature.py | 25 | 27849 | from __future__ import division, print_function, absolute_import
__all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb',
'cumtrapz','newton_cotes']
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from numpy import sum, ones, add, diff, isinf, isscalar, \
asarray, real, trapz, arange, empty
import numpy as np
import math
import warnings
from scipy._lib.six import xrange
class AccuracyWarning(Warning):
pass
def _cached_p_roots(n):
"""
Cache p_roots results for speeding up multiple calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func,a,b,args=(),n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
[x,w] = _cached_p_roots(n)
x = real(x)
ainf, binf = map(isinf,(a,b))
if ainf or binf:
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0*sum(w*func(y,*args),0), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if isscalar(x):
return func(x, *args)
x = asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
if hasattr(y0, 'dtype'):
output = empty((n,), dtype=y0.dtype)
else:
output = empty((n,), dtype=type(y0))
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : int, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = asarray(y)
if x is None:
d = dx
else:
x = asarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = add.accumulate(d * (y[slice1] + y[slice2]) / 2.0, axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y,start,stop,x,dx,axis):
    # Apply the composite Simpson's rule over the sample triples starting
    # at index `start` (stepping by 2) up to `stop`, along `axis`, and
    # return the summed integral estimate.  `x` gives the sample
    # positions (possibly unevenly spaced); when it is None a constant
    # spacing `dx` is assumed.
    nd = len(y.shape)
    if start is None:
        start = 0
    # Simpson's rule consumes samples in overlapping triples, so the left
    # endpoint of each fitted parabola advances two samples at a time.
    step = 2
    all = (slice(None),)*nd
    # Slices selecting the left, middle and right sample of every triple.
    slice0 = tupleset(all, axis, slice(start, stop, step))
    slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(all, axis, slice(start+2, stop+2, step))
    if x is None:  # Even spaced Simpson's rule.
        result = add.reduce(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
                            axis)
    else:
        # Account for possibly different spacings.
        #    Simpson's rule changes a bit.
        h = diff(x,axis=axis)
        # h0/h1 are the widths of the left/right interval of each triple.
        sl0 = tupleset(all, axis, slice(start, stop, step))
        sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0]
        h1 = h[sl1]
        hsum = h0 + h1
        hprod = h0 * h1
        h0divh1 = h0 / h1
        # Weights follow from integrating the interpolating parabola over
        # each unevenly spaced sample triple.
        result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) +
                                      y[slice1]*hsum*hsum/hprod +
                                      y[slice2]*(2-h0divh1)),axis)
    return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule.  If x is None, spacing of dx is assumed.
    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals.  The parameter 'even' controls how this is handled.
    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : int, optional
        Spacing of integration points along axis of `y`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
        'avg' : Average two results:1) use the first N-2 intervals with
            a trapezoidal rule on the last interval and 2) use the last
            N-2 intervals with a trapezoidal rule on the first interval.
        'first' : Use Simpson's rule for the first N-2 intervals with
            a trapezoidal rule on the last interval.
        'last' : Use Simpson's rule for the last N-2 intervals with a
            trapezoidal rule on the first interval.
    Returns
    -------
    result : float or ndarray
        Estimate of the integral of `y` along `axis` (the integration
        axis is removed from the result's shape).
    See Also
    --------
    quad: adaptive quadrature using QUADPACK
    romberg: adaptive Romberg quadrature
    quadrature: adaptive Gaussian quadrature
    fixed_quad: fixed-order Gaussian quadrature
    dblquad: double integrals
    tplquad: triple integrals
    romb: integrators for sampled data
    cumtrapz: cumulative integration for sampled data
    ode: ODE integrators
    odeint: ODE integrators
    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.
    """
    y = asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = asarray(x)
        if len(x.shape) == 1:
            # Broadcast a 1-d x against y by giving it singleton
            # dimensions everywhere except along the integration axis.
            shapex = ones(nd)
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    if N % 2 == 0:
        # Even number of samples -> odd number of intervals: Simpson's
        # rule cannot cover them all, so one boundary interval is handled
        # with the trapezoidal rule, as selected by `even`.
        val = 0.0
        result = 0.0
        slice1 = (slice(None),)*nd
        slice2 = (slice(None),)*nd
        if even not in ['avg', 'last', 'first']:
            raise ValueError("Parameter 'even' must be 'avg', 'last', or 'first'.")
        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice1, axis, -1)
            slice2 = tupleset(slice2, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            # Trapezoidal correction for the final interval.
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simps(y,0,N-3,x,dx,axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice1, axis, 0)
            slice2 = tupleset(slice2, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            # Trapezoidal correction for the first interval.
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simps(y,1,N-2,x,dx,axis)
        if even == 'avg':
            # Average the 'first' and 'last' estimates accumulated above.
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        # Odd number of samples: plain composite Simpson's rule.
        result = _basic_simps(y,0,N-2,x,dx,axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.
    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.  A two-element sequence
        ``(precision, width)`` may also be given to control the float
        formatting of the printed table.
    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.
    See also
    --------
    quad : adaptive quadrature using QUADPACK
    romberg : adaptive Romberg quadrature
    quadrature : adaptive Gaussian quadrature
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    simps : integrators for sampled data
    cumtrapz : cumulative integration for sampled data
    ode : ODE integrators
    odeint : ODE integrators
    """
    y = asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    # Find k such that 2**k == Ninterv; reject sample counts that are
    # not one plus a power of two.
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")
    # R[(i, j)] is the Richardson extrapolation table: row i uses 2**i
    # intervals, column j is the j-th extrapolation order.
    R = {}
    all = (slice(None),) * nd
    slice0 = tupleset(all, axis, 0)
    slicem1 = tupleset(all, axis, -1)
    h = Ninterv*asarray(dx)*1.0
    R[(0,0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = all
    start = stop = step = Ninterv
    for i in range(1,k+1):
        start >>= 1
        # Select only the samples that are new at this refinement level;
        # the previously used ones are already folded into R[(i-1, 0)].
        slice_R = tupleset(slice_R, axis, slice(start,stop,step))
        step >>= 1
        R[(i,0)] = 0.5*(R[(i-1,0)] + h*add.reduce(y[slice_R],axis))
        for j in range(1,i+1):
            R[(i,j)] = R[(i,j-1)] + \
                       (R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*j))-1)
        h = h / 2.0
    if show:
        if not isscalar(R[(0,0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may be a (precision, width) pair; fall back to the
            # defaults when it is a plain boolean or too short.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%" + str(width) + '.' + str(precis)+'f'
            print("\n Richardson Extrapolation Table for Romberg Integration ")
            print("====================================================================")
            for i in range(0,k+1):
                for j in range(0,i+1):
                    print(formstr % R[(i,j)], end=' ')
                print()
            print("====================================================================\n")
    # The bottom-right table entry is the highest-order estimate.
    return R[(k,k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * arange(0, numtosum)
s = sum(function(points),0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.
    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).
    If `show` is 1, the triangular array of the intermediate results
    will be printed. If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.
    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    Returns
    -------
    results : float
        Result of the integration.
    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e whether it is a
        "vector" function). Default is False.
    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simps : Integrators for sampled data.
    cumtrapz : Cumulative integration for sampled data.
    ode : ODE integrator.
    odeint : ODE integrator.
    References
    ----------
    .. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.
    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]
    ::
       Steps  StepSize  Results
           1  1.000000  0.385872
           2  0.500000  0.412631  0.421551
           4  0.250000  0.419184  0.421368  0.421356
           8  0.125000  0.420810  0.421352  0.421350  0.421350
          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350
    The final result is 0.421350396475 after 33 function evaluations.
    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701
    """
    if isinf(a) or isinf(b):
        raise ValueError("Romberg integration only available for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a,b]
    intrange = b-a
    # Row 0 of the extrapolation table: single-trapezoid estimate.
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    # Fix: ``range`` (instead of Python-2-only ``xrange``) keeps this
    # loop source-compatible with Python 3; ``divmax`` is small, so the
    # behavior is identical.
    for i in range(1, divmax+1):
        # Double the number of trapezoids and fold in the new ordinates.
        n = n * 2
        ordsum = ordsum + _difftrap(vfunc, interval, n)
        resmat.append([])
        resmat[i].append(intrange * ordsum / n)
        # Richardson-extrapolate across the new row.
        for k in range(i):
            resmat[i].append(_romberg_diff(resmat[i-1][k], resmat[i][k], k+1))
        result = resmat[i][i]
        lastresult = resmat[i-1][i-1]
        err = abs(result - lastresult)
        if err < tol or err < rtol*abs(result):
            break
    else:
        # for/else: only reached when the loop exhausted divmax
        # refinements without meeting either tolerance.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning)
    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
# Exact rational Newton-Cotes coefficients for equally spaced samples,
# keyed by the integration order N (1..14).  Each value is the tuple
# (num_a, den_a, int_a, num_B, den_B) described in the notes above:
# the weights are num_a*array(int_a)/den_a and the error coefficient
# is num_B/den_B.
_builtincoeffs = {
    1:(1,2,[1,1],-1,12),
    2:(1,3,[1,4,1],-1,90),
    3:(3,8,[1,3,3,1],-3,80),
    4:(2,45,[7,32,12,32,7],-8,945),
    5:(5,288,[19,75,50,50,75,19],-275,12096),
    6:(1,140,[41,216,27,272,27,216,41],-9,1400),
    7:(7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8:(4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
       -2368,467775),
    9:(9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                15741,2857], -4671, 394240),
    10:(5,299376,[16067,106300,-48525,272400,-260550,427368,
                  -260550,272400,-48525,106300,16067],
        -673175, 163459296),
    11:(11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                     15493566,15493566,-9595542,25226685,-3237113,
                     13486539,2171465], -2224234463, 237758976000),
    12:(1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                     87516288,-87797136,87516288,-51491295,35725120,
                     -7587864,9903168,1364651], -3012, 875875),
    13:(13, 402361344000,[8181904909, 56280729661, -31268252574,
                          156074417954,-151659573325,206683437987,
                          -43111992612,-43111992612,206683437987,
                          -151659573325,156074417954,-31268252574,
                          56280729661,8181904909], -2639651053,
                          344881152000),
    14:(7, 2501928000, [90241897,710986864,-770720657,3501442784,
                        -6625093363,12630121616,-16802270373,19534438464,
                        -16802270373,12630121616,-6625093363,3501442784,
                        -770720657,710986864,90241897], -3740727473,
                        1275983280000)
    }
def newton_cotes(rn, equal=0):
    """
    Return weights and error coefficient for Newton-Cotes integration.
    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N.  Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:
    :math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
    + B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
    where :math:`\\xi \\in [x_0,x_N]` and :math:`\\Delta x = \\frac{x_N-x_0}{N}`
    is the average samples spacing.
    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
    Parameters
    ----------
    rn : int or array_like
        The integer order for equally-spaced data or the relative positions of
        the samples with the first sample at 0 and the last at N, where N+1 is
        the length of `rn`.  N is the order of the Newton-Cotes integration.
    equal : int, optional
        Set to 1 to enforce equally spaced data.
    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.
    Raises
    ------
    ValueError
        If the sample positions do not start at 0 or do not end at N.
    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.
    """
    try:
        # A sequence of sample positions was given; detect whether the
        # spacing happens to be uniform.
        N = len(rn)-1
        if equal:
            rn = np.arange(N+1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    # Fix: narrowed from a bare ``except:`` so that SystemExit and
    # KeyboardInterrupt propagate; len() on a scalar order raises
    # TypeError, which Exception still covers.
    except Exception:
        # `rn` has no len(): it is the integer order itself, implying
        # equally spaced samples 0..N.
        N = rn
        rn = np.arange(N+1)
        equal = 1
    if equal and N in _builtincoeffs:
        # Exact rational weights are tabulated for the common orders.
        na, da, vi, nb, db = _builtincoeffs[N]
        return na*np.array(vi,float)/da, float(nb)/db
    # General (possibly unevenly spaced) case: solve the Vandermonde
    # system for the interpolatory weights.  asarray lets plain Python
    # sequences reach the array arithmetic below.
    rn = np.asarray(rn)
    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")
    yi = rn / float(N)
    ti = 2.0*yi - 1
    nvec = np.arange(0,N+1)
    C = ti**nvec[:,np.newaxis]
    Cinv = np.linalg.inv(C)
    # improve precision of result (two Newton-Schulz refinement steps)
    for i in range(2):
        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
    vec = 2.0 / (nvec[::2]+1)
    ai = np.dot(Cinv[:,::2],vec) * N/2
    if (N % 2 == 0) and equal:
        BN = N/(N+3.)
        power = N+2
    else:
        BN = N/(N+2.)
        power = N+1
    BN = BN - np.dot(yi**power, ai)
    p1 = power+1
    # Work in log space to avoid overflow in N**power / (power+1)!.
    fac = power*math.log(N) - gammaln(p1)
    fac = math.exp(fac)
    return ai, BN*fac
| bsd-3-clause |
wrshoemaker/ffpopsim | examples/mutation_selection_balance_highd.py | 2 | 3699 | '''
author: Richard Neher, Fabio Zanini
date: 11/07/12
content: Example on the steady state distribution of allele frequency in a
balance between mutation and genetic drift using haploid_highd.
'''
# NOTE(review): this example is Python 2 code (print statements) and depends
# on the external FFPopSim package; run it under a Python 2 interpreter.
# Import modules (setting the path should not be necessary when the module is
# installed in the PYTHONPATH)
import sys
sys.path.insert(0, '../pkg/python')
import numpy as np
import matplotlib.pyplot as plt
import FFPopSim as h
# specify parameters
N = 500 # population size
L = 64 # number of loci
s = np.linspace(-2 ,2, L) / N # additive selection coefficients for L loci, scaled to N
mu = 0.5 / N # mutation rate, scaled to N
r = 50.0 / N / L # recombination rate for each interval between loci
# set up population
pop = h.haploid_highd(L) # produce an instance of haploid_highd with L loci
# set and additive fitness function. Note that FFPopSim models fitness landscape
# in a +/- rather than 0/1 basis, hence the factor 1/2
pop.set_fitness_additive(0.5 * s)
pop.mutation_rate = mu # mutation rate
pop.recombination_model = h.CROSSOVERS # recombination model
pop.outcrossing_rate = 1 # obligate sexual
pop.crossover_rate = r # crossover rate
# initialize population in linkage equilibrium with frequencies 0.5 and size N
pop.carrying_capacity = N # set the population size
pop.set_allele_frequencies(0.5 * np.ones(L), N)
print "Evolve for >> N generations and compare allele frequency distributions \nto expectations from diffusion theory."
pop.status()
pop.evolve(10 * N) # evolve for 10N generations to equilibrate
# evolve and sample allele_frequencies
nsamples = 10000
allele_frequencies = np.zeros((nsamples,L))
for ii in range(nsamples):
    pop.evolve(0.1 * N) # N / 10 generations between successive samples
    # print output every 100 generations
    if (ii % 100 == 0):
        print ii, "out of", nsamples, ". Population size: ", pop.population_size, "Number of clones", pop.number_of_clones
    # get allele frequencies
    allele_frequencies[ii,:] = pop.get_allele_frequencies()
# prepare allele frequency histogram
af_bins = np.linspace(0,1,26) # bins for histogram
# widen the outermost bins a little (presumably so frequencies at the
# boundaries 0 and 1 fall safely inside the histogram) -- TODO confirm
af_bins[0] -= 1.0/N
af_bins[-1] += 1.0/N
bc = 0.5*(af_bins[1:]+af_bins[:-1]) # bin centers for plotting
# plot results
plt.figure()
for locus in range(L):
    # make histogram
    y,x = np.histogram(allele_frequencies[:,locus], bins=af_bins, normed='True')
    # plot
    plt.plot(bc, y, color=plt.cm.jet(locus*4))
plt.title('Comparison to diffusion theory for $rN='+str(r*N)+'$, $\mu N='+str(mu*N)+'$, $N='+str(N)+'$')
plt.text(0.3,3,"Color indicates selection coefficient \nfrom Ns=-2..2")
plt.xlabel(r'Allele frequency $\nu$')
plt.ylabel(r'Allele frequency distribution $f(\nu)$')
# compare explicitly to diffusion theory by normalizing to the diffusion theory prediction
plt.figure()
for locus in range(L):
    y,x = np.histogram(allele_frequencies[:,locus], bins=af_bins, normed='True')
    # calculate the diffusion theory single locus result
    # (density ~ nu**(2N*mu-1) * (1-nu)**(2N*mu-1) * exp(2N*s*nu))
    diffusion_theory = bc**(2*N*mu-1)*(1-bc)**(2*N*mu-1)*np.exp(2*N*s[locus]*bc)
    # normalize
    diffusion_theory /= np.sum(diffusion_theory)*(bc[1]-bc[0])
    # plot normalized
    plt.plot(bc, y/diffusion_theory, color=plt.cm.jet(locus*4), ls='-')
plt.title('Comparison to diffusion theory for $rN='+str(r*N)+'$, $\mu N='+str(mu*N)+'$, $N='+str(N)+'$')
plt.xlabel(r'Allele frequency $\nu$')
plt.ylabel('Allele frequency distribution/Diffusion theory result')
plt.ion()
plt.show()
| gpl-3.0 |
mikelum/pyspeckit | examples/interactive_example_hr2421.py | 8 | 11287 | import pyspeckit
# neet matplotlib so we can make mouse-click events from the script
import matplotlib
import os
# list of annotations so we can clear them
annotations = []
excesslines = []
# get the data from http://cdsarc.u-strasbg.fr/ftp/cats/II/179/sp/hr2421.fit
import urllib2
url = urllib2.urlopen('http://cdsarc.u-strasbg.fr/ftp/cats/II/179/sp/hr2421.fit')
with open('hr2421.fit','wb') as outfile:
outfile.write(url.read())
# Load the spectrum
sp = pyspeckit.Spectrum('hr2421.fit')
# Plot a particular spectral line
sp.plotter(xmin=4700,xmax=5000)
# Need to fit the continuum first
sp.baseline(interactive=True, subtract=False)
# Left-click to select the fitting region
event1 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,157,316,button=1)
annotations.append( sp.plotter.axis.annotate("First click\n(button 1)", xy=(event1.xdata,event1.ydata), xytext=(event1.xdata+20,event1.ydata+5e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='green')) )
annotations.append (sp.plotter.axis.annotate("", xy=(4714,9.54e-10), xytext=(event1.xdata+20,event1.ydata+8e-11),
textcoords='data', xycoords='data', ha='center', va='bottom',
arrowprops=dict(arrowstyle="->", connectionstyle='arc,rad=0.5',
color='green')) )
annotations.append( sp.plotter.axis.annotate("(only the horizontal\n position matters)",xy=(event1.xdata+10,event1.ydata),xycoords='data') )
excesslines.append( sp.plotter.axis.vlines(4714,sp.plotter.ymin.value,sp.plotter.ymax.value,linestyle='--',color='green') )
sp.plotter.refresh()
sp.plotter.savefig('interactive_example_hr2421_baseline_firstclick.png', bbox_inches=None)
event2 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,850,280,button=1)
# event2.xdata and .ydata is None here
event2.xdata = 850
event2.ydata = 850
annotations.append( sp.plotter.axis.annotate("Second click\n(button 1)", xy=(event2.xdata,event2.ydata), xytext=(event2.xdata-20,event2.ydata+2.5e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='green')) )
annotations.append( sp.plotter.axis.annotate("", xy=(event2.xdata,8.0e-10), xytext=(event2.xdata-20,event2.ydata+6e-11),
textcoords='data', xycoords='data', ha='center', va='bottom',
arrowprops=dict(arrowstyle="->", connectionstyle='arc,rad=0.5',
color='green')) )
excesslines.append( sp.plotter.axis.vlines(event2.xdata,sp.plotter.ymin.value,sp.plotter.ymax.value,linestyle='--',color='green'))
sp.plotter.refresh()
sp.plotter.savefig('interactive_example_hr2421_baseline_secondclick.png', bbox_inches=None)
sp.baseline.event_manager(event1)
sp.baseline.event_manager(event2)
sp.plotter.savefig('interactive_example_hr2421_baseline_secondclick_highlight.png', bbox_inches=None)
event3 = matplotlib.backend_bases.KeyEvent('button_press_event', sp.plotter.axis.figure.canvas,x=425,y=316,key='x')
annotations.append( sp.plotter.axis.annotate("Third click\n(key 'x')", xy=(event3.xdata,event3.ydata), xytext=(event3.xdata-20,event3.ydata+5e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='red')) )
annotations.append (sp.plotter.axis.annotate("", xy=(4825,8.67e-10), xytext=(event3.xdata-20,event3.ydata+8e-11),
textcoords='data', xycoords='data', ha='center', va='bottom',
arrowprops=dict(arrowstyle="->", connectionstyle='arc,rad=0.5',
color='red')) )
annotations.append( sp.plotter.axis.annotate("(to exclude the line\nfrom the baseline fit)",xy=(event3.xdata-55,event3.ydata-5e-11),xycoords='data',color='red') )
excesslines.append( sp.plotter.axis.vlines(4825,sp.plotter.ymin.value,sp.plotter.ymax.value,linestyle='--',color='red') )
sp.plotter.refresh()
sp.plotter.savefig('interactive_example_hr2421_baseline_thirdclick.png', bbox_inches=None)
event4 = matplotlib.backend_bases.KeyEvent('button_press_event', sp.plotter.axis.figure.canvas,x=645,y=280,key='x')
event4.xdata = 900
event4.ydata = 900
annotations.append( sp.plotter.axis.annotate("Fourth click\n(key 'x')", xy=(event4.xdata,event4.ydata), xytext=(event4.xdata+20,event4.ydata+2.5e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='red')) )
annotations.append( sp.plotter.axis.annotate("", xy=(4905,8.0e-10), xytext=(event4.xdata+20,event4.ydata+6e-11),
textcoords='data', xycoords='data', ha='center', va='bottom',
arrowprops=dict(arrowstyle="->", connectionstyle='arc,rad=0.5',
color='red')) )
excesslines.append( sp.plotter.axis.vlines(4905,sp.plotter.ymin.value,sp.plotter.ymax.value,linestyle='--',color='red'))
sp.plotter.refresh()
sp.plotter.savefig('interactive_example_hr2421_baseline_fourthclick.png', bbox_inches=None)
sp.baseline.event_manager(event3)
sp.baseline.event_manager(event4)
sp.plotter.savefig('interactive_example_hr2421_baseline_fourthclick_highlight.png', bbox_inches=None)
annotations.append( sp.plotter.axis.annotate("Fifth click - perform the fit\n(button 3;\nbutton 2 will subtract)",
xy=(4865, 1e-9), xytext=(4865, 1e-9), textcoords='data',
xycoords='data', ha='center', va='bottom') )
event5 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,787,223,button=3)
sp.baseline.event_manager(event5)
sp.plotter.savefig('interactive_example_hr2421_baseline_fifthclick_fit.png', bbox_inches=None)
for LC in excesslines:
if LC in sp.plotter.axis.collections:
sp.plotter.axis.collections.remove(LC)
for AN in annotations:
if AN in sp.plotter.axis.texts:
sp.plotter.axis.texts.remove(AN)
sp.plotter.refresh()
# ********************************************
# Start up an interactive line-fitting session
# ********************************************
sp.specfit(interactive=True)
# Left-click to select the fitting region
event1 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,257,316,button=1)
annotations.append( sp.plotter.axis.annotate("First click\n(button 1)", xy=(event1.xdata,event1.ydata), xytext=(event1.xdata+20,event1.ydata),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='green')) )
annotations.append (sp.plotter.axis.annotate("", xy=(event1.xdata,9.26e-10), xytext=(event1.xdata+20,event1.ydata+5e-11),
textcoords='data', xycoords='data', ha='center', va='bottom',
arrowprops=dict(arrowstyle="->", connectionstyle='arc,rad=0.5',
color='green')) )
annotations.append( sp.plotter.axis.annotate("(only the horizontal\n position matters)",xy=(event1.xdata+2,event1.ydata-1e-10),xycoords='data') )
excesslines.append( sp.plotter.axis.vlines(event1.xdata,sp.plotter.ymin,sp.plotter.ymax,linestyle='--',color='green') )
sp.plotter.refresh()
sp.plotter.savefig('figures/interactive_example_hr2421_firstclick.png', bbox_inches=None)
event2 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,732,280,button=1)
annotations.append( sp.plotter.axis.annotate("Second click\n(button 1)", xy=(event2.xdata,event2.ydata), xytext=(event2.xdata+20,event2.ydata+3e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='green')) )
annotations.append( sp.plotter.axis.annotate("", xy=(4943,8.29e-10), xytext=(event2.xdata+20,event2.ydata+6e-11),
textcoords='data', xycoords='data', ha='center', va='bottom',
arrowprops=dict(arrowstyle="->", connectionstyle='arc,rad=0.5',
color='green')) )
excesslines.append( sp.plotter.axis.vlines(4943,sp.plotter.ymin,sp.plotter.ymax,linestyle='--',color='green'))
sp.plotter.refresh()
sp.plotter.savefig('figures/interactive_example_hr2421_secondclick.png', bbox_inches=None)
sp.specfit.event_manager(event1)
sp.specfit.event_manager(event2)
sp.plotter.savefig('figures/interactive_example_hr2421_secondclick_highlight.png', bbox_inches=None)
for LC in excesslines:
if LC in sp.plotter.axis.collections:
sp.plotter.axis.collections.remove(LC)
for AN in annotations:
if AN in sp.plotter.axis.texts:
sp.plotter.axis.texts.remove(AN)
sp.plotter.refresh()
event3 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,523,194,button=2)
annotations.append( sp.plotter.axis.annotate("Third click\n(button 2)", xy=(event3.xdata,event3.ydata), xytext=(event3.xdata+40,event3.ydata-5e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='orange', shrinkB=5)) )
sp.specfit.event_manager(event3)
sp.plotter.savefig('figures/interactive_example_hr2421_thirdclick.png', bbox_inches=None)
event4 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,485,264,button=2)
annotations.append( sp.plotter.axis.annotate("Fourth click\n(button 2)", xy=(event4.xdata,event4.ydata), xytext=(event4.xdata-20,event4.ydata-5e-11),
textcoords='data', xycoords='data', ha='center',
va='bottom', arrowprops=dict(arrowstyle="->",
connectionstyle='arc,rad=0.5', color='orange', shrinkB=5)) )
sp.specfit.event_manager(event4)
sp.plotter.savefig('figures/interactive_example_hr2421_fourthclick.png', bbox_inches=None)
model = sp.specfit.Registry.multifitters['gaussian'].n_modelfunc(sp.specfit.guesses)(sp.xarr) + sp.baseline.basespec
sp.plotter.axis.plot(sp.xarr,model,color='b')
annotations.append( sp.plotter.axis.annotate("The guessed model",
xy=(4770,8.2e-10), xytext=(4770,8.2e-10), textcoords='data',
xycoords='data', ha='center', va='bottom', color='blue') )
sp.plotter.refresh()
sp.plotter.savefig('figures/interactive_example_hr2421_gaussmodelguess.png', bbox_inches=None)
for LC in excesslines:
if LC in sp.plotter.axis.collections:
sp.plotter.axis.collections.remove(LC)
for AN in annotations:
if AN in sp.plotter.axis.texts:
sp.plotter.axis.texts.remove(AN)
annotations.append( sp.plotter.axis.annotate("Fifth click - perform the fit\n(button 3)",
xy=(4855, 9.1e-10), xytext=(4855,9.1e-10), textcoords='data',
xycoords='data', ha='center', va='bottom') )
event5 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,787,223,button=3)
sp.specfit.event_manager(event5)
sp.plotter.savefig('figures/interactive_example_hr2421_fifthclick_fit.png', bbox_inches=None)
#sp.plotter.figure.savefig(savedir+'hr2421_interactive_selectregion.png', bbox_inches=None)
#sp.plotter.figure.savefig(savedir+'hr2421_interactive_guesses.png', bbox_inches=None)
#event5 = matplotlib.backend_bases.MouseEvent('button_press_event', sp.plotter.axis.figure.canvas,611,247,button=3)
#sp.specfit.event_manager(event5)
| mit |
piyueh/PetIBM | examples/ibpm/cylinder2dRe550/scripts/plotVorticity.py | 6 | 1402 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 1200 time steps (3 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
# Locate the simulation directory (parent of this scripts/ folder) and
# the output/ subdirectory holding the PetIBM HDF5 results.
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
# NOTE(review): the h5py.File handles opened below are never closed;
# consider ``with h5py.File(...) as f:`` blocks.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 1200
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
    xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
                           unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot contour lines of the vorticity (ax.contour draws line contours,
# not filled ones) together with the body outline.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-32.0, 32.0, 32)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-0.6, 1.6)
ax.set_ylim(-0.8, 0.8)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
samuel1208/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.

    Parameters
    ----------
    X : ndarray
        The data
    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.
    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.
    axes : seq of ndarray
        The axes with which the grid has been created.

    Raises
    ------
    ValueError
        If ``percentiles`` is not a pair of values in [0, 1].
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')
    # The empirical percentiles over X are identical for every column, so
    # compute them at most once (lazily) instead of once per
    # high-resolution column as the previous implementation did.
    emp_percentiles = None
    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution: use the unique values directly
            axis = uniques
        else:
            if emp_percentiles is None:
                emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)
    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
        must be specified).
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the ``grid``. Only used if ``X`` is not None.
    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.
    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Raises
    ------
    ValueError
        If ``gbrt`` is not a fitted ``BaseGradientBoosting`` instance, if
        neither or both of ``grid`` and ``X`` are given, if a target
        feature index is out of range, or if ``grid`` is not 2d.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
    """
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    # A fitted model always has at least one boosting stage.
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # Exactly one of `grid` / `X` must be provided.
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()
    # Every requested feature index must exist in the fitted model.
    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))
    if X is not None:
        X = check_array(X, dtype=DTYPE, order='C')
        # Derive the evaluation grid from the training-data percentiles.
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)
    # C-ordered DTYPE array is required by the Cython tree traversal below.
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]
    # estimators_ has shape (n_stages, n_trees_per_stage); multi-class
    # models fit one tree per class at each stage.
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    # Each tree's learning-rate-weighted contribution is accumulated into
    # `pdp` in place by the Cython helper.
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])
    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.
    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # matplotlib is imported locally so the module itself can be imported
    # without a plotting backend being available.
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0
    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')
    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}
    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()
    def convert_feature(fx):
        # Map a feature name (str) to its column index; ints pass through.
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx
    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')
        tmp_features.append(fxs)
    features = tmp_features
    # Resolve human-readable axis labels for every target feature.
    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)
    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)
    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)
    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)
    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        # Reuse (and clear) the figure of the caller-supplied axis.
        fig = ax.get_figure()
        fig.clear()
    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)
        if len(axes) == 1:
            # one-way PDP: simple line plot over the single axis
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)
        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)
        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')
        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)
    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
| bsd-3-clause |
skidzo/sympy | sympy/plotting/tests/test_plot.py | 6 | 9933 | from sympy import (pi, sin, cos, Symbol, Integral, Sum, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities import lambdify as lambdify_
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
import warnings
class MockPrint(object):
    """Stand-in for ``sys.stdout`` that silently discards all output."""

    encoding = 'utf-8'

    def write(self, s):
        # Swallow everything written to the fake stream.
        pass

    def flush(self):
        # Nothing is buffered, so flushing is a no-op.
        pass
def disable_print(func, *args, **kwargs):
    """Decorator that silences anything ``func`` prints to stdout.

    ``sys.stdout`` is swapped for a throw-away :class:`MockPrint` stream
    for the duration of the call and is always restored afterwards.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        sys.stdout = MockPrint()
        try:
            # Propagate the wrapped function's return value (the previous
            # implementation silently discarded it).
            return func(*args, **kwargs)
        finally:
            # Restore stdout even if func raises, so a failing decorated
            # test does not leave the fake stream installed globally.
            sys.stdout = sys.__stdout__
    return wrapper
# Globally disable GUI display so .show() calls become no-ops during the
# test run; plots are only written to temporary files.
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
    """Tracks temporary plot files created during the tests so they can be
    deleted afterwards via :meth:`cleanup`."""
    tmp_files = []

    @classmethod
    def tmp_file(cls, name=''):
        """Reserve a unique '.png' temp-file name and remember it."""
        cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
        return cls.tmp_files[-1]

    @classmethod
    def cleanup(cls):
        """Delete every tracked file and forget the tracked names.

        ``map`` is lazy on Python 3, so the original
        ``map(os.remove, cls.tmp_files)`` never deleted anything; iterate
        explicitly instead.
        """
        for filename in cls.tmp_files:
            # Guard: some reserved names may never have been written to.
            if os.path.isfile(filename):
                os.remove(filename)
        del cls.tmp_files[:]
def plot_and_save(name):
    """Exercise the plotting module end to end.

    Creates a wide variety of 2D/3D, parametric and color-mapped plots,
    saves each to a uniquely named temporary PNG (tracked by
    ``TmpFileManager``) and closes its backend. ``name`` is used as a
    prefix for the temporary file names.
    """
    tmp_file = TmpFileManager.tmp_file
    x = Symbol('x')
    y = Symbol('y')
    z = Symbol('z')
    ###
    # Examples from the 'introduction' notebook
    ###
    p = plot(x)
    p = plot(x*sin(x), x*cos(x))
    p.extend(p)
    p[0].line_color = lambda a: a
    p[1].line_color = 'b'
    p.title = 'Big title'
    p.xlabel = 'the x axis'
    p[1].label = 'straight line'
    p.legend = True
    p.aspect_ratio = (1, 1)
    p.xlim = (-15, 20)
    p.save(tmp_file('%s_basic_options_and_colors' % name))
    p._backend.close()
    p.extend(plot(x + 1))
    p.append(plot(x + 3, x**2)[1])
    p.save(tmp_file('%s_plot_extend_append' % name))
    p[2] = plot(x**2, (x, -2, 3))
    p.save(tmp_file('%s_plot_setitem' % name))
    p._backend.close()
    p = plot(sin(x), (x, -2*pi, 4*pi))
    p.save(tmp_file('%s_line_explicit' % name))
    p._backend.close()
    p = plot(sin(x))
    p.save(tmp_file('%s_line_default_range' % name))
    p._backend.close()
    p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
    p.save(tmp_file('%s_line_multiple_range' % name))
    p._backend.close()
    # Two free symbols in a 2D line plot must be rejected.
    raises(ValueError, lambda: plot(x, y))
    p = plot(Piecewise((1, x > 0), (0, True)),(x,-1,1))
    p.save(tmp_file('%s_plot_piecewise' % name))
    p._backend.close()
    #parametric 2d plots.
    #Single plot with default range.
    plot_parametric(sin(x), cos(x)).save(tmp_file())
    #Single plot with range.
    p = plot_parametric(sin(x), cos(x), (x, -5, 5))
    p.save(tmp_file('%s_parametric_range' % name))
    p._backend.close()
    #Multiple plots with same range.
    p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
    p.save(tmp_file('%s_parametric_multiple' % name))
    p._backend.close()
    #Multiple plots with different ranges.
    p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
    p.save(tmp_file('%s_parametric_multiple_ranges' % name))
    p._backend.close()
    #depth of recursion specified.
    p = plot_parametric(x, sin(x), depth=13)
    p.save(tmp_file('%s_recursion_depth' % name))
    p._backend.close()
    #No adaptive sampling.
    p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
    p.save(tmp_file('%s_adaptive' % name))
    p._backend.close()
    #3d parametric plots
    p = plot3d_parametric_line(sin(x), cos(x), x)
    p.save(tmp_file('%s_3d_line' % name))
    p._backend.close()
    p = plot3d_parametric_line(
        (sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
    p.save(tmp_file('%s_3d_line_multiple' % name))
    p._backend.close()
    p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
    p.save(tmp_file('%s_3d_line_points' % name))
    p._backend.close()
    # 3d surface single plot.
    p = plot3d(x * y)
    p.save(tmp_file('%s_surface' % name))
    p._backend.close()
    # Multiple 3D plots with same range.
    p = plot3d(-x * y, x * y, (x, -5, 5))
    p.save(tmp_file('%s_surface_multiple' % name))
    p._backend.close()
    # Multiple 3D plots with different ranges.
    p = plot3d(
        (x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
    p.save(tmp_file('%s_surface_multiple_ranges' % name))
    p._backend.close()
    # Single Parametric 3D plot
    p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
    p.save(tmp_file('%s_parametric_surface' % name))
    p._backend.close()
    # Multiple Parametric 3D plots.
    p = plot3d_parametric_surface(
        (x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
        (sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
    p.save(tmp_file('%s_parametric_surface' % name))
    p._backend.close()
    ###
    # Examples from the 'colors' notebook
    ###
    # Color functions are dispatched on arity (1, 2 or 3 arguments).
    p = plot(sin(x))
    p[0].line_color = lambda a: a
    p.save(tmp_file('%s_colors_line_arity1' % name))
    p[0].line_color = lambda a, b: b
    p.save(tmp_file('%s_colors_line_arity2' % name))
    p._backend.close()
    p = plot(x*sin(x), x*cos(x), (x, 0, 10))
    p[0].line_color = lambda a: a
    p.save(tmp_file('%s_colors_param_line_arity1' % name))
    p[0].line_color = lambda a, b: a
    p.save(tmp_file('%s_colors_param_line_arity2a' % name))
    p[0].line_color = lambda a, b: b
    p.save(tmp_file('%s_colors_param_line_arity2b' % name))
    p._backend.close()
    p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
                               cos(x) + 0.1*cos(x)*cos(7*x),
                               0.1*sin(7*x),
                               (x, 0, 2*pi))
    p[0].line_color = lambdify_(x, sin(4*x))
    p.save(tmp_file('%s_colors_3d_line_arity1' % name))
    p[0].line_color = lambda a, b: b
    p.save(tmp_file('%s_colors_3d_line_arity2' % name))
    p[0].line_color = lambda a, b, c: c
    p.save(tmp_file('%s_colors_3d_line_arity3' % name))
    p._backend.close()
    p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
    p[0].surface_color = lambda a: a
    p.save(tmp_file('%s_colors_surface_arity1' % name))
    p[0].surface_color = lambda a, b: b
    p.save(tmp_file('%s_colors_surface_arity2' % name))
    p[0].surface_color = lambda a, b, c: c
    p.save(tmp_file('%s_colors_surface_arity3a' % name))
    p[0].surface_color = lambdify_((x, y, z), sqrt((x - 3*pi)**2 + y**2))
    p.save(tmp_file('%s_colors_surface_arity3b' % name))
    p._backend.close()
    p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
                                  (x, -1, 1), (y, -1, 1))
    p[0].surface_color = lambda a: a
    p.save(tmp_file('%s_colors_param_surf_arity1' % name))
    p[0].surface_color = lambda a, b: a*b
    p.save(tmp_file('%s_colors_param_surf_arity2' % name))
    p[0].surface_color = lambdify_((x, y, z), sqrt(x**2 + y**2 + z**2))
    p.save(tmp_file('%s_colors_param_surf_arity3' % name))
    p._backend.close()
    ###
    # Examples from the 'advanced' notebook
    ###
    # XXX: This raises the warning "The evaluation of the expression is
    # problematic. We are trying a failback method that may still work. Please
    # report this as a bug." It has to use the fallback because using evalf()
    # is the only way to evaluate the integral. We should perhaps just remove
    # that warning.
    with warnings.catch_warnings(record=True) as w:
        i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
        p = plot(i, (y, 1, 5))
        p.save(tmp_file('%s_advanced_integral' % name))
        p._backend.close()
        # Make sure no other warnings were raised
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert "The evaluation of the expression is problematic" in str(w[0].message)
    s = Sum(1/x**y, (x, 1, oo))
    p = plot(s, (y, 2, 10))
    p.save(tmp_file('%s_advanced_inf_sum' % name))
    p._backend.close()
    p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False)
    p[0].only_integers = True
    p[0].steps = True
    p.save(tmp_file('%s_advanced_fin_sum' % name))
    p._backend.close()
    ###
    # Test expressions that can not be translated to np and generate complex
    # results.
    ###
    plot(sin(x) + I*cos(x)).save(tmp_file())
    plot(sqrt(sqrt(-x))).save(tmp_file())
    plot(LambertW(x)).save(tmp_file())
    plot(sqrt(LambertW(x))).save(tmp_file())
    #Characteristic function of a StudentT distribution with nu=10
    plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
          + meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
                    5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
    """Run the full plotting suite when a recent-enough matplotlib is
    importable; otherwise skip."""
    matplotlib = import_module('matplotlib', min_module_version='1.1.0',
                               catch=(RuntimeError,))
    if not matplotlib:
        skip("Matplotlib not the default backend")
        return
    try:
        plot_and_save('test')
    finally:
        # Always clean up the temporary plot files.
        TmpFileManager.cleanup()
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
    """Smoke-test experimental lambdify with Max and an unusual symbol name.

    NOTE(review): the test name is missing a 'd' ("lambify"); kept as-is
    because test runners discover it under this exact name.
    """
    x = Symbol('x')
    f = lambdify([x], Max(x, 5))
    # XXX should f be tested? If f(2) is attempted, an
    # error is raised because a complex produced during wrapping of the arg
    # is being compared with an int.
    assert Max(2, 5) == 5
    assert Max(5, 7) == 7
    # A symbol whose name is not a valid Python identifier must still work.
    x = Symbol('x-3')
    f = lambdify([x], x + 1)
    assert f(1) == 2
@disable_print
def test_append_issue_7140():
    """Regression test for sympy issue 7140: Plot.append must accept a
    single data series but reject Plot objects and series lists."""
    x = Symbol('x')
    p1 = plot(x)
    p2 = plot(x**2)
    p3 = plot(x + 2)
    # append a series
    p2.append(p1[0])
    assert len(p2._series) == 2
    # Appending a whole Plot object (or its series list) is a TypeError.
    with raises(TypeError):
        p1.append(p2)
    with raises(TypeError):
        p1.append(p2._series)
| bsd-3-clause |
SAGES-UCSC/Photometry | calcZeropoint.py | 1 | 3896 | import sys
import os
from subprocess import call
import numpy as np
import matplotlib.pyplot as plt
from astroquery.vizier import Vizier
import astropy.units as u
import makeRegionFile
import Quadtree as Q
import Sources as S
import phot_utils
import geom_utils
def associate(table, tree2):
    """Match each Vizier catalog entry in ``table`` to its nearest source
    in the quadtree ``tree2``.

    Sources whose angular separation exceeds a fixed radius are discarded.
    Each accepted tree source gets its catalog entry attached as
    ``match2`` and is appended to the returned list.
    """
    # Maximum allowed separation for an accepted match
    # (presumably degrees, ~0.05 arcsec -- TODO confirm units).
    dist = 0.000014
    matches = []
    for entry in table:
        match = tree2.match(entry['RAJ2000'], entry['DEJ2000'])
        # Identity test (`is not None`) is the correct, PEP 8 way to check
        # for the missing-match sentinel; `!= None` invoked __ne__.
        if match is not None and geom_utils.equnorm(
                entry['RAJ2000'], entry['DEJ2000'],
                match.ra, match.dec) <= dist:
            match.match2 = entry
            matches.append(match)
    return matches
def getSDSS(galaxy):
    """
    Query SDSS through Vizier, pick out only the stellar sources,
    and put the SDSS magnitudes into AB
    """
    Vizier.ROW_LIMIT = -1  # Removes row limit on output table
    result = Vizier.query_region(galaxy, width=1.0*u.deg,
                                 height=1.0*u.deg, catalog='SDSS')
    # Indices of non-stellar sources (SDSS class code 6 == star).
    # NOTE(review): indices are taken from result[1] but removed from the
    # last table; if Vizier returns more than two SDSS tables these may not
    # line up -- confirm against a live query.
    index = [i for i, entry in enumerate(result[1]) if entry['cl'] != 6]
    # Get the most recent SDSS catalog
    result[len(result) - 1].remove_rows(index)
    # SDSS magnitudes are not exactly in AB so need to correct (not doing this yet).
    return result[len(result)-1]
def calcZP(galaxy, scam, band):
    """
    To calculate the zeropoint of the Subaru image match the Subaru catalog
    and the table returned from Vizier.

    NOTE(review): this module is Python 2 code -- it relies on `print`
    statements and on map()/filter() returning lists.
    """
    sdss = getSDSS(galaxy)
    # SDSS magnitude column for the requested band, e.g. 'gmag'.
    column = str(band + 'mag')
    print "Column: ", column
    # Get only the brightest sources of both SDSS and Subaru.
    mag = map(lambda source: source[column], sdss)
    max_mag = np.mean(mag) + 0.25*np.mean(np.std(mag))
    sdss = filter(lambda s: phot_utils.mag_cut(s[column], 18, max_mag), sdss)
    # Parse the Subaru (SExtractor) catalog, skipping header lines.
    with open(scam, 'r') as f:
        catalog = [S.SCAMSource(line) for line in f if phot_utils.no_head(line)]
    mag = map(lambda s: s.mag_best, catalog)
    max_mag = np.mean(mag) + 0.25*np.mean(np.std(mag))
    sources = filter(lambda s: phot_utils.mag_cut(s.mag_best, 18, max_mag), catalog)
    # Build a spatial index over the full catalog's bounding box; only the
    # bright subset is actually inserted.
    ra = [source.ra for source in catalog]
    dec = [source.dec for source in catalog]
    scam_sources = Q.ScamEquatorialQuadtree(min(ra), min(dec),
                                            max(ra), max(dec))
    map(lambda sources: scam_sources.insert(sources), sources)
    matches = associate(sdss, scam_sources)
    m_scam_sources = map(lambda source: source.mag_aper, matches)
    m_sdss_sources = map(lambda source: source.match2[column], matches)
    # Clip outliers of (m_sdss - m_scam)
    difference = []
    for m_sdss, m_scam in zip(m_sdss_sources, m_scam_sources):
        difference.append(m_sdss - m_scam)
    std = np.std(difference)
    # Make a region file to check the matching
    makeRegionFile.fromList(matches, band + "_scam_match_source.reg", 0.1, "red")
    makeRegionFile.fromList(matches, band + "_sdss_match_source.reg", 0.1, "green")
    # Keep only matches whose offset is below 3 sigma.
    clipped = []
    for entry in matches:
        if entry.match2[column] - entry.mag_aper < std*3:
            clipped.append(entry)
    difference = []
    for entry in clipped:
        difference.append(entry.match2[column] - entry.mag_aper)
    m_scam = map(lambda source: source.mag_aper, clipped)
    # Look at offsets
    plt.plot(difference, m_scam, linestyle='none', marker='o')
    plt.xlabel(r'$m_{SDSS}$ - $m_{SCAM}$', fontsize=20)
    plt.ylabel(r'$m_{SCAM}$', fontsize=20, labelpad=30)
    path = os.getcwd()
    phot_utils.save(path, band + '_zp.png')
    # Take median of offset
    return np.median(difference)
def main():
    """Command-line entry point: calcZeropoint.py <galaxy> <catalog> <band>."""
    galaxy, scam_catalog, band = sys.argv[1], sys.argv[2], sys.argv[3]
    # Python 2 print statement; reports the median SDSS-Subaru offset.
    print "Zeropoint is: ", calcZP(galaxy, scam_catalog, band)
if __name__ == '__main__':
    #calcZP(sys.argv[1], sys.argv[2], sys.argv[3])
    # main() returns None, so sys.exit(None) exits with status 0.
    sys.exit(main())
| mit |
xuq/lightfm | tests/utils.py | 11 | 2205 | import numpy as np
from sklearn.metrics import roc_auc_score
def precision_at_k(model, ground_truth, k, user_features=None, item_features=None):
    """
    Measure precision at k for model and ground truth.

    Arguments:
    - lightFM instance model
    - sparse matrix ground_truth (no_users, no_items)
    - int k

    Returns:
    - float precision@k
    """
    interactions = ground_truth.tocsr()
    n_users, n_items = interactions.shape
    item_ids = np.arange(n_items, dtype=np.int32)

    scores = []
    for uid in range(n_users):
        # Score every item for this user.
        user_ids = np.full(n_items, uid, dtype=np.int32)
        ranks = model.predict(user_ids, item_ids,
                              user_features=user_features,
                              item_features=item_features,
                              num_threads=4)
        # Top-k predicted items vs. the user's observed positives.
        predicted = set(np.argsort(-ranks)[:k])
        row = interactions.getrow(uid)
        actual = set(row.indices[row.data == 1])
        if actual:
            scores.append(len(predicted & actual) / float(k))

    return sum(scores) / len(scores)
def full_auc(model, ground_truth, user_features=None, item_features=None):
    """
    Measure AUC for model and ground truth on all items.

    Arguments:
    - lightFM instance model
    - sparse matrix ground_truth (no_users, no_items)

    Returns:
    - float AUC
    """
    interactions = ground_truth.tocsr()
    n_users, n_items = interactions.shape
    item_ids = np.arange(n_items, dtype=np.int32)

    aucs = []
    for uid in range(n_users):
        # Score every item for this user.
        user_ids = np.full(n_items, uid, dtype=np.int32)
        predictions = model.predict(user_ids, item_ids,
                                    user_features=user_features,
                                    item_features=item_features,
                                    num_threads=4)
        # Binary relevance labels from the observed positives.
        row = interactions.getrow(uid)
        positives = row.indices[row.data == 1]
        labels = np.zeros(n_items, dtype=np.int32)
        labels[positives] = 1
        # Users without any positives contribute no AUC score.
        if len(positives):
            aucs.append(roc_auc_score(labels, predictions))

    return sum(aucs) / len(aucs)
| apache-2.0 |
huzq/scikit-learn | examples/linear_model/plot_sgd_early_stopping.py | 17 | 5651 | """
=============================================
Early stopping of Stochastic Gradient Descent
=============================================
Stochastic Gradient Descent is an optimization technique which minimizes a loss
function in a stochastic fashion, performing a gradient descent step sample by
sample. In particular, it is a very efficient method to fit linear models.
As a stochastic method, the loss function is not necessarily decreasing at each
iteration, and convergence is only guaranteed in expectation. For this reason,
monitoring the convergence on the loss function can be difficult.
Another approach is to monitor convergence on a validation score. In this case,
the input data is split into a training set and a validation set. The model is
then fitted on the training set and the stopping criterion is based on the
prediction score computed on the validation set. This enables us to find the
least number of iterations which is sufficient to build a model that
generalizes well to unseen data and reduces the chance of over-fitting the
training data.
This early stopping strategy is activated if ``early_stopping=True``; otherwise
the stopping criterion only uses the training loss on the entire input data. To
better control the early stopping strategy, we can specify a parameter
``validation_fraction`` which set the fraction of the input dataset that we
keep aside to compute the validation score. The optimization will continue
until the validation score did not improve by at least ``tol`` during the last
``n_iter_no_change`` iterations. The actual number of iterations is available
at the attribute ``n_iter_``.
This example illustrates how the early stopping can used in the
:class:`~sklearn.linear_model.SGDClassifier` model to achieve almost the same
accuracy as compared to a model built without early stopping. This can
significantly reduce training time. Note that scores differ between the
stopping criteria even from early iterations because some of the training data
is held out with the validation stopping criterion.
"""
# Authors: Tom Dupre la Tour
#
# License: BSD 3 clause
import time
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import shuffle
print(__doc__)
def load_mnist(n_samples=None, class_0='0', class_1='8'):
    """Load MNIST, select two classes, shuffle and return only n_samples."""
    # Fetch the full dataset from http://openml.org/d/554
    mnist = fetch_openml('mnist_784', version=1)
    # Restrict to the two requested digit classes (binary problem).
    keep = (mnist.target == class_0) | (mnist.target == class_1)
    X, y = shuffle(mnist.data[keep], mnist.target[keep], random_state=42)
    # Optionally truncate to the first n_samples examples.
    return (X, y) if n_samples is None else (X[:n_samples], y[:n_samples])
@ignore_warnings(category=ConvergenceWarning)
def fit_and_score(estimator, max_iter, X_train, X_test, y_train, y_test):
    """Fit the estimator on the train set and score it on both sets"""
    # Configure this run: iteration budget and a fixed seed.
    estimator.set_params(max_iter=max_iter)
    estimator.set_params(random_state=0)

    # Time the fit itself.
    tic = time.time()
    estimator.fit(X_train, y_train)
    elapsed = time.time() - tic

    return (elapsed,
            estimator.n_iter_,
            estimator.score(X_train, y_train),
            estimator.score(X_test, y_test))
# Define the estimators to compare
estimator_dict = {
    'No stopping criterion':
    linear_model.SGDClassifier(n_iter_no_change=3),
    'Training loss':
    linear_model.SGDClassifier(early_stopping=False, n_iter_no_change=3,
                               tol=0.1),
    'Validation score':
    linear_model.SGDClassifier(early_stopping=True, n_iter_no_change=3,
                               tol=0.0001, validation_fraction=0.2)
}
# Load the dataset
X, y = load_mnist(n_samples=10000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=0)
# Fit each estimator for an increasing iteration budget and record the
# fit time, actual iteration count and train/test accuracy.
results = []
for estimator_name, estimator in estimator_dict.items():
    print(estimator_name + ': ', end='')
    for max_iter in range(1, 50):
        print('.', end='')
        sys.stdout.flush()
        fit_time, n_iter, train_score, test_score = fit_and_score(
            estimator, max_iter, X_train, X_test, y_train, y_test)
        results.append((estimator_name, max_iter, fit_time, n_iter,
                        train_score, test_score))
    print('')
# Transform the results in a pandas dataframe for easy plotting
columns = [
    'Stopping criterion', 'max_iter', 'Fit time (sec)', 'n_iter_',
    'Train score', 'Test score'
]
results_df = pd.DataFrame(results, columns=columns)
# Define what to plot (x_axis, y_axis)
lines = 'Stopping criterion'
plot_list = [
    ('max_iter', 'Train score'),
    ('max_iter', 'Test score'),
    ('max_iter', 'n_iter_'),
    ('max_iter', 'Fit time (sec)'),
]
nrows = 2
ncols = int(np.ceil(len(plot_list) / 2.))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6 * ncols,
                                                            4 * nrows))
# Share the y axis between the two accuracy subplots on the top row.
axes[0, 0].get_shared_y_axes().join(axes[0, 0], axes[0, 1])
# One subplot per (x, y) pair, one line per stopping criterion.
for ax, (x_axis, y_axis) in zip(axes.ravel(), plot_list):
    for criterion, group_df in results_df.groupby(lines):
        group_df.plot(x=x_axis, y=y_axis, label=criterion, ax=ax)
    ax.set_title(y_axis)
    ax.legend(title=lines)
fig.tight_layout()
plt.show()
| bsd-3-clause |
metamx/spark | python/pyspark/sql/dataframe.py | 10 | 64367 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
    def __init__(self, jdf, sql_ctx):
        """Wrap a py4j handle to a JVM-side Dataset/DataFrame.

        :param jdf: py4j reference to the JVM DataFrame backing this object.
        :param sql_ctx: the SQLContext that created this DataFrame; the
            ``sql_ctx and ...`` guard below shows it may be None.
        """
        self._jdf = jdf
        self.sql_ctx = sql_ctx
        # Short-circuit keeps _sc as None when sql_ctx is None.
        self._sc = sql_ctx and sql_ctx._sc
        self.is_cached = False
        self._schema = None  # initialized lazily by the `schema` property
        self._lazy_rdd = None  # Python RDD view, built on first `rdd` access
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Experimental.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Experimental
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to True, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and align cells right.
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20))
else:
print(self._jdf.showString(n, int(truncate)))
def __repr__(self):
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this DataFrame, which is especially useful in iterative algorithms where the
plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with L{SparkContext.setCheckpointDir()}.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
:param delayThreshold: the minimum delay to wait to data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Experimental
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.collectToPython()
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.toPythonIterator()
return _load_from_socket(port, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions.
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
``numPartitions`` can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
    @since(1.3)
    def sample(self, withReplacement, fraction, seed=None):
        """Returns a sampled subset of this :class:`DataFrame`.

        :param withReplacement: whether to sample with replacement (passed
            through to the JVM-side sampler).
        :param fraction: expected fraction of rows to sample; must be >= 0.
        :param seed: random seed; a fresh seed is drawn when omitted, so
            repeated calls without a seed give different samples.

        .. note:: This is not guaranteed to provide exactly the fraction specified of the total
            count of the given :class:`DataFrame`.

        >>> df.sample(False, 0.5, 42).count()
        2
        """
        # NOTE(review): ``assert`` is stripped when Python runs with -O, so
        # this validation silently disappears in optimized mode; a ValueError
        # would be sturdier.
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        seed = seed if seed is not None else random.randint(0, sys.maxsize)
        rdd = self._jdf.sample(withReplacement, fraction, long(seed))
        return DataFrame(rdd, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, str):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default 'inner'.
One of `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
    def _jseq(self, cols, converter=None):
        """Return a JVM Seq of Columns from a list of Column or names"""
        # Thin wrapper over the py4j conversion helper; ``converter`` (when
        # given) maps each element to a JVM column first.
        return _to_seq(self.sql_ctx._sc, cols, converter)

    def _jmap(self, jm):
        """Return a JVM Scala Map from a dict"""
        # NOTE(review): ``_to_scala_map`` is not among the imports visible at
        # the top of this file — presumably defined later in the module.
        return _to_scala_map(self.sql_ctx._sc, jm)

    def _jcols(self, *cols):
        """Return a JVM Seq of Columns from a list of Column or column names

        If `cols` has only one list in it, cols[0] will be used as the list.
        """
        # Allows callers to pass either varargs or one explicit list.
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes statistics for numeric and string columns.
This include count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
    @ignore_unicode_prefix
    @since(1.3)
    def __getitem__(self, item):
        """Returns the column as a :class:`Column`.

        >>> df.select(df['age']).collect()
        [Row(age=2), Row(age=5)]
        >>> df[ ["name", "age"]].collect()
        [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
        >>> df[ df.age > 3 ].collect()
        [Row(age=5, name=u'Bob')]
        >>> df[df[0] > 3].collect()
        [Row(age=5, name=u'Bob')]
        """
        # df["name"]: column reference by name.
        if isinstance(item, basestring):
            jc = self._jdf.apply(item)
            return Column(jc)
        # df[boolean_column]: row filter (see df[df.age > 3] above).
        elif isinstance(item, Column):
            return self.filter(item)
        # df[["a", "b"]]: projection of several columns.
        elif isinstance(item, (list, tuple)):
            return self.select(*item)
        # df[0]: column by positional index into self.columns.
        elif isinstance(item, int):
            jc = self._jdf.apply(self.columns[item])
            return Column(jc)
        else:
            raise TypeError("unexpected item type: %s" % type(item))
    @since(1.3)
    def __getattr__(self, name):
        """Returns the :class:`Column` denoted by ``name``.

        >>> df.select(df.age).collect()
        [Row(age=2), Row(age=5)]
        """
        # __getattr__ is only invoked for names that failed normal attribute
        # lookup, so real methods and attributes are never shadowed by
        # column names here.
        if name not in self.columns:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
        jc = self._jdf.apply(name)
        return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this
frame and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
    @since(1.3)
    def unionAll(self, other):
        """ Return a new :class:`DataFrame` containing union of rows in this
        frame and another frame.
        .. note:: Deprecated in 2.0, use union instead.
        """
        # Kept only for backward compatibility; thin alias for :func:`union`.
        return self.union(other)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have less than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, dict)):
raise ValueError("value should be a float, int, long, string, or dict")
if isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
    @since(1.4)
    def replace(self, to_replace, value, subset=None):
        """Returns a new :class:`DataFrame` replacing a value with another value.
        :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
        aliases of each other.
        :param to_replace: int, long, float, string, or list.
            Value to be replaced.
            If the value is a dict, then `value` is ignored and `to_replace` must be a
            mapping from column name (string) to replacement value. The value to be
            replaced must be an int, long, float, or string.
        :param value: int, long, float, string, or list.
            Value to use to replace holes.
            The replacement value must be an int, long, float, or string. If `value` is a
            list or tuple, `value` should be of the same length with `to_replace`.
        :param subset: optional list of column names to consider.
            Columns specified in subset that do not have matching data type are ignored.
            For example, if `value` is a string, and subset contains a non-string column,
            then the non-string column is simply ignored.
        >>> df4.na.replace(10, 20).show()
        +----+------+-----+
        | age|height| name|
        +----+------+-----+
        | 20| 80|Alice|
        | 5| null| Bob|
        |null| null| Tom|
        |null| null| null|
        +----+------+-----+
        >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
        +----+------+----+
        | age|height|name|
        +----+------+----+
        | 10| 80| A|
        | 5| null| B|
        |null| null| Tom|
        |null| null|null|
        +----+------+----+
        """
        # NOTE: `value` is type-checked even when `to_replace` is a dict, in
        # which case it is subsequently ignored (the dict supplies the
        # replacement values itself).
        if not isinstance(to_replace, (float, int, long, basestring, list, tuple, dict)):
            raise ValueError(
                "to_replace should be a float, int, long, string, list, tuple, or dict")
        if not isinstance(value, (float, int, long, basestring, list, tuple)):
            raise ValueError("value should be a float, int, long, string, list, or tuple")
        rep_dict = dict()
        # Normalize scalars and tuples into lists so only the list/dict cases remain.
        if isinstance(to_replace, (float, int, long, basestring)):
            to_replace = [to_replace]
        if isinstance(to_replace, tuple):
            to_replace = list(to_replace)
        if isinstance(value, tuple):
            value = list(value)
        # Build the old-value -> new-value mapping the JVM API expects.
        if isinstance(to_replace, list) and isinstance(value, list):
            if len(to_replace) != len(value):
                raise ValueError("to_replace and value lists should be of the same length")
            rep_dict = dict(zip(to_replace, value))
        elif isinstance(to_replace, list) and isinstance(value, (float, int, long, basestring)):
            rep_dict = dict([(tr, value) for tr in to_replace])
        elif isinstance(to_replace, dict):
            rep_dict = to_replace
        # '*' asks the JVM side to apply the replacement to every column.
        if subset is None:
            return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
        elif isinstance(subset, basestring):
            subset = [subset]
        if not isinstance(subset, (list, tuple)):
            raise ValueError("subset should be a list or tuple of column names")
        return DataFrame(
            self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of a numerical column of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
present in [[http://dx.doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
:param col: the name of the numerical column
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities
"""
if not isinstance(col, str):
raise ValueError("col should be a string.")
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
return list(jaq)
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
    @ignore_unicode_prefix
    @since(1.3)
    def withColumnRenamed(self, existing, new):
        """Returns a new :class:`DataFrame` by renaming an existing column.
        This is a no-op if schema doesn't contain the given column name.
        :param existing: string, name of the existing column to rename.
        :param new: string, new name of the column.
        >>> df.withColumnRenamed('age', 'age2').collect()
        [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
        """
        return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
    @ignore_unicode_prefix
    def toDF(self, *cols):
        """Returns a new :class:`DataFrame` with new specified column names
        :param cols: list of new column names (string)
        >>> df.toDF('f1', 'f2').collect()
        [Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
        """
        jdf = self._jdf.toDF(self._jseq(cols))
        return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. note:: This method should only be used if the resulting Pandas's DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
import pandas as pd
return pd.DataFrame.from_records(self.collect(), columns=self.columns)
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
    """
    Convert a dict into a JVM Map.
    :param sc: active SparkContext, used to reach the Py4J gateway (``sc._jvm``).
    :param jm: Python dict to convert.
    """
    return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
    """Functionality for working with missing data in :class:`DataFrame`.
    .. versionadded:: 1.4
    """
    def __init__(self, df):
        # The wrapped DataFrame; every method delegates straight back to it.
        self.df = df
    def drop(self, how='any', thresh=None, subset=None):
        return self.df.dropna(how, thresh, subset)
    # Share the docstring of the DataFrame method this delegates to.
    drop.__doc__ = DataFrame.dropna.__doc__
    def fill(self, value, subset=None):
        return self.df.fillna(value, subset)
    fill.__doc__ = DataFrame.fillna.__doc__
    def replace(self, to_replace, value, subset=None):
        return self.df.replace(to_replace, value, subset=subset)
    replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
    """Functionality for statistic functions with :class:`DataFrame`.
    .. versionadded:: 1.4
    """
    def __init__(self, df):
        # The wrapped DataFrame; every method delegates straight back to it.
        self.df = df
    def approxQuantile(self, col, probabilities, relativeError):
        return self.df.approxQuantile(col, probabilities, relativeError)
    # Each alias shares the docstring of the DataFrame method it delegates to.
    approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
    def corr(self, col1, col2, method=None):
        return self.df.corr(col1, col2, method=method)
    corr.__doc__ = DataFrame.corr.__doc__
    def cov(self, col1, col2):
        return self.df.cov(col1, col2)
    cov.__doc__ = DataFrame.cov.__doc__
    def crosstab(self, col1, col2):
        return self.df.crosstab(col1, col2)
    crosstab.__doc__ = DataFrame.crosstab.__doc__
    def freqItems(self, cols, support=None):
        return self.df.freqItems(cols, support=support)
    freqItems.__doc__ = DataFrame.freqItems.__doc__
    def sampleBy(self, col, fractions, seed=None):
        return self.df.sampleBy(col, fractions, seed=seed)
    sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
    """Run this module's doctests against a throwaway local SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, SparkSession
    import pyspark.sql.dataframe
    from pyspark.sql.functions import from_unixtime
    # The doctests see the module namespace plus the fixtures injected below.
    globs = pyspark.sql.dataframe.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['spark'] = SparkSession(sc)
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
                                   Row(name='Bob', age=5)]).toDF()
    globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
                                   Row(name='Bob', age=5, height=None),
                                   Row(name='Tom', age=None, height=None),
                                   Row(name=None, age=None, height=None)]).toDF()
    globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
                                   Row(name='Bob', time=1479442946)]).toDF()
    failures, _tests = doctest.testmod(
        pyspark.sql.dataframe, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    # Stop the context before reporting so the JVM shuts down cleanly.
    globs['sc'].stop()
    if failures:
        exit(-1)
if __name__ == "__main__":
    _test()
| apache-2.0 |
petosegan/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
# NOTE(review): sklearn.lda was deprecated; later releases provide
# sklearn.discriminant_analysis.LinearDiscriminantAnalysis instead.
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
# Unsupervised projection onto the first two principal components.
pca = PCA(n_components=2)
X_pca = pca.fit(X).transform(X)
# Supervised projection: LDA uses the class labels to pick its axes.
lda = LDA(n_components=2)
X_lda = lda.fit(X, y).transform(X)
# Percentage of variance explained by each of the two retained components.
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))
def _scatter_by_class(points, title):
    # One scatter per class, colored r/g/b, on a fresh figure.
    plt.figure()
    for color, cls, target_name in zip("rgb", [0, 1, 2], target_names):
        mask = y == cls
        plt.scatter(points[mask, 0], points[mask, 1], c=color, label=target_name)
    plt.legend()
    plt.title(title)
_scatter_by_class(X_pca, 'PCA of IRIS dataset')
_scatter_by_class(X_lda, 'LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
codrut3/tensorflow | tensorflow/examples/learn/iris_run_config.py | 76 | 2565 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
    """Train and evaluate a 3-layer DNN classifier on Iris with a RunConfig."""
    # Load dataset and hold out 20% for evaluation.
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)
    # A RunConfig controls session-level settings; tf_random_seed makes the
    # estimator's random initialization reproducible.
    run_config = tf.estimator.RunConfig().replace(tf_random_seed=1)
    # One dense numeric feature holding all four Iris measurements.
    feature_columns = [
        tf.feature_column.numeric_column(
            X_FEATURE, shape=np.array(x_train).shape[1:])]
    # Build 3 layer DNN with 10, 20, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3,
        config=run_config)
    # Train: unlimited epochs, shuffled, capped at 200 optimizer steps.
    train_input = tf.estimator.inputs.numpy_input_fn(
        x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input, steps=200)
    # Predict: one deterministic pass over the held-out set.
    test_input = tf.estimator.inputs.numpy_input_fn(
        x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
    predictions = classifier.predict(input_fn=test_input)
    y_predicted = np.array([p['class_ids'] for p in predictions])
    y_predicted = y_predicted.reshape(np.array(y_test).shape)
    # Score with sklearn.
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy (sklearn): {0:f}'.format(score))
    # Score with tensorflow.
    scores = classifier.evaluate(input_fn=test_input)
    print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
    tf.app.run()
| apache-2.0 |
s20121035/rk3288_android5.1_repo | cts/suite/audio_quality/test_description/processing/check_spectrum.py | 5 | 5840 | #!/usr/bin/python
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from consts import *
import numpy as np
import scipy as sp
import scipy.fftpack as fft
import matplotlib.pyplot as plt
import sys
sys.path.append(sys.path[0])
import calc_delay
# check if amplitude ratio of DUT / Host signal
# lies in the given error boundary
# input: host record
# device record,
# sampling rate
# low frequency in Hz,
# high frequency in Hz,
# allowed error in negative side for pass in %,
# allowed error in positive side for pass in %
# output: min value in negative side, normalized to 1.0
# max value in positive side
# calculated amplitude ratio in magnitude (DUT / Host)
def do_check_spectrum(hostData, DUTData, samplingRate, fLow, fHigh, margainLow, margainHigh):
    """Check whether the DUT/host amplitude ratio stays inside the margins.

    Returns (passFail, negativeMin, positiveMax, RatioResult): the ratio is
    normalized so its mean is 1.0, and RatioResult is the normalized ratio
    as int16 fixed point scaled by 1024.
    """
    # reduce FFT resolution to have averaging effects
    N = 512 if (len(hostData) > 512) else len(hostData)
    # Python 2 integer division maps a frequency to an FFT bin index;
    # the +1 skips the DC bin.
    iLow = N * fLow / samplingRate + 1 # 1 for DC
    # Clamp both bin indices to the one-sided spectrum range.
    if iLow > (N / 2 - 1):
        iLow = (N / 2 - 1)
    iHigh = N * fHigh / samplingRate + 1 # 1 for DC
    if iHigh > (N / 2 + 1):
        iHigh = N / 2 + 1
    print fLow, iLow, fHigh, iHigh, samplingRate
    # Power spectral density of each recording (Hann window, no overlap).
    Phh, freqs = plt.psd(hostData, NFFT=N, Fs=samplingRate, Fc=0, detrend=plt.mlab.detrend_none,\
        window=plt.mlab.window_hanning, noverlap=0, pad_to=None, sides='onesided',\
        scale_by_freq=False)
    Pdd, freqs = plt.psd(DUTData, NFFT=N, Fs=samplingRate, Fc=0, detrend=plt.mlab.detrend_none,\
        window=plt.mlab.window_hanning, noverlap=0, pad_to=None, sides='onesided',\
        scale_by_freq=False)
    print len(Phh), len(Pdd)
    print "Phh", abs(Phh[iLow:iHigh])
    print "Pdd", abs(Pdd[iLow:iHigh])
    # Amplitude ratio = sqrt of the power ratio, restricted to [iLow, iHigh).
    amplitudeRatio = np.sqrt(abs(Pdd[iLow:iHigh]/Phh[iLow:iHigh]))
    ratioMean = np.mean(amplitudeRatio)
    # Normalize so a flat response sits at 1.0 regardless of overall gain.
    amplitudeRatio = amplitudeRatio / ratioMean
    print "Normialized ratio", amplitudeRatio
    print "ratio mean for normalization", ratioMean
    positiveMax = abs(max(amplitudeRatio))
    negativeMin = abs(min(amplitudeRatio))
    # Pass when the peak stays below 1 + margainHigh% and the dip stays
    # within margainLow% below 1.
    passFail = True if (positiveMax < (margainHigh / 100.0 + 1.0)) and\
        ((1.0 - negativeMin) < margainLow / 100.0) else False
    RatioResult = np.zeros(len(amplitudeRatio), dtype=np.int16)
    for i in range(len(amplitudeRatio)):
        RatioResult[i] = amplitudeRatio[i] * 1024 # make fixed point
    print "positiveMax", positiveMax, "negativeMin", negativeMin
    return (passFail, negativeMin, positiveMax, RatioResult)
def toMono(stereoData):
    """Extract one channel (even indices) of an interleaved stereo buffer.

    Returns a float64 numpy array of length len(stereoData) // 2 — the same
    result as the original element-by-element copy, computed with a single
    vectorized slice. ``//`` also keeps the index math correct on Python 3.
    """
    n = len(stereoData) // 2
    # Even indices 0, 2, ..., 2*(n-1) form one channel of the interleaving;
    # the explicit stop 2*n drops a trailing odd sample, matching the loop.
    return np.array(stereoData[:2 * n:2], dtype=np.float64)
def check_spectrum(inputData, inputTypes):
    """Entry point: validate inputs, align recordings, run the spectrum check.

    inputData layout: [host recording, DUT recording, samplingRate, fLow,
    fHigh, margainLow, margainHigh]; inputTypes carries the declared type
    constant for each slot. Returns [result, outputData, outputTypes].
    """
    output = []
    outputData = []
    outputTypes = []
    # basic sanity check
    inputError = False
    if (inputTypes[0] != TYPE_MONO) and (inputTypes[0] != TYPE_STEREO):
        inputError = True
    if (inputTypes[1] != TYPE_MONO) and (inputTypes[1] != TYPE_STEREO):
        inputError = True
    if (inputTypes[2] != TYPE_I64):
        inputError = True
    if (inputTypes[3] != TYPE_I64):
        inputError = True
    if (inputTypes[4] != TYPE_I64):
        inputError = True
    if (inputTypes[5] != TYPE_DOUBLE):
        inputError = True
    if (inputTypes[6] != TYPE_DOUBLE):
        inputError = True
    if inputError:
        print "input error"
        output.append(RESULT_ERROR)
        output.append(outputData)
        output.append(outputTypes)
        return output
    # Collapse stereo recordings to a single channel before comparing.
    hostData = inputData[0]
    if inputTypes[0] == TYPE_STEREO:
        hostData = toMono(hostData)
    dutData = inputData[1]
    if inputTypes[1] == TYPE_STEREO:
        dutData = toMono(dutData)
    samplingRate = inputData[2]
    fLow = inputData[3]
    fHigh = inputData[4]
    margainLow = inputData[5]
    margainHigh = inputData[6]
    # Time-align the two recordings: estimate the delay of the shorter one
    # inside the longer one, then trim both to the same length.
    delay = 0
    N = 0
    hostData_ = hostData
    dutData_ = dutData
    if len(hostData) > len(dutData):
        delay = calc_delay.calc_delay(hostData, dutData)
        N = len(dutData)
        hostData_ = hostData[delay:delay+N]
    if len(hostData) < len(dutData):
        delay = calc_delay.calc_delay(dutData, hostData)
        N = len(hostData)
        dutData_ = dutData[delay:delay+N]
    print "delay ", delay, "deviceRecording samples ", N
    (passFail, minError, maxError, TF) = do_check_spectrum(hostData_, dutData_,\
        samplingRate, fLow, fHigh, margainLow, margainHigh)
    # RESULT_OK (not RESULT_ERROR) on failure: the check ran, it just didn't pass.
    if passFail:
        output.append(RESULT_PASS)
    else:
        output.append(RESULT_OK)
    outputData.append(minError)
    outputTypes.append(TYPE_DOUBLE)
    outputData.append(maxError)
    outputTypes.append(TYPE_DOUBLE)
    outputData.append(TF)
    outputTypes.append(TYPE_MONO)
    output.append(outputData)
    output.append(outputTypes)
    return output
# test code
if __name__=="__main__":
    # Manual smoke test: compare a generated signal against itself, which
    # should yield a flat (all ~1.0) normalized amplitude ratio.
    sys.path.append(sys.path[0])
    mod = __import__("gen_random")
    peakAmpl = 10000
    durationInMSec = 1000
    samplingRate = 44100
    fLow = 500
    fHigh = 15000
    data = getattr(mod, "do_gen_random")(peakAmpl, durationInMSec, samplingRate, fHigh,\
        stereo=False)
    print len(data)
    (passFail, minVal, maxVal, ampRatio) = do_check_spectrum(data, data, samplingRate, fLow, fHigh,\
        1.0, 1.0)
    plt.plot(ampRatio)
    plt.show()
| gpl-3.0 |
aigamedev/scikit-neuralnetwork | sknn/tests/test_classifier.py | 3 | 6807 | import unittest
from nose.tools import (assert_is_not_none, assert_true, assert_raises,
assert_in, assert_equal, assert_less_equal)
import numpy
from sklearn.base import clone
from sknn.mlp import Classifier as MLPC
from sknn.mlp import Layer as L, Convolution as C
class TestClassifierFunctionality(unittest.TestCase):
    """Behavioral checks for the public API of the MLP classifier."""

    def setUp(self):
        self.nn = MLPC(layers=[L("Softmax")], n_iter=1)

    def test_IsClassifier(self):
        assert_true(self.nn.is_classifier)

    def test_FitAutoInitialize(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(X, y)
        assert_true(self.nn.is_initialized)

    def test_ExplicitValidSet(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8,))
        self.nn.valid_set = (X, y)
        self.nn.fit(X, y)
        assert_true(self.nn.is_initialized)

    def test_PartialFit(self):
        X, y = numpy.zeros((8, 4)), numpy.random.randint(0, 5, (8,))
        self.nn.partial_fit(X, y, classes=[0, 1, 2, 3])
        self.nn.partial_fit(X * 2.0, y + 1, classes=[0, 1, 2, 3])

    def test_PredictUninitializedNoUnitCount(self):
        X = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, X)

    def test_PredictUninitializedNoLabels(self):
        self.nn.layers[-1].units = 4
        X = numpy.zeros((8, 16))
        assert_raises(AssertionError, self.nn.predict, X)

    def test_PredictBinaryProbability(self):
        X = numpy.random.uniform(-1.0, 1.0, size=(8, 16))
        y = numpy.array((X.sum(axis=1) >= 0.0), dtype=numpy.int32)
        # Force both classes to be present in the labels.
        y[0], y[-1] = 0, 1
        self.nn.fit(X, y)
        probabilities = self.nn.predict_proba(X)
        predicted = self.nn.predict(X)
        labels = numpy.unique(y)
        assert_equal(2, labels.shape[0])
        assert_equal((8, 2), probabilities.shape)
        assert_true((probabilities >= 0.0).all())
        assert_true((probabilities <= 1.0).all())
        assert_true((abs(probabilities.sum(axis=1) - 1.0) < 1E-9).all())

    def test_PredictClasses(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(X, y)
        self.nn.batch_size = 4
        predicted = self.nn.predict(X)
        assert_equal(type(y), type(predicted))
        assert_equal(y.shape[0], predicted.shape[0])
        labels = numpy.unique(y)
        assert_equal(len(self.nn.classes_), 1)
        assert_true((self.nn.classes_[0] == labels).all())

    def test_PredictLargerBatchSize(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, 1))
        self.nn.batch_size = 32
        self.nn.fit(X, y)
        predicted = self.nn.predict(X)
        assert_equal(type(y), type(predicted))
        assert_equal(y.shape[0], predicted.shape[0])

    def test_PredictMultiClass(self):
        X, y = numpy.zeros((32, 16)), numpy.random.randint(0, 3, (32, 2))
        self.nn.fit(X, y)
        predicted = self.nn.predict(X)
        assert_equal(type(y), type(predicted))
        assert_equal(y.shape, predicted.shape)
        assert_equal(len(self.nn.classes_), 2)
        assert_equal(self.nn.classes_[0].shape[0], 3)
        assert_equal(self.nn.classes_[1].shape[0], 3)

    def test_EstimateProbalities(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(X, y)
        probabilities = self.nn.predict_proba(X)
        assert_equal(type(y), type(probabilities))
        assert_equal(X.shape[0], probabilities.shape[0])
        assert_true((probabilities >= 0.0).all())
        assert_true((probabilities <= 1.0).all())
        assert_true((abs(probabilities.sum(axis=1) - 1.0) < 1E-9).all())

    def test_MultipleProbalitiesAsList(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8, 4))
        self.nn.fit(X, y)
        probabilities = self.nn.predict_proba(X)
        assert_equal(list, type(probabilities))
        assert_equal(4, len(probabilities))
        for per_label in probabilities:
            assert_equal(X.shape[0], per_label.shape[0])
            assert_less_equal(per_label.shape[1], 5)
            assert_true((per_label >= 0.0).all())
            assert_true((per_label <= 1.0).all())
            assert_true((abs(per_label.sum(axis=1) - 1.0) < 1E-9).all())

    def test_CalculateScore(self):
        X, y = numpy.zeros((8, 16)), numpy.random.randint(0, 5, (8,))
        self.nn.fit(X, y)
        score = self.nn.score(X, y)
        assert_equal(type(score), numpy.float64)
class TestClassifierClone(TestClassifierFunctionality):
    """Re-run every functionality test above on a clone()d estimator."""

    def setUp(self):
        prototype = MLPC(layers=[L("Sigmoid")], n_iter=1)
        self.nn = clone(prototype)
class TestClassifierInterface(unittest.TestCase):
    """Checks for parameter handling via get_params() and clone()."""

    def check_values(self, params):
        # Explicitly-set constructor values must survive round-tripping.
        assert_equal(params['learning_rate'], 0.05)
        assert_equal(params['n_iter'], 456)
        assert_equal(params['n_stable'], 123)
        assert_equal(params['dropout_rate'], 0.25)
        assert_equal(params['regularize'], 'dropout')
        assert_equal(params['valid_size'], 0.2)

    def test_GetParamValues(self):
        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
                  n_stable=123, valid_size=0.2, dropout_rate=0.25)
        self.check_values(nn.get_params())

    def test_CloneWithValues(self):
        nn = MLPC(layers=[L("Linear")], learning_rate=0.05, n_iter=456,
                  n_stable=123, valid_size=0.2, dropout_rate=0.25)
        duplicate = clone(nn)
        self.check_values(duplicate.get_params())

    def check_defaults(self, params):
        # Unset parameters must report their documented defaults.
        assert_equal(params['learning_rate'], 0.01)
        assert_equal(params['n_iter'], None)
        assert_equal(params['n_stable'], 10)
        assert_equal(params['regularize'], None)
        assert_equal(params['valid_size'], 0.0)

    def test_GetParamDefaults(self):
        nn = MLPC(layers=[L("Gaussian")])
        self.check_defaults(nn.get_params())

    def test_CloneDefaults(self):
        nn = MLPC(layers=[L("Gaussian")])
        duplicate = clone(nn)
        self.check_defaults(duplicate.get_params())

    def test_ConvertToString(self):
        nn = MLPC(layers=[L("Gaussian")])
        assert_equal(str, type(str(nn)))

    def test_RepresentationDenseLayer(self):
        nn = MLPC(layers=[L("Gaussian")])
        rep = repr(nn)
        assert_equal(str, type(rep))
        assert_in("sknn.nn.Layer `Gaussian`", rep)

    def test_RepresentationConvolution(self):
        nn = MLPC(layers=[C("Rectifier")])
        rep = repr(nn)
        assert_equal(str, type(rep))
        assert_in("sknn.nn.Convolution `Rectifier`", rep)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
    """Reference O(N^3) Floyd-Warshall all-pairs shortest paths.

    Zero entries in *graph* denote missing edges; unreachable pairs are
    reported as 0 in the result. The input array is modified in place, so
    callers pass a copy.
    """
    n = graph.shape[0]
    # Missing edges become "unreachable" for the relaxation step.
    graph[graph == 0] = np.inf
    # Every node is at distance zero from itself.
    graph.flat[::n + 1] = 0
    if not directed:
        graph = np.minimum(graph, graph.T)
    for mid in range(n):
        for src in range(n):
            for dst in range(n):
                relaxed = graph[src, mid] + graph[mid, dst]
                if relaxed < graph[src, dst]:
                    graph[src, dst] = relaxed
    # Convention of the fast solver: unreachable pairs are 0, not inf.
    graph[np.isinf(graph)] = 0
    return graph
def generate_graph(N=20):
    """Build a reproducible sparse random distance matrix of shape (N, N)."""
    rng = np.random.RandomState(0)
    distances = rng.random_sample((N, N))
    # Symmetrize: distances are not direction-dependent.
    distances = distances + distances.T
    # Zero out roughly half of the entries to make the graph sparse.
    drop = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
    distances[drop] = 0
    # A node is at distance zero from itself.
    distances.flat[::N + 1] = 0
    return distances
def test_floyd_warshall():
    """The optimized FW solver must agree with the slow reference."""
    distances = generate_graph(20)
    for directed in (True, False):
        fast = graph_shortest_path(distances, directed, 'FW')
        reference = floyd_warshall_slow(distances.copy(), directed)
        assert_array_almost_equal(fast, reference)
def test_dijkstra():
    """Dijkstra's algorithm must agree with the slow FW reference."""
    distances = generate_graph(20)
    for directed in (True, False):
        via_dijkstra = graph_shortest_path(distances, directed, 'D')
        reference = floyd_warshall_slow(distances.copy(), directed)
        assert_array_almost_equal(via_dijkstra, reference)
def test_shortest_path():
    """single_source_shortest_path_length must match the FW reference.

    Path *lengths* (hop counts) are compared rather than costs, so all
    nonzero distances are collapsed to 1 first.
    """
    adjacency = generate_graph(20)
    adjacency[adjacency != 0] = 1
    for directed in (True, False):
        if not directed:
            adjacency = np.minimum(adjacency, adjacency.T)
        reference = floyd_warshall_slow(adjacency.copy(), directed)
        for source in range(adjacency.shape[0]):
            # Non-reachable nodes have distance 0 in the reference, so
            # default missing keys to 0 here as well.
            lengths = defaultdict(int)
            lengths.update(single_source_shortest_path_length(adjacency,
                                                              source))
            for target in range(reference[source].shape[0]):
                assert_array_almost_equal(lengths[target],
                                          reference[source, target])
def test_dijkstra_bug_fix():
    """Regression test: Dijkstra and FW must agree on this small graph."""
    X = np.array([[0., 0., 4.],
                  [1., 0., 2.],
                  [0., 5., 0.]])
    via_fw = graph_shortest_path(X, directed=False, method='FW')
    via_dijkstra = graph_shortest_path(X, directed=False, method='D')
    assert_array_almost_equal(via_dijkstra, via_fw)
| bsd-3-clause |
rikima/spark | python/pyspark/sql/types.py | 2 | 65691 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
import ctypes
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark import SparkContext
from pyspark.serializers import CloudPickleSerializer
# Public API of this module: the concrete DataType classes plus the bases.
__all__ = [
    "DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
    "TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
    "LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
    """Base class for all Spark SQL data types."""

    def __repr__(self):
        return self.__class__.__name__

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def typeName(cls):
        # Derive the SQL name by stripping the "Type" suffix from the class
        # name, e.g. StringType -> "string".
        return cls.__name__[:-4].lower()

    def simpleString(self):
        return self.typeName()

    def jsonValue(self):
        return self.typeName()

    def json(self):
        return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)

    def needConversion(self):
        """Whether values of this type require conversion between Python
        objects and the internal SQL representation.

        Returning False lets ArrayType/MapType/StructType skip unnecessary
        per-element conversions.
        """
        return False

    def toInternal(self, obj):
        """Convert a Python object into an internal SQL object."""
        return obj

    def fromInternal(self, obj):
        """Convert an internal SQL object into a native Python object."""
        return obj
# This singleton pattern does not work with pickle, you will get
# another object after pickle and unpickle
class DataTypeSingleton(type):
    """Metaclass that hands out a single shared instance per class.

    Note: the singleton property does not survive pickling; unpickling
    produces a distinct object.
    """

    _instances = {}

    def __call__(cls):
        try:
            return cls._instances[cls]
        except KeyError:
            cls._instances[cls] = super(DataTypeSingleton, cls).__call__()
            return cls._instances[cls]
class NullType(DataType):
    """Null type.

    The data type representing None, used for the types that cannot be inferred.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class AtomicType(DataType):
    """An internal marker base for every scalar SQL type -- i.e. everything
    that is not null, a UDT, an array, a struct, or a map."""
class NumericType(AtomicType):
    """Numeric data types.

    Common base for the integral and fractional type families below.
    """
class IntegralType(NumericType):
    """Integral data types.

    Base class for ByteType, ShortType, IntegerType and LongType.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class FractionalType(NumericType):
    """Fractional data types.

    Base class for FloatType, DoubleType and DecimalType.
    """
class StringType(AtomicType):
    """String data type.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class BinaryType(AtomicType):
    """Binary (byte array) data type.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class BooleanType(AtomicType):
    """Boolean data type.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class DateType(AtomicType):
    """Date (datetime.date) data type.

    Internally a date is stored as the number of days since the Unix
    epoch (1970-01-01).
    """

    __metaclass__ = DataTypeSingleton

    EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()

    def needConversion(self):
        return True

    def toInternal(self, d):
        # None maps to SQL NULL.
        if d is None:
            return None
        return d.toordinal() - self.EPOCH_ORDINAL

    def fromInternal(self, v):
        if v is None:
            return None
        return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimestampType(AtomicType):
    """Timestamp (datetime.datetime) data type.

    Internally stored as microseconds since the Unix epoch.
    """

    __metaclass__ = DataTypeSingleton

    def needConversion(self):
        return True

    def toInternal(self, dt):
        if dt is None:
            return None
        if dt.tzinfo:
            # Timezone-aware: convert via the UTC time tuple.
            seconds = calendar.timegm(dt.utctimetuple())
        else:
            # Naive: interpret in the local timezone.
            seconds = time.mktime(dt.timetuple())
        return int(seconds) * 1000000 + dt.microsecond

    def fromInternal(self, ts):
        if ts is None:
            return None
        # Integer floor-division/modulo avoids float precision loss.
        return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
    """Decimal (decimal.Decimal) data type.

    A DecimalType carries a fixed precision (the maximum total number of
    digits) and scale (the number of digits right of the decimal point).
    For example, (5, 2) can represent values in [-999.99, 999.99].

    The precision can be up to 38 and the scale must be less than or equal
    to the precision. The default is (10, 0); schemas inferred from
    decimal.Decimal objects use DecimalType(38, 18).

    :param precision: the maximum total number of digits (default: 10)
    :param scale: the number of digits on right side of dot. (default: 0)
    """

    def __init__(self, precision=10, scale=0):
        self.precision = precision
        self.scale = scale
        self.hasPrecisionInfo = True  # this is public API

    def simpleString(self):
        return "decimal(%d,%d)" % (self.precision, self.scale)

    def jsonValue(self):
        # The JSON form is identical to the simple string form.
        return self.simpleString()

    def __repr__(self):
        return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
    """Double data type, representing double precision floats.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class FloatType(FractionalType):
    """Float data type, representing single precision floats.
    """

    __metaclass__ = DataTypeSingleton  # Python 2 metaclass hook: one shared instance
class ByteType(IntegralType):
    """Byte data type, i.e. a signed integer in a single byte.
    """

    def simpleString(self):
        # Spark SQL's name for an 8-bit signed integer.
        return 'tinyint'
class IntegerType(IntegralType):
    """Int data type, i.e. a signed 32-bit integer.
    """

    def simpleString(self):
        # Spark SQL's name for a 32-bit signed integer.
        return 'int'
class LongType(IntegralType):
    """Long data type, i.e. a signed 64-bit integer.

    If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
    please use :class:`DecimalType`.
    """

    def simpleString(self):
        # Spark SQL's name for a 64-bit signed integer.
        return 'bigint'
class ShortType(IntegralType):
    """Short data type, i.e. a signed 16-bit integer.
    """

    def simpleString(self):
        # Spark SQL's name for a 16-bit signed integer.
        return 'smallint'
class ArrayType(DataType):
    """Array data type.

    :param elementType: :class:`DataType` of each element in the array.
    :param containsNull: boolean, whether the array can contain null (None) values.
    """

    def __init__(self, elementType, containsNull=True):
        """
        >>> ArrayType(StringType()) == ArrayType(StringType(), True)
        True
        >>> ArrayType(StringType(), False) == ArrayType(StringType())
        False
        """
        assert isinstance(elementType, DataType),\
            "elementType %s should be an instance of %s" % (elementType, DataType)
        self.elementType = elementType
        self.containsNull = containsNull

    def simpleString(self):
        return 'array<%s>' % self.elementType.simpleString()

    def __repr__(self):
        return "ArrayType(%s,%s)" % (self.elementType, str(self.containsNull).lower())

    def jsonValue(self):
        return {"type": self.typeName(),
                "elementType": self.elementType.jsonValue(),
                "containsNull": self.containsNull}

    @classmethod
    def fromJson(cls, json):
        return ArrayType(_parse_datatype_json_value(json["elementType"]),
                         json["containsNull"])

    def needConversion(self):
        # Only convert when the element type itself needs conversion.
        return self.elementType.needConversion()

    def toInternal(self, obj):
        if not self.needConversion():
            return obj
        if not obj:
            # None (SQL NULL) and empty sequences pass through untouched.
            return obj
        return [self.elementType.toInternal(v) for v in obj]

    def fromInternal(self, obj):
        if not self.needConversion():
            return obj
        if not obj:
            return obj
        return [self.elementType.fromInternal(v) for v in obj]
class MapType(DataType):
    """Map data type.

    :param keyType: :class:`DataType` of the keys in the map.
    :param valueType: :class:`DataType` of the values in the map.
    :param valueContainsNull: indicates whether values can contain null (None) values.

    Keys in a map data type are not allowed to be null (None).
    """

    def __init__(self, keyType, valueType, valueContainsNull=True):
        """
        >>> (MapType(StringType(), IntegerType())
        ...        == MapType(StringType(), IntegerType(), True))
        True
        >>> (MapType(StringType(), IntegerType(), False)
        ...        == MapType(StringType(), FloatType()))
        False
        """
        assert isinstance(keyType, DataType),\
            "keyType %s should be an instance of %s" % (keyType, DataType)
        assert isinstance(valueType, DataType),\
            "valueType %s should be an instance of %s" % (valueType, DataType)
        self.keyType = keyType
        self.valueType = valueType
        self.valueContainsNull = valueContainsNull

    def simpleString(self):
        return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())

    def __repr__(self):
        return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
                                      str(self.valueContainsNull).lower())

    def jsonValue(self):
        return {"type": self.typeName(),
                "keyType": self.keyType.jsonValue(),
                "valueType": self.valueType.jsonValue(),
                "valueContainsNull": self.valueContainsNull}

    @classmethod
    def fromJson(cls, json):
        return MapType(_parse_datatype_json_value(json["keyType"]),
                       _parse_datatype_json_value(json["valueType"]),
                       json["valueContainsNull"])

    def needConversion(self):
        # Conversion is required if either the key or the value side needs it.
        return self.keyType.needConversion() or self.valueType.needConversion()

    def toInternal(self, obj):
        if not self.needConversion():
            return obj
        if not obj:
            # None (SQL NULL) and empty dicts pass through untouched.
            return obj
        return dict((self.keyType.toInternal(k), self.valueType.toInternal(v))
                    for k, v in obj.items())

    def fromInternal(self, obj):
        if not self.needConversion():
            return obj
        if not obj:
            return obj
        return dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v))
                    for k, v in obj.items())
class StructField(DataType):
    """A field in :class:`StructType`.

    :param name: string, name of the field.
    :param dataType: :class:`DataType` of the field.
    :param nullable: boolean, whether the field can be null (None) or not.
    :param metadata: a dict from string to simple type that can be converted
        to JSON automatically
    """

    def __init__(self, name, dataType, nullable=True, metadata=None):
        """
        >>> (StructField("f1", StringType(), True)
        ...      == StructField("f1", StringType(), True))
        True
        >>> (StructField("f1", StringType(), True)
        ...      == StructField("f2", StringType(), True))
        False
        """
        assert isinstance(dataType, DataType),\
            "dataType %s should be an instance of %s" % (dataType, DataType)
        assert isinstance(name, basestring), "field name %s should be string" % (name)
        if not isinstance(name, str):
            # Python 2: normalize unicode names to utf-8 encoded byte strings.
            name = name.encode('utf-8')
        self.name = name
        self.dataType = dataType
        self.nullable = nullable
        self.metadata = metadata or {}

    def simpleString(self):
        return '%s:%s' % (self.name, self.dataType.simpleString())

    def __repr__(self):
        return "StructField(%s,%s,%s)" % (self.name, self.dataType,
                                          str(self.nullable).lower())

    def jsonValue(self):
        return {"name": self.name,
                "type": self.dataType.jsonValue(),
                "nullable": self.nullable,
                "metadata": self.metadata}

    @classmethod
    def fromJson(cls, json):
        return StructField(json["name"],
                           _parse_datatype_json_value(json["type"]),
                           json["nullable"],
                           json["metadata"])

    def needConversion(self):
        # A field needs conversion exactly when its data type does.
        return self.dataType.needConversion()

    def toInternal(self, obj):
        return self.dataType.toInternal(obj)

    def fromInternal(self, obj):
        return self.dataType.fromInternal(obj)

    def typeName(self):
        # A StructField is not itself a type; point callers at its dataType.
        raise TypeError(
            "StructField does not have typeName. "
            "Use typeName on its type explicitly instead.")
class StructType(DataType):
    """Struct type, consisting of a list of :class:`StructField`.

    This is the data type representing a :class:`Row`.

    Iterating a :class:`StructType` will iterate its :class:`StructField`\\s.
    A contained :class:`StructField` can be accessed by name or position.

    >>> struct1 = StructType([StructField("f1", StringType(), True)])
    >>> struct1["f1"]
    StructField(f1,StringType,true)
    >>> struct1[0]
    StructField(f1,StringType,true)
    """

    def __init__(self, fields=None):
        """
        >>> struct1 = StructType([StructField("f1", StringType(), True)])
        >>> struct2 = StructType([StructField("f1", StringType(), True)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType([StructField("f1", StringType(), True)])
        >>> struct2 = StructType([StructField("f1", StringType(), True),
        ...     StructField("f2", IntegerType(), False)])
        >>> struct1 == struct2
        False
        """
        if not fields:
            self.fields = []
            self.names = []
        else:
            self.fields = fields
            self.names = [f.name for f in fields]
            assert all(isinstance(f, StructField) for f in fields),\
                "fields should be a list of StructField"
        # Precalculated list of fields that need conversion with fromInternal/toInternal functions
        self._needConversion = [f.needConversion() for f in self]
        self._needSerializeAnyField = any(self._needConversion)

    def add(self, field, data_type=None, nullable=True, metadata=None):
        """
        Construct a StructType by adding new elements to it to define the schema. The method accepts
        either:

            a) A single parameter which is a StructField object.
            b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
               metadata(optional). The data_type parameter may be either a String or a
               DataType object.

        >>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
        >>> struct2 = StructType([StructField("f1", StringType(), True), \\
        ...     StructField("f2", StringType(), True, None)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType().add(StructField("f1", StringType(), True))
        >>> struct2 = StructType([StructField("f1", StringType(), True)])
        >>> struct1 == struct2
        True
        >>> struct1 = StructType().add("f1", "string", True)
        >>> struct2 = StructType([StructField("f1", StringType(), True)])
        >>> struct1 == struct2
        True

        :param field: Either the name of the field or a StructField object
        :param data_type: If present, the DataType of the StructField to create
        :param nullable: Whether the field to add should be nullable (default True)
        :param metadata: Any additional metadata (default None)
        :return: a new updated StructType
        """
        if isinstance(field, StructField):
            self.fields.append(field)
            self.names.append(field.name)
        else:
            if isinstance(field, str) and data_type is None:
                raise ValueError("Must specify DataType if passing name of struct_field to create.")

            if isinstance(data_type, str):
                # Accept a type name string, e.g. "string", and parse it.
                data_type_f = _parse_datatype_json_value(data_type)
            else:
                data_type_f = data_type
            self.fields.append(StructField(field, data_type_f, nullable, metadata))
            self.names.append(field)
        # Precalculated list of fields that need conversion with fromInternal/toInternal functions
        self._needConversion = [f.needConversion() for f in self]
        self._needSerializeAnyField = any(self._needConversion)
        return self

    def __iter__(self):
        """Iterate the fields"""
        return iter(self.fields)

    def __len__(self):
        """Return the number of fields."""
        return len(self.fields)

    def __getitem__(self, key):
        """Access fields by name or slice."""
        if isinstance(key, str):
            for field in self:
                if field.name == key:
                    return field
            raise KeyError('No StructField named {0}'.format(key))
        elif isinstance(key, int):
            try:
                return self.fields[key]
            except IndexError:
                raise IndexError('StructType index out of range')
        elif isinstance(key, slice):
            return StructType(self.fields[key])
        else:
            raise TypeError('StructType keys should be strings, integers or slices')

    def simpleString(self):
        return 'struct<%s>' % (','.join(f.simpleString() for f in self))

    def __repr__(self):
        return ("StructType(List(%s))" %
                ",".join(str(field) for field in self))

    def jsonValue(self):
        return {"type": self.typeName(),
                "fields": [f.jsonValue() for f in self]}

    @classmethod
    def fromJson(cls, json):
        return StructType([StructField.fromJson(f) for f in json["fields"]])

    def fieldNames(self):
        """
        Returns all field names in a list.

        >>> struct = StructType([StructField("f1", StringType(), True)])
        >>> struct.fieldNames()
        ['f1']
        """
        return list(self.names)

    def needConversion(self):
        # We need convert Row()/namedtuple into tuple()
        return True

    def toInternal(self, obj):
        if obj is None:
            return
        if self._needSerializeAnyField:
            # Only calling toInternal function for fields that need conversion
            if isinstance(obj, dict):
                return tuple(f.toInternal(obj.get(n)) if c else obj.get(n)
                             for n, f, c in zip(self.names, self.fields, self._needConversion))
            elif isinstance(obj, (tuple, list)):
                return tuple(f.toInternal(v) if c else v
                             for f, v, c in zip(self.fields, obj, self._needConversion))
            elif hasattr(obj, "__dict__"):
                # Plain objects: read field values from the instance dict.
                d = obj.__dict__
                return tuple(f.toInternal(d.get(n)) if c else d.get(n)
                             for n, f, c in zip(self.names, self.fields, self._needConversion))
            else:
                raise ValueError("Unexpected tuple %r with StructType" % obj)
        else:
            if isinstance(obj, dict):
                return tuple(obj.get(n) for n in self.names)
            elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
                return tuple(obj[n] for n in self.names)
            elif isinstance(obj, (list, tuple)):
                return tuple(obj)
            elif hasattr(obj, "__dict__"):
                d = obj.__dict__
                return tuple(d.get(n) for n in self.names)
            else:
                raise ValueError("Unexpected tuple %r with StructType" % obj)

    def fromInternal(self, obj):
        if obj is None:
            return
        if isinstance(obj, Row):
            # it's already converted by pickler
            return obj
        if self._needSerializeAnyField:
            # Only calling fromInternal function for fields that need conversion
            values = [f.fromInternal(v) if c else v
                      for f, v, c in zip(self.fields, obj, self._needConversion)]
        else:
            values = obj
        return _create_row(self.names, values)
class UserDefinedType(DataType):
    """User-defined type (UDT).

    .. note:: WARN: Spark Internal Use Only

    Subclasses declare an underlying SQL storage type via :meth:`sqlType`
    and implement :meth:`serialize`/:meth:`deserialize` to map between the
    user object and that storage representation.
    """

    @classmethod
    def typeName(cls):
        return cls.__name__.lower()

    @classmethod
    def sqlType(cls):
        """
        Underlying SQL storage type for this UDT.
        """
        raise NotImplementedError("UDT must implement sqlType().")

    @classmethod
    def module(cls):
        """
        The Python module of the UDT.
        """
        raise NotImplementedError("UDT must implement module().")

    @classmethod
    def scalaUDT(cls):
        """
        The class name of the paired Scala UDT (could be '', if there
        is no corresponding one).
        """
        return ''

    def needConversion(self):
        return True

    @classmethod
    def _cachedSqlType(cls):
        """
        Cache the sqlType() into class, because it's heavily used in `toInternal`.
        """
        if not hasattr(cls, "_cached_sql_type"):
            cls._cached_sql_type = cls.sqlType()
        return cls._cached_sql_type

    def toInternal(self, obj):
        # Serialize via the user hook, then convert with the storage type.
        # None (SQL NULL) falls through and returns None implicitly.
        if obj is not None:
            return self._cachedSqlType().toInternal(self.serialize(obj))

    def fromInternal(self, obj):
        v = self._cachedSqlType().fromInternal(obj)
        if v is not None:
            return self.deserialize(v)

    def serialize(self, obj):
        """
        Converts a user-type object into a SQL datum.
        """
        raise NotImplementedError("UDT must implement toInternal().")

    def deserialize(self, datum):
        """
        Converts a SQL datum into a user-type object.
        """
        raise NotImplementedError("UDT must implement fromInternal().")

    def simpleString(self):
        return 'udt'

    def json(self):
        return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)

    def jsonValue(self):
        if self.scalaUDT():
            assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
            schema = {
                "type": "udt",
                "class": self.scalaUDT(),
                "pyClass": "%s.%s" % (self.module(), type(self).__name__),
                "sqlType": self.sqlType().jsonValue()
            }
        else:
            # No Scala counterpart: embed the pickled Python class so the
            # type can be reconstructed from the JSON alone.
            ser = CloudPickleSerializer()
            b = ser.dumps(type(self))
            schema = {
                "type": "udt",
                "pyClass": "%s.%s" % (self.module(), type(self).__name__),
                "serializedClass": base64.b64encode(b).decode('utf8'),
                "sqlType": self.sqlType().jsonValue()
            }
        return schema

    @classmethod
    def fromJson(cls, json):
        pyUDT = str(json["pyClass"])  # convert unicode to str
        split = pyUDT.rfind(".")
        pyModule = pyUDT[:split]
        pyClass = pyUDT[split+1:]
        m = __import__(pyModule, globals(), locals(), [pyClass])
        if not hasattr(m, pyClass):
            # Module doesn't expose the class: fall back to the pickled copy.
            s = base64.b64decode(json['serializedClass'].encode('utf-8'))
            UDT = CloudPickleSerializer().loads(s)
        else:
            UDT = getattr(m, pyClass)
        return UDT()

    def __eq__(self, other):
        # UDT instances carry no state; equality is purely by class.
        return type(self) == type(other)
# Registries used by the JSON schema parser below: all atomic types keyed by
# their SQL type name, and the complex (container) types keyed likewise.
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
                 ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
_all_complex_types = dict((v.typeName(), v)
                          for v in [ArrayType, MapType, StructType])

# Matches e.g. "decimal(10,0)" (whitespace tolerated); groups are (precision, scale).
_FIXED_DECIMAL = re.compile("decimal\\(\\s*(\\d+)\\s*,\\s*(\\d+)\\s*\\)")
def _parse_datatype_string(s):
    """
    Parses the given data type string to a :class:`DataType`. The data type string format equals
    to :class:`DataType.simpleString`, except that top level struct type can omit
    the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
    of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
    for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted
    string and case-insensitive strings.

    >>> _parse_datatype_string("int ")
    IntegerType
    >>> _parse_datatype_string("INT ")
    IntegerType
    >>> _parse_datatype_string("a: byte, b: decimal(  16 , 8   ) ")
    StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
    >>> _parse_datatype_string("a DOUBLE, b STRING")
    StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true)))
    >>> _parse_datatype_string("a: array< short>")
    StructType(List(StructField(a,ArrayType(ShortType,true),true)))
    >>> _parse_datatype_string(" map<string , string > ")
    MapType(StringType,StringType,true)

    >>> # Error cases
    >>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ParseException:...
    >>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ParseException:...
    >>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ParseException:...
    >>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ParseException:...
    """
    sc = SparkContext._active_spark_context

    def from_ddl_schema(type_str):
        # Delegate "fieldname datatype, ..." parsing to the JVM's
        # StructType.fromDDL, then round-trip through JSON.
        return _parse_datatype_json_string(
            sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json())

    def from_ddl_datatype(type_str):
        # Parse a single datatype string, e.g. "integer" or "struct<a: int>".
        return _parse_datatype_json_string(
            sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json())

    try:
        # DDL format, "fieldname datatype, fieldname datatype".
        return from_ddl_schema(s)
    except Exception as e:
        try:
            # For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc.
            return from_ddl_datatype(s)
        except:
            # NOTE(review): bare except also swallows BaseException (e.g.
            # KeyboardInterrupt); consider narrowing to Exception.
            try:
                # For backwards compatibility, "fieldname: datatype, fieldname: datatype" case.
                return from_ddl_datatype("struct<%s>" % s.strip())
            except:
                # Every fallback failed: surface the original schema error.
                raise e
def _parse_datatype_json_string(json_string):
    """Parses the given data type JSON string.

    This is the Python side of the JSON round-trip used to exchange schemas
    with the JVM (see ``DataType.json``).

    >>> import pickle
    >>> def check_datatype(datatype):
    ...     pickled = pickle.loads(pickle.dumps(datatype))
    ...     assert datatype == pickled
    ...     scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
    ...     python_datatype = _parse_datatype_json_string(scala_datatype.json())
    ...     assert datatype == python_datatype
    >>> for cls in _all_atomic_types.values():
    ...     check_datatype(cls())

    >>> # Simple ArrayType.
    >>> simple_arraytype = ArrayType(StringType(), True)
    >>> check_datatype(simple_arraytype)

    >>> # Simple MapType.
    >>> simple_maptype = MapType(StringType(), LongType())
    >>> check_datatype(simple_maptype)

    >>> # Simple StructType.
    >>> simple_structtype = StructType([
    ...     StructField("a", DecimalType(), False),
    ...     StructField("b", BooleanType(), True),
    ...     StructField("c", LongType(), True),
    ...     StructField("d", BinaryType(), False)])
    >>> check_datatype(simple_structtype)

    >>> # Complex StructType.
    >>> complex_structtype = StructType([
    ...     StructField("simpleArray", simple_arraytype, True),
    ...     StructField("simpleMap", simple_maptype, True),
    ...     StructField("simpleStruct", simple_structtype, True),
    ...     StructField("boolean", BooleanType(), False),
    ...     StructField("withMeta", DoubleType(), False, {"name": "age"})])
    >>> check_datatype(complex_structtype)

    >>> # Complex ArrayType.
    >>> complex_arraytype = ArrayType(complex_structtype, True)
    >>> check_datatype(complex_arraytype)

    >>> # Complex MapType.
    >>> complex_maptype = MapType(complex_structtype,
    ...                           complex_arraytype, False)
    >>> check_datatype(complex_maptype)
    """
    return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
if not isinstance(json_value, dict):
if json_value in _all_atomic_types.keys():
return _all_atomic_types[json_value]()
elif json_value == 'decimal':
return DecimalType()
elif _FIXED_DECIMAL.match(json_value):
m = _FIXED_DECIMAL.match(json_value)
return DecimalType(int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Could not parse datatype: %s" % json_value)
else:
tpe = json_value["type"]
if tpe in _all_complex_types:
return _all_complex_types[tpe].fromJson(json_value)
elif tpe == 'udt':
return UserDefinedType.fromJson(json_value)
else:
raise ValueError("not supported type: %s" % tpe)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
type(None): NullType,
bool: BooleanType,
int: LongType,
float: DoubleType,
str: StringType,
bytearray: BinaryType,
decimal.Decimal: DecimalType,
datetime.date: DateType,
datetime.datetime: TimestampType,
datetime.time: TimestampType,
}
if sys.version < "3":
_type_mappings.update({
unicode: StringType,
long: LongType,
})
# Mapping Python array types to Spark SQL DataType
# We should be careful here. The size of these types in python depends on C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, JVM only support signed types, when converting unsigned types,
# keep in mind that it required 1 more bit when stored as singed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
# The list of all supported array typecodes is stored here
_array_type_mappings = {
# Warning: Actual properties for float and double in C is not specified in C.
# On almost every system supported by both python and JVM, they are IEEE 754
# single-precision binary floating-point format and IEEE 754 double-precision
# binary floating-point format. And we do assume the same thing here for now.
'f': FloatType,
'd': DoubleType
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
# JVM does not have unsigned types, so use signed types that is at least 1
# bit larger to store
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
_array_type_mappings['u'] = StringType
# Type code 'c' are only available at python 2
if sys.version_info[0] < 3:
_array_type_mappings['c'] = StringType
# SPARK-21465:
# In python2, array of 'L' happened to be mistakenly partially supported. To
# avoid breaking user's code, we should keep this partial support. Below is a
# dirty hacking to keep this partial support and make the unit test passes
import platform
if sys.version_info[0] < 3 and platform.python_implementation() != 'PyPy':
if 'L' not in _array_type_mappings.keys():
_array_type_mappings['L'] = LongType
_array_unsigned_int_typecode_ctype_mappings['L'] = ctypes.c_uint
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
else:
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]), True)
else:
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""Infer the schema from dict/namedtuple/object"""
if isinstance(row, dict):
items = sorted(row.items())
elif isinstance(row, (tuple, list)):
if hasattr(row, "__fields__"): # Row
items = zip(row.__fields__, tuple(row))
elif hasattr(row, "_fields"): # namedtuple
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [StructField(k, _infer_type(v), True) for k, v in items]
return StructType(fields)
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, StructType):
nfs = dict((f.name, f.dataType) for f in b.fields)
fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType()),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(StructField(n, nfs[n]))
return StructType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.elementType, b.elementType,
name='element in array %s' % name), True)
elif isinstance(a, MapType):
return MapType(_merge_type(a.keyType, b.keyType, name='key of map %s' % name),
_merge_type(a.valueType, b.valueType, name='value of map %s' % name),
True)
else:
return a
def _need_converter(dataType):
if isinstance(dataType, StructType):
return True
elif isinstance(dataType, ArrayType):
return _need_converter(dataType.elementType)
elif isinstance(dataType, MapType):
return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
elif isinstance(dataType, NullType):
return True
else:
return False
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
_acceptable_types = {
BooleanType: (bool,),
ByteType: (int, long),
ShortType: (int, long),
IntegerType: (int, long),
LongType: (int, long),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
StringType: (str, unicode),
BinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
StructType: (tuple, list, dict),
}
def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is
not checked, so it will become infinity when cast to Java float if it overflows.
>>> _make_type_verifier(StructType([]))(None)
>>> _make_type_verifier(StringType())("")
>>> _make_type_verifier(LongType())(0)
>>> _make_type_verifier(ArrayType(ShortType()))(list(range(3)))
>>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({})
>>> _make_type_verifier(StructType([]))(())
>>> _make_type_verifier(StructType([]))([])
>>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _make_type_verifier(ByteType())(12)
>>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(
... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
>>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(dataType)
def assert_acceptable_types(obj):
assert _type in _acceptable_types, \
new_msg("unknown datatype: %s for object %r" % (dataType, obj))
def verify_acceptable_types(obj):
# subclass of them can not be fromInternal in JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (dataType, obj, type(obj))))
if isinstance(dataType, StringType):
# StringType can work with any types
verify_value = lambda _: _
elif isinstance(dataType, UserDefinedType):
verifier = _make_type_verifier(dataType.sqlType(), name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType)))
verifier(dataType.toInternal(obj))
verify_value = verify_udf
elif isinstance(dataType, ByteType):
def verify_byte(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj))
verify_value = verify_byte
elif isinstance(dataType, ShortType):
def verify_short(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj))
verify_value = verify_short
elif isinstance(dataType, IntegerType):
def verify_integer(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntegerType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(dataType, ArrayType):
element_verifier = _make_type_verifier(
dataType.elementType, dataType.containsNull, name="element in array %s" % name)
def verify_array(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(dataType, MapType):
key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name)
value_verifier = _make_type_verifier(
dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name)
def verify_map(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(dataType, StructType):
verifiers = []
for f in dataType.fields:
verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_struct(obj):
assert_acceptable_types(obj)
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("StructType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_struct
else:
def verify_default(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
row = Row(*values)
row.__fields__ = fields
return row
class Row(tuple):
"""
A row in L{DataFrame}.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
Row can be used to create a row object by using named arguments,
the fields will be sorted by names. It is not allowed to omit
a named argument to represent the value is None or missing. This should be
explicitly set to None in this case.
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row also can be used to create another Row like class, then it
could be used to create Row objects, such as
>>> Person = Row("name", "age")
>>> Person
<Row(name, age)>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
"""
def __new__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
# create row objects
names = sorted(kwargs.keys())
row = tuple.__new__(self, [kwargs[n] for n in names])
row.__fields__ = names
row.__from_dict__ = True
return row
else:
# create row class or objects
return tuple.__new__(self, args)
def asDict(self, recursive=False):
"""
Return as an dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
def __contains__(self, item):
if hasattr(self, "__fields__"):
return item in self.__fields__
else:
return super(Row, self).__contains__(item)
# let object acts like class
def __call__(self, *args):
"""create new Row object"""
return _create_row(self, args)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return super(Row, self).__getitem__(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return super(Row, self).__getitem__(idx)
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '__fields__' and key != "__from_dict__":
raise Exception("Row is read-only")
self.__dict__[key] = value
def __reduce__(self):
"""Returns a tuple so Python knows how to pickle Row."""
if hasattr(self, "__fields__"):
return (_create_row, (self.__fields__, tuple(self)))
else:
return tuple.__reduce__(self)
def __repr__(self):
"""Printable representation of Row used in Python REPL."""
if hasattr(self, "__fields__"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self.__fields__, tuple(self)))
else:
return "<Row(%s)>" % ", ".join(self)
class DateConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.date)
def convert(self, obj, gateway_client):
Date = JavaClass("java.sql.Date", gateway_client)
return Date.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.datetime)
def convert(self, obj, gateway_client):
Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
seconds = (calendar.timegm(obj.utctimetuple()) if obj.tzinfo
else time.mktime(obj.timetuple()))
t = Timestamp(int(seconds) * 1000)
t.setNanos(obj.microsecond * 1000)
return t
# datetime is a subclass of date, we should register DatetimeConverter first
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
from distutils.version import LooseVersion
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) == TimestampType:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
from distutils.version import LooseVersion
import pyarrow as pa
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at) +
"\nPlease install pyarrow >= 0.10.0 for BinaryType support.")
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
def _check_series_convert_date(series, data_type):
"""
Cast the series to datetime.date if it's a date type, otherwise returns the original series.
:param series: pandas.Series
:param data_type: a Spark data type for the series
"""
if type(data_type) == DateType:
return series.dt.date
else:
return series
def _check_dataframe_convert_date(pdf, schema):
""" Correct date type value to use datetime.date.
Pandas DataFrame created from PyArrow uses datetime64[ns] for date type values, but we should
use datetime.date to match the behavior with when Arrow optimization is disabled.
:param pdf: pandas.DataFrame
:param schema: a Spark schema of the pandas.DataFrame
"""
for field in schema:
pdf[field.name] = _check_series_convert_date(pdf[field.name], field.dataType)
return pdf
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
# timestamp is during the hour when the clock is adjusted backward during due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
# dst time (2015-01-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
# Here we explicit choose to use standard time. This matches the default behavior of
# pytz.
#
# Here are some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert from. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
globs = globals()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession.builder.getOrCreate()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
binarybana/samcnet | exps/mpm.py | 1 | 10095 | import os
import sys
import tempfile
import yaml
import zlib
import numpy as np
import simplejson as js
import subprocess as sb
from time import time,sleep
from os import path
from scipy.stats.mstats import mquantiles
try:
from sklearn.lda import LDA
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.feature_selection import SelectKBest, f_classif
import samcnet.mh as mh
import samcnet.samc as samc
from samcnet.mixturepoisson import *
from samcnet.lori import *
except ImportError as e:
sys.exit("Make sure LD_LIBRARY_PATH is set correctly and that the build"+\
" directory is populated by waf.\n\n %s" % str(e))
# Worker configuration is passed through environment variables when run under
# a job server: SERVER names the host, PARAM carries a YAML parameter dict.
if 'WORKHASH' in os.environ:
    try:
        server = os.environ['SERVER']
    except KeyError:  # narrowed from a bare `except:` — only a missing key is expected
        sys.exit("ERROR in worker: Need SERVER environment variable defined.")

if 'PARAM' in os.environ:
    # NOTE(review): yaml.load without an explicit Loader can construct arbitrary
    # objects; prefer yaml.safe_load if PARAM may come from untrusted sources.
    params = yaml.load(os.environ['PARAM'])
else:
    params = {}
def setv(p,s,d,conv=None):
if s not in p:
p[s] = str(d)
return d
elif conv is not None:
return conv(p[s])
else:
p[s]
# Pull experiment parameters from `params`, seeding defaults via setv().
iters = setv(params, 'iters', int(1e4), int)
num_feat = setv(params, 'num_feat', 4, int)
#seed = setv(params, 'seed', 1234, int)
seed = setv(params, 'seed', np.random.randint(10**8), int)
rseed = setv(params, 'rseed', np.random.randint(10**8), int)
# Synthetic data dimensions: training/test sample counts and feature groups.
Ntrn = setv(params, 'Ntrn', 3, int)
Ntst = setv(params, 'Ntst', 3000, int)
f_glob = setv(params, 'f_glob', 2, int)
subclasses = setv(params, 'subclasses', 2, int)
f_het = setv(params, 'f_het', 1, int)
f_rand = setv(params, 'f_rand', 0, int)
rho = setv(params, 'rho', 0.6, float)
f_tot = setv(params, 'f_tot', f_glob+f_het*subclasses+f_rand, int)
blocksize = setv(params, 'blocksize', 1, int)
# Class-conditional Gaussian parameters for the generator.
mu0 = setv(params, 'mu0', -1.2, float)
mu1 = setv(params, 'mu1', -0.2, float)
sigma0 = setv(params, 'sigma0', 0.5, float)
sigma1 = setv(params, 'sigma1', 0.2, float)
# Mixture-of-Poissons sampler settings.
S = setv(params, 'S', 10.0, float)
lowd = setv(params, 'lowd', 9.0, float)
highd = setv(params, 'highd', 11.0, float)
numlam = setv(params, 'numlam', 20, int)
output = {}
output['errors'] = {}
errors = output['errors']
# ---------------------------------------------------------------
# Data generation and holdout-error estimation for several
# classifiers (sklearn baselines, analytic Gaussian, MPM).
# ---------------------------------------------------------------
np.seterr(all='ignore') # Careful with this
t1 = time()
#trn_data, trn_labels, tst_data, tst_labels = data_yj(params)
trn_data, trn_labels, tst_data, tst_labels = data_jason(params)
# Normalized copies feed the sklearn/Gaussian classifiers; the raw
# data are kept for the Poisson-mixture (MPM) model below.
norm_trn_data = norm(trn_data)
norm_tst_data = norm(tst_data)
norm_trn_data0, norm_trn_data1 = split(norm_trn_data)
norm_tst_data0, norm_tst_data1 = split(norm_tst_data)
trn_data0, trn_data1 = split(trn_data)
tst_data0, tst_data1 = split(tst_data)
#################### CLASSIFICATION ################
# Reference classifiers from scikit-learn, fit on normalized data.
sklda = LDA()
skknn = KNN(3)
sksvm = SVC()
sklda.fit(norm_trn_data, trn_labels)
skknn.fit(norm_trn_data, trn_labels)
sksvm.fit(norm_trn_data, trn_labels)
# sklearn score() returns accuracy, so 1 - score is the holdout error.
errors['lda'] = (1-sklda.score(norm_tst_data, tst_labels))
errors['knn'] = (1-skknn.score(norm_tst_data, tst_labels))
errors['svm'] = (1-sksvm.score(norm_tst_data, tst_labels))
bayes0 = GaussianBayes(np.zeros(num_feat), 1, 8, np.eye(num_feat)*3, norm_trn_data0)
bayes1 = GaussianBayes(np.zeros(num_feat), 1, 8, np.eye(num_feat)*3, norm_trn_data1)
# Gaussian Analytic
gc = GaussianCls(bayes0, bayes1)
errors['gauss'] = gc.approx_error_data(norm_tst_data, tst_labels)
# MPM Model
#d0 = np.asarray(mquantiles(trn_data0, 0.75, axis=1)).reshape(-1)
#d1 = np.asarray(mquantiles(trn_data1, 0.75, axis=1)).reshape(-1)
#dist0 = MPMDist(trn_data0,kmax=1,priorkappa=150,lammove=0.01,mumove=0.08,d=d0)
#dist1 = MPMDist(trn_data1,kmax=1,priorkappa=150,lammove=0.01,mumove=0.08,d=d1)
up = True
kappa = 10.0
# NOTE(review): this rebinds S (the scalar parameter read above) to a
# matrix, and hard-codes dimension 4 — presumably should use num_feat;
# verify before changing num_feat from its default.
S = np.eye(4) * 0.4 * (kappa - 1 - 4)
dist0 = MPMDist(trn_data0,kmax=1,priorkappa=200,lammove=0.05,mumove=0.08,usepriors=up,
        kappa=kappa, S=S)
dist1 = MPMDist(trn_data1,kmax=1,priorkappa=200,lammove=0.05,mumove=0.08,usepriors=up,
        kappa=kappa, S=S)
mpm1 = MPMCls(dist0, dist1)
# Metropolis-Hastings sampling over the MPM posterior.
mhmc1 = mh.MHRun(mpm1, burn=2000, thin=50)
mhmc1.sample(iters,verbose=False)
errors['mpm'] = mpm1.approx_error_data(mhmc1.db, tst_data, tst_labels,numlam=numlam)
print("")
print("skLDA error: %f" % errors['lda'])
print("skKNN error: %f" % errors['knn'])
print("skSVM error: %f" % errors['svm'])
print("gauss error: %f" % errors['gauss'])
print("my MP error: %f" % errors['mpm'])
#n,gext,grid = get_grid_data(np.vstack(( trn_data0, trn_data1 )), positive=True)
#def myplot(ax,g,data0,data1,gext):
#ax.plot(data0[:,0], data0[:,1], 'g.',label='0', alpha=0.5)
#ax.plot(data1[:,0], data1[:,1], 'r.',label='1', alpha=0.5)
#ax.legend(fontsize=8, loc='best')
##im = ax.imshow(g, extent=gext, aspect=1.0, origin='lower')
##p.colorbar(im,ax=ax)
#ax.contour(g, [0.0], extent=gext, aspect=1.0, origin='lower', cmap = p.cm.gray)
#p.close("all")
#gavg = mpm1.calc_gavg(mhmc1.db, grid, numlam=numlam).reshape(-1,n)
##myplot(p.subplot(3,1,1),gavg,trn_data0,trn_data1,gext)
#g0 = mpm1.dist0.calc_db_g(mhmc1.db, mhmc1.db.root.object.dist0, grid)
#g1 = mpm1.dist1.calc_db_g(mhmc1.db, mhmc1.db.root.object.dist1, grid)
##def jitter(x):
##rand = np.random.rand
##n = x.shape[0]
##return (x.T + rand(n)).T
#def jitter(x):
#rand = np.random.rand
#return x + rand(*x.shape)-0.5
##myplot(p.subplot(3,1,3),err.reshape(-1,n),jitter(tst_data0),jitter(tst_data1),gext)
#def plot_all(n, gext, grid, data0, data1, g0, g1, gavg):
#Z = np.exp(g0)+np.exp(g1)
#eg0 = np.exp(g0)/Z
#eg1 = np.exp(g1)/Z
#err = np.minimum(eg0,eg1)
#err = err.reshape(-1,n)
#lx,hx,ly,hy = gext
#asp = float(hx-lx) / (hy-ly)
#alp = 1.0
#ms = 8
#p.figure()
#p.subplot(2,2,1)
#p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms, alpha=alp)
#p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms, alpha=alp)
#p.legend(fontsize=8, loc='best')
##p.contour(gavg, extent=gext, aspect=1, origin='lower', cmap = p.cm.gray)
##p.contour(gavg, [0.0], extent=gext, aspect=1, origin='lower', cmap = p.cm.gray)
##p.imshow(gavg, extent=gext, aspect=1, origin='lower')
##p.imshow(g0.reshape(-1,n), extent=gext, aspect=asp, origin='lower')
##p.colorbar()
#p.contour(g0.reshape(-1,n), extent=gext, aspect=asp, origin='lower', cmap = p.cm.Greens)
#p.subplot(2,2,2)
#p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms, alpha=alp)
#p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms, alpha=alp)
#p.legend(fontsize=8, loc='best')
##p.contour(g0.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Greens)
##p.contour(g1.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Reds)
##p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=1, origin='lower', cmap = p.cm.gray)
##p.imshow((g1-g0).reshape(-1,n), extent=gext, aspect=1, origin='lower')
##p.imshow(g1.reshape(-1,n), extent=gext, aspect=asp, origin='lower')
##p.colorbar()
#p.contour(g1.reshape(-1,n), extent=gext, aspect=asp, origin='lower', cmap = p.cm.Reds)
#p.subplot(2,2,3)
#p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms, alpha=alp)
#p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms, alpha=alp)
#p.legend(fontsize=8, loc='best')
##p.imshow(err, extent=gext, origin='lower', aspect=asp)
##p.colorbar()
#p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
##p.contour(eg0.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Greens)
##p.contour(eg1.reshape(-1,n), extent=gext, aspect=1, origin='lower', cmap = p.cm.Reds)
#p.subplot(2,2,4)
#p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms)
#p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms)
#p.legend(fontsize=8, loc='best')
#p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
#CS = p.contour(err, [0.4, 0.3, 0.2, 0.1, 0.05], extent=gext, aspect=asp, origin='lower')
#p.clabel(CS, inline=1, fontsize=10, aspect=asp)
#p.show()
#def plot_concise(n, gext, grid, data0, data1, g0, g1, gavg):
#p.figure()
#Z = np.exp(g0)+np.exp(g1)
#eg0 = np.exp(g0)/Z
#eg1 = np.exp(g1)/Z
#err = np.minimum(eg0,eg1)
#err = err.reshape(-1,n)
#ms=8
#lx,hx,ly,hy = gext
#asp = float(hx-lx) / (hy-ly)
#p.plot(data0[:,0], data0[:,1], 'g^',label='0', markersize=ms)
#p.plot(data1[:,0], data1[:,1], 'ro',label='1', markersize=ms)
#p.legend(fontsize=8, loc='best')
#cont = (g0.max() + g1.max()) / 2.0 - 0.6
#p.contour(g0.reshape(-1,n), [cont], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
#p.contour(g1.reshape(-1,n), [cont], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray)
#p.imshow(err, extent=gext, origin='lower', aspect=asp, alpha=0.4, cmap = p.cm.Reds)
#p.contour((g1-g0).reshape(-1,n), [0.0], extent=gext, aspect=asp, origin='lower', cmap = p.cm.gray, linewidth=15.0)
#CS = p.contour(err, [0.4, 0.3, 0.2, 0.1, 0.05], extent=gext, aspect=asp, origin='lower')
#p.clabel(CS, inline=1, fontsize=10, aspect=asp)
#p.show()
#plot_all(n, gext, grid, trn_data0, trn_data1, g0,g1,gavg)
#plot_concise(n, gext, grid, trn_data0, trn_data1, g0,g1,gavg)
##n,gext,grid = get_grid_data(np.vstack(( norm_trn_data0, norm_trn_data1 )), positive=False)
##myplot(p.subplot(3,1,3),sksvm.decision_function(grid).reshape(-1,n),norm_trn_data0,norm_trn_data1,gext)
#p.figure()
#myplot(p.subplot(1,1,1),gavg,jitter(tst_data0),jitter(tst_data1),gext)
#p.axis(gext)
#mpm1.dist0.plot_traces(mhmc1.db, '/object/dist0', ['sigma'])
#p.show()
# Final bookkeeping: record run metadata alongside the error rates.
output['seed'] = seed
output['time'] = time()-t1
# Fraction of accepted MH proposals (accept_loc / total_loc).
output['acceptance'] = float(mhmc1.accept_loc)/mhmc1.total_loc
# When running as a worker, ship the compressed JSON results back to
# the job server over a ZeroMQ REQ socket on port 7000.
if 'WORKHASH' in os.environ:
    import zmq
    ctx = zmq.Context()
    socket = ctx.socket(zmq.REQ)
    socket.connect('tcp://'+server+':7000')
    wiredata = zlib.compress(js.dumps(output))
    #wiredata = s.read_db()
    # First frame identifies the job, second frame carries the payload.
    socket.send(os.environ['WORKHASH'], zmq.SNDMORE)
    socket.send(wiredata)
    socket.recv()
    socket.close()
    ctx.term()
mhmc1.db.close()
| mit |
o0neup/ibis | ibis/util.py | 5 | 4572 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pandas.core.common as pdcom
import ibis
from ibis.common import IbisTypeError
def guid():
    """Return a random GUID as a 32-character hexadecimal string."""
    try:
        # Prefer the native implementation when ibis.comms is available.
        from ibis.comms import uuid4_hex
        return uuid4_hex()
    except ImportError:
        from uuid import uuid4
        # BUG FIX: UUID.get_hex() is Python-2 only; the .hex attribute
        # works on both Python 2 and Python 3.
        return uuid4().hex
def bytes_to_uint8_array(val, width=70):
    """
    Formats a byte string for use as a uint8_t* literal in C/C++.

    :param val: bytes (or str on Python 2) to encode
    :param width: soft maximum line width before wrapping
    :return: string such as '{1,2,3}', wrapped near *width* characters
    """
    if len(val) == 0:
        return '{}'
    # BUG FIX: bytearray yields ints on both Python 2 and 3; the previous
    # ord()-based iteration broke on Python 3, where indexing bytes
    # already returns ints and ord() raises TypeError.
    byte_vals = bytearray(val)
    lines = []
    line = '{' + str(byte_vals[0])
    for b in byte_vals[1:]:
        token = str(b)
        if len(line) + len(token) > width:
            lines.append(line + ',')
            line = token
        else:
            line += ',%s' % token
    lines.append(line)
    return '\n'.join(lines) + '}'
def unique_by_key(values, key):
    """Return values de-duplicated by key(x); later duplicates win."""
    return {key(x): x for x in values}.values()
def indent(text, spaces):
    """Indent every line of *text* by *spaces* space characters."""
    prefix = ' ' * spaces
    return '\n'.join(prefix + line for line in text.split('\n'))
def any_of(values, t):
    """Return True if any element of *values* is an instance of *t*."""
    # Idiom: any() with a generator replaces the manual loop-and-return.
    return any(isinstance(x, t) for x in values)
def all_of(values, t):
    """Return True if every element of *values* is an instance of *t*."""
    # Idiom: all() with a generator replaces the manual loop-and-return.
    return all(isinstance(x, t) for x in values)
def promote_list(val):
    """Wrap *val* in a list unless it already is one."""
    return val if isinstance(val, list) else [val]
class IbisSet(object):
    """Set-like container for objects compared via their ``equals`` method."""

    def __init__(self, keys=None):
        # Fall back to a fresh list when no (or an empty) key list is given.
        self.keys = keys or []

    @classmethod
    def from_list(cls, keys):
        """Build an IbisSet from an existing list of keys."""
        return IbisSet(keys)

    def __contains__(self, obj):
        # Membership is decided by obj.equals(), not hashing or identity.
        return any(obj.equals(other) for other in self.keys)

    def add(self, obj):
        """Append *obj*; duplicates are not checked."""
        self.keys.append(obj)
class IbisMap(object):
    """Mapping whose keys are matched via their ``equals`` method."""

    def __init__(self):
        self.keys = []
        self.values = []

    def __contains__(self, obj):
        # Linear scan: keys are compared with obj.equals(), not hashed.
        return any(obj.equals(other) for other in self.keys)

    def set(self, key, value):
        """Store *value* under *key* (existing entries are not collapsed)."""
        self.keys.append(key)
        self.values.append(value)

    def get(self, key):
        """Return the value stored under *key*; raise KeyError when absent."""
        for candidate, value in zip(self.keys, self.values):
            if key.equals(candidate):
                return value
        raise KeyError(key)
def pandas_col_to_ibis_type(col):
    """
    Map a pandas Series' dtype to the corresponding ibis type name.

    The checks are order-sensitive: datetime/timedelta/categorical/bool
    are handled before the plain numeric dtypes.  Unsigned ints are
    widened to the next larger signed type; uint64 cannot be represented
    and raises.

    :param col: pandas Series whose dtype should be translated
    :return: ibis type name as a string (e.g. 'int32', 'timestamp')
    :raises IbisTypeError: for non-nanosecond datetimes, uint64 and any
        dtype with no ibis analog
    """
    dty = col.dtype
    # datetime types
    if pdcom.is_datetime64_dtype(dty):
        if pdcom.is_datetime64_ns_dtype(dty):
            return 'timestamp'
        else:
            raise IbisTypeError(
                "Column {0} has dtype {1}, which is datetime64-like but does "
                "not use nanosecond units".format(col.name, dty))
    if pdcom.is_timedelta64_dtype(dty):
        # Lossy: timedeltas are stored as their integer representation.
        print("Warning: encoding a timedelta64 as an int64")
        return 'int64'
    if pdcom.is_categorical_dtype(dty):
        return 'category'
    if pdcom.is_bool_dtype(dty):
        return 'boolean'
    # simple numerical types
    if issubclass(dty.type, np.int8):
        return 'int8'
    if issubclass(dty.type, np.int16):
        return 'int16'
    if issubclass(dty.type, np.int32):
        return 'int32'
    if issubclass(dty.type, np.int64):
        return 'int64'
    if issubclass(dty.type, np.float32):
        return 'float'
    if issubclass(dty.type, np.float64):
        return 'double'
    # unsigned ints are widened to the next larger signed type
    if issubclass(dty.type, np.uint8):
        return 'int16'
    if issubclass(dty.type, np.uint16):
        return 'int32'
    if issubclass(dty.type, np.uint32):
        return 'int64'
    if issubclass(dty.type, np.uint64):
        # no signed type is wide enough to hold every uint64 value
        raise IbisTypeError("Column {0} is an unsigned int64".format(col.name))
    if pdcom.is_object_dtype(dty):
        # TODO: overly broad?
        return 'string'
    raise IbisTypeError("Column {0} is dtype {1}".format(col.name, dty))
def pandas_to_ibis_schema(frame):
    """Infer an ibis schema from a pandas DataFrame's column dtypes."""
    # no analog for decimal in pandas
    pairs = [(col_name, pandas_col_to_ibis_type(frame[col_name]))
             for col_name in frame]
    return ibis.schema(pairs)
| apache-2.0 |
pravsripad/jumeg | jumeg/decompose/ica.py | 2 | 35344 | # ICA functions
'''
authors:
Juergen Dammers
Lukas Breuer
email: j.dammers@fz-juelich.de
Change history:
21.01.2020: - changes in ica_array
- now returns an MNE-type of ICA object (default)
- in fastica changed default to whiten=False
- added function to convert any ica object to MNE-type
- name change: "activations" are now named "sources"
06.01.2020: added whitening option in PCA
17.10.2019: added mulitple useful functions to use ICA without MNE
27.11.2015 created by Lukas Breuer
'''
#######################################################
# #
# import necessary modules #
# #
#######################################################
from scipy.stats import kurtosis
import math
import numpy as np
from sys import stdout
from scipy.linalg import pinv
from copy import deepcopy
from mne.preprocessing.ica import _check_start_stop
from mne.utils.check import _check_preload
#######################################################
# #
# interface to perform (extended) Infomax ICA on #
# a data array #
# #
#######################################################
def ica_array(data_orig, dim_reduction='', explainedVar=1.0,
              overwrite=None, return_ica_object=True,
              max_pca_components=None, method='infomax',
              cost_func='logcosh', weights=None, lrate=None,
              block=None, wchange=1e-16, annealdeg=60.,
              annealstep=0.9, n_subgauss=1, kurt_size=6000,
              maxsteps=200, pca=None, whiten=False, verbose=True):
    """
    Interface to perform (extended) Infomax or FastICA on a data array.

    Parameters
    ----------
    data_orig : array of data to be decomposed [nchan, ntsl].

    Optional Parameters
    -------------------
    dim_reduction : {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
        Method for dimension selection. For further information about
        the methods please check the script 'dimension_selection.py'.
        default: '' --> no dimension reduction is performed as long as
        the parameter 'max_pca_components' is not set.
    explainedVar : float
        Value between 0 and 1; components will be selected by the
        cumulative percentage of explained variance.
    overwrite : if set the data array will be overwritten
        (this saves memory); default: overwrite=None
    max_pca_components : int | None
        The number of components used for PCA decomposition. If None, no
        dimension reduction will be applied and max_pca_components will
        equal the number of channels supplied on decomposing data.
        Only of interest when dim_reduction=''.
    method : {'fastica', 'infomax', 'extended-infomax'}
        The ICA method to use. Defaults to 'infomax'.
    whiten : bool, optional (default False)
        Whether the PCA output is additionally whitened (see
        sklearn.decomposition.PCA).
    return_ica_object : bool, optional (default True)
        When True an MNE-type ICA object (including PCA) is returned
        together with the sources; when False the ICA unmixing matrix,
        the PCA object and the sources are returned.
    cost_func : str
        FastICA cost function: 'logcosh', 'exp' or 'cube'.
    weights, lrate, block, wchange, annealdeg, annealstep, n_subgauss,
    kurt_size, maxsteps :
        (extended) Infomax parameters; see infomax() for details.
    pca : optionally reuse a previously fitted PCA object (its mean_ and
        stddev_ attributes are updated from the current data).
    verbose : print progress information.

    Returns
    -------
    If return_ica_object is True:
        ica : MNE-type ICA object (including the PCA information)
        sources : ICA source signals
    otherwise:
        weights : un-mixing matrix
        pca : instance of PCA where all information about the
            PCA decomposition is stored
        sources : ICA source signals
    """
    # -------------------------------------------
    # check overwrite option
    # -------------------------------------------
    # BUG FIX: compare against None with 'is' (identity), not '=='.
    if overwrite is None:
        data = data_orig.copy()
    else:
        data = data_orig

    # -------------------------------------------
    # perform centering and whitening of the data
    # - optionally use the provided PCA object
    # -------------------------------------------
    if pca:
        # perform centering and whitening
        dmean = data.mean(axis=-1)
        stddev = np.std(data, axis=-1)
        dnorm = (data - dmean[:, np.newaxis])/stddev[:, np.newaxis]
        data = np.dot(dnorm.T, pca.components_[:max_pca_components].T)
        # update mean and standard-deviation in PCA object
        pca.mean_ = dmean
        pca.stddev_ = stddev
    else:
        if verbose:
            print(" ... perform centering and whitening ...")
        data, pca = whitening(data.T, dim_reduction=dim_reduction, npc=max_pca_components,
                              explainedVar=explainedVar, whiten=whiten)

    # -------------------------------------------
    # now call ICA algorithm
    # -------------------------------------------
    # FastICA
    if method == 'fastica':
        from sklearn.decomposition import fastica
        # Data were already whitened above, therefore whiten=False here
        # (previously whiten=True whitened the data a second time).
        _, unmixing_, sources_ = fastica(data, fun=cost_func, max_iter=maxsteps,
                                         tol=1e-4, whiten=False)
        sources = sources_.T
        weights = unmixing_
    # Infomax or extended Infomax
    else:
        if method == 'infomax':
            extended = False
        elif method == 'extended-infomax':
            extended = True
        else:
            print(">>>> WARNING: Entered ICA method not found!")
            print(">>>> Allowed are fastica, extended-infomax and infomax")
            print(">>>> Using now the default ICA method which is Infomax")
            extended = False
        weights = infomax(data, weights=weights, l_rate=lrate, block=block,
                          w_change=wchange, anneal_deg=annealdeg, anneal_step=annealstep,
                          extended=extended, n_subgauss=n_subgauss, kurt_size=kurt_size,
                          max_iter=maxsteps, verbose=verbose)
        sources = np.dot(weights, data.T)

    # create an MNE-Python type of ICA object
    # Note, when used with MNE functions the info dict needs
    # to be manually added to the ica object, such as, ica.info = info
    if return_ica_object:
        ica = ica_convert2mne(weights, pca, method=method)
        return ica, sources
    else:
        return weights, pca, sources
#######################################################
# #
# interface to perform (extended) Infomax ICA on #
# a data array #
# #
#######################################################
def infomax2data(unmixing, pca, sources, idx_zero=None):
    """
    Back-transform ICA sources to the original data space.

    Parameters
    ----------
    unmixing : the ICA un-mixing (weight) matrix
    pca : instance of the PCA object used for decomposition (must carry
        components_, mean_ and stddev_)
    sources : underlying ICA source signals [npc, ntsl]
    idx_zero : indices of independent components (ICs) which should be
        removed before back-transformation
        default: idx_zero=None --> no IC is removed

    Returns
    -------
    data : back-transformed cleaned data array [nchan, ntsl]
    """
    # -------------------------------------------
    # dimensions of the involved arrays
    # -------------------------------------------
    n_comp = len(unmixing)
    n_chan = len(pca.components_)
    n_times = sources.shape[1]

    # -------------------------------------------
    # backtransform data
    # -------------------------------------------
    # The mixing matrix is the (pseudo-)inverse of the un-mixing matrix;
    # zeroing its columns drops the corresponding components.
    mixing = pinv(unmixing)
    if idx_zero is not None:
        mixing[:, idx_zero] = 0.

    # ICA -> PCA space, padding unused principal components with zeros ...
    principal = np.zeros((n_chan, n_times))
    principal[:n_comp] = np.dot(mixing, sources)

    # ... then PCA -> channel space, followed by undoing the normalization.
    data = np.dot(pca.components_.T, principal)
    data = data * pca.stddev_[:, np.newaxis] + pca.mean_[:, np.newaxis]
    return data
#######################################################
# #
# routine for PCA decomposition prior to ICA #
# #
#######################################################
def whitening(data, dim_reduction='',
              npc=None, explainedVar=1.0, whiten=False):
    """
    routine to perform whitening prior to Infomax ICA application
    (whitening is based on Principal Component Analysis from
    sklearn.decomposition)

    Parameters
    ----------
    data : data array [ntsl, nchan] for decomposition.
    dim_reduction : {'', 'AIC', 'BIC', 'GAP', 'MDL', 'MIBS', 'explVar'}
        Method for dimension selection. For further information about
        the methods please check the script 'dimension_selection.py'.
        default: dim_reduction='' --> no dimension reduction is performed as
        long as not the parameter 'npc' is set.
    npc : int | None
        The number of components used for PCA decomposition. If None, no
        dimension reduction will be applied and max_pca_components will equal
        the number of channels supplied on decomposing data. Only of interest
        when dim_reduction=''
        default: npc = None
    explainedVar : float | None
        Must be between 0 and 1. If float, the number of components
        selected matches the number of components with a cumulative
        explained variance of 'explainedVar'
        default: explainedVar = None
    whiten : bool, optional (default False)
        When True the `components_` vectors are multiplied by the square
        root of n_samples and then divided by the singular values to
        ensure uncorrelated outputs with unit component-wise variances.

    Returns
    -------
    whitened_data : data array [ntsl, npc] of decomposed sources
    pca : instance of PCA
        Returns the instance of PCA where all information about the
        PCA decomposition is stored (including the centering mean_ and
        normalization stddev_ added below).
    """
    # -------------------------------------------
    # import necessary modules
    # -------------------------------------------
    from sklearn.decomposition import PCA
    from . import dimension_selection as dim_sel

    # -------------------------------------------
    # check input data
    # -------------------------------------------
    ntsl, nchan = data.shape
    if (nchan < 2) or (ntsl < nchan):
        raise ValueError('Data size too small!')

    # -------------------------------------------
    # perform PCA decomposition
    # -------------------------------------------
    X = data.copy()
    # whiten = False
    # z-score each channel before PCA; mean and stddev are stored on the
    # PCA object below so the normalization can be undone later.
    dmean = X.mean(axis=0)
    stddev = np.std(X, axis=0)
    X = (X - dmean[np.newaxis, :]) / stddev[np.newaxis, :]
    pca = PCA(n_components=None, whiten=whiten, svd_solver='auto', copy=True)

    # -------------------------------------------
    # perform whitening
    # -------------------------------------------
    whitened_data = pca.fit_transform(X)

    # -------------------------------------------
    # update PCA structure
    # -------------------------------------------
    # custom attributes (not part of sklearn's PCA API) used by
    # infomax2data()/ica_array() to reverse the normalization
    pca.mean_ = dmean
    pca.stddev_ = stddev

    # -------------------------------------------
    # check dimension selection
    # -------------------------------------------
    # NOTE(review): 'BIC' and 'MIBS' both dispatch to dim_sel.mibs —
    # confirm this is intended and not a copy/paste slip.
    if dim_reduction == 'AIC':
        npc, _ = dim_sel.aic(pca.explained_variance_)
    elif dim_reduction == 'BIC':
        npc = dim_sel.mibs(pca.explained_variance_, ntsl)
    elif dim_reduction == 'GAP':
        npc = dim_sel.gap(pca.explained_variance_)
    elif dim_reduction == 'MDL':
        _, npc = dim_sel.mdl(pca.explained_variance_)
    elif dim_reduction == 'MIBS':
        npc = dim_sel.mibs(pca.explained_variance_, ntsl)
    elif dim_reduction == 'explVar':
        npc = dim_sel.explVar(pca.explained_variance_, explainedVar)
    elif npc is None:
        npc = nchan

    # return results
    # return whitened_data[:, :(npc + 1)], pca
    return whitened_data[:, :(npc)], pca
#######################################################
# #
# real Infomax implementation #
# #
#######################################################
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
fixed_random_state=None, verbose=None):
"""
Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note. Smaller learining rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Wheather to use the extended infomax algorithm or not. Defaults to
True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
"""
# define some default parameter
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 200
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0: # allow not to recompute kurtosis
n_subgauss = 1 # but initialize n_subgauss to 1 if you recompute
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
if verbose:
print('computing%sInfomax ICA' % ' Extended ' if extended is True else ' ')
# collect parameter
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
# initialize weights as identity matrix
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
# for extended Infomax
if extended is True:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
# trainings loop
olddelta, oldchange = 1., 0.
while step < max_iter:
# shuffle data at each step
if fixed_random_state:
np.random.seed(step) # --> permutation is fixed but differs at each step
else:
np.random.seed(None)
permute = list(range(n_samples))
np.random.shuffle(permute)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
# extended ICA update
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(np.random.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if verbose:
info = "\r" if step > 0 else ""
info = ">>> Step %4d of %4d; wchange: %1.4e\n" % (step, max_iter, change)
stdout.write(info)
stdout.flush()
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
# accumulate angledelta until anneal_deg reached l_rates
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angle delta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up
# (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
# for extended Infomax
if extended:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
print('... lowering learning rate to %g \n... re-starting...' % l_rate)
else:
raise ValueError('Error in Infomax ICA: unmixing_matrix matrix'
'might not be invertible!')
# return ICA unmixing matrix
return weights.T # after transpose shape corresponds to [n_features, n_samples]
#######################################################
#
# ica_convert2mne:
# - create a MNE-type of ICA object
# - include pca object into ica object
# - define entries as used by MNE-Python
#
#######################################################
def ica_convert2mne(unmixing, pca, info=None, method='fastica'):
    """Wrap a sklearn-style unmixing matrix and PCA into an MNE-type ICA object.

    Parameters
    ----------
    unmixing : ndarray
        ICA unmixing matrix; components are along axis 1
        (n_components = unmixing.shape[1]).
    pca : sklearn.decomposition.PCA
        The fitted PCA applied before the ICA decomposition; it is stored
        on the returned object and its statistics are copied to the
        MNE attribute names.
    info : mne.Info | None
        If given, attached to the returned object as ``ica.info``.
    method : str
        ICA method name. 'extended-infomax' is mapped to MNE's 'infomax'
        with ``fit_params=dict(extended=True)``; anything else is passed
        through unchanged.

    Returns
    -------
    ica : mne.preprocessing.ICA
        ICA object carrying both sklearn-style (``unmixing_``, ``mixing_``)
        and MNE-style (``unmixing_matrix_``, ``mixing_matrix_``) attributes.
    """
    # create MNE-type of ICA object
    from mne.preprocessing.ica import ICA
    n_comp = unmixing.shape[1]
    if method == 'extended-infomax':
        ica_method = 'infomax'
        fit_params = dict(extended=True)
    else:
        ica_method = method
        fit_params = None
    ica = ICA(n_components=n_comp, method=ica_method, fit_params=fit_params)
    # add PCA object
    ica.pca = pca
    # PCA info to be used by MNE-Python
    ica.pca_mean_ = pca.mean_
    ica.pca_components_ = pca.components_
    exp_var = pca.explained_variance_
    ica.pca_explained_variance_ = exp_var
    ica.pca_explained_variance_ratio_ = pca.explained_variance_ratio_
    # ICA info
    ica.n_components_ = n_comp
    ica.n_components = n_comp
    ica.components_ = unmixing  # compatible with sklearn
    ica.unmixing_ = ica.components_  # as used by sklearn
    ica.mixing_ = pinv(ica.unmixing_)  # as used by sklearn
    # NOTE(review): MNE stores the unmixing for the *whitened* PCA space,
    # hence the division by sqrt(explained variance) -- presumably matching
    # MNE's internal convention; confirm against mne.preprocessing.ICA.
    ica.unmixing_matrix_ = ica.unmixing_ / np.sqrt(exp_var[0:n_comp])[None, :]  # as used by MNE-Python
    ica.mixing_matrix_ = pinv(ica.unmixing_matrix_)  # as used by MNE-Python
    ica._ica_names = ['ICA%03d' % ii for ii in range(n_comp)]
    ica.fun = method
    if info:
        ica.info = info
    return ica
#######################################################
# #
# ica2data: back-transformation to data space #
# #
#######################################################
def ica2data(sources, ica, pca, idx_zero=None, idx_keep=None):
    """Back-transform ICA sources to data space.

    Parameters
    ----------
    sources : array, shape (n_samples, n_components)
        ICA source time courses.
    ica : ICA object (sklearn.decomposition)
        Provides the mixing matrix ``mixing_``.
    pca : PCA object (sklearn.decomposition)
        Provides ``n_components_`` and ``inverse_transform``.
    idx_zero : array-like of int | None
        Components to zero out before back-transformation.
    idx_keep : array-like of int | None
        Components to keep; overrides ``idx_zero`` when given.

    Returns
    -------
    data : array
        Data re-computed from the (possibly pruned) ICA sources.
    """
    n_samples, n_comp = sources.shape
    # In rare cases pca.n_components_ < pca.n_features_, so the PCA
    # dimension (not the raw feature count) sizes the intermediate array.
    n_dim = pca.n_components_

    # Work on a copy so the caller's mixing matrix stays untouched.
    mixing = ica.mixing_.copy()

    # 'idx_keep' takes precedence: everything not kept is zeroed.
    if idx_keep is not None:
        idx_zero = np.setdiff1d(np.arange(n_comp), idx_keep)
    if idx_zero is not None:
        mixing[:, idx_zero] = 0.0

    # ICA space -> PCA space (equivalent to ica.inverse_transform when no
    # component is zeroed), padded up to the full PCA dimension.
    pca_space = np.zeros((n_samples, n_dim))
    pca_space[:, :n_comp] = sources.dot(mixing.T)

    # PCA space -> original data space.
    return pca.inverse_transform(pca_space)
#######################################################
# #
# ica2data_single_component #
# #
#######################################################
def ica2data_single_components(sources, ica, pca, picks=None):
    """Back-transform each independent component separately to data space.

    Result has shape [n_components, n_samples, n_features]; slice ``i`` is
    the data reconstructed from component ``i`` alone.

    Note: 'picks' is currently unused and kept only for interface
    compatibility.
    """
    n_samples, n_comp = sources.shape
    single = np.zeros((n_comp, n_samples, pca.n_features_))
    for comp in range(n_comp):
        # keep exactly one component; ica2data zeroes all others in the
        # mixing matrix before the back-transformation
        single[comp] = ica2data(sources, ica, pca, idx_keep=comp)
    return single
#######################################################
# #
# apply ICA based on filtered data to unfiltered raw #
# #
#######################################################
def ica_apply_unfiltered(raw_unfilt, ica_filt, picks,
                         n_pca_components=None, reject_by_annotation=None,
                         start=None, stop=None):
    """Remove selected components from the unfiltered signal
    and preserve the original mean and standard deviation

    Note:
        this is needed when ICA was trained on filtered data
        but the cleaning will be applied on unfiltered data.
        After cleaning the original (unfiltered) mean and standard
        deviation is restored.

    Parameters
    ----------
    raw_unfilt : instance of Raw
        The data to be processed (works inplace).
    ica_filt : instance of ICA
        ICA solution obtained from the *filtered* data; its component
        selection is applied to the unfiltered recording.
    picks : array-like of int
        Indices of the channels the ICA was fitted on.
    n_pca_components : int | float | None
        The number of PCA components to be kept, either absolute (int)
        or percentage of the explained variance (float). If None (default),
        all PCA components will be used.
    reject_by_annotation : bool | None
        Forwarded to Raw.get_data to drop annotated bad segments.
    start : int | float | None
        First sample to include. If float, data will be interpreted as
        time in seconds. If None, data will be used from the first sample.
    stop : int | float | None
        Last sample to not include. If float, data will be interpreted as
        time in seconds. If None, data will be used to the last sample.

    Returns
    -------
    raw_unfilt_clean : instance of Raw after cleaning
    """
    _check_preload(raw_unfilt, "ica.apply")
    start, stop = _check_start_stop(raw_unfilt, start, stop)
    # BUG FIX: 'picks' was previously passed both positionally and as a
    # keyword argument ('get_data(picks, picks=picks, ...)'), which raises
    # TypeError: got multiple values for argument 'picks'.
    data = raw_unfilt.get_data(picks=picks, start=start, stop=stop,
                               reject_by_annotation=reject_by_annotation)
    # compute pre-whitener and PCA data mean from the *unfiltered* data
    pre_whiten = np.atleast_2d(np.ones(len(picks)) * data.std()).T
    data, _ = ica_filt._pre_whiten(data, raw_unfilt.info, picks)
    pca_mean_ = np.mean(data, axis=1)
    # apply ICA on unfiltered data while preserving the original mean and
    # stddev.  Note, in MNE-Python's ICA:
    #   pca.mean_          is the overall mean across all data channels
    #   ica.pre_whitener_  is a vector with equal values being the
    #                      overall standard deviation
    ica_unfilt = ica_filt.copy()
    ica_unfilt.pca_mean_ = pca_mean_
    ica_unfilt._pre_whitener = pre_whiten
    raw_unfilt_clean = ica_unfilt.apply(raw_unfilt, start=start, stop=stop,
                                        n_pca_components=n_pca_components)
    return raw_unfilt_clean
# ======================================================
#
# transform ICA sources to (MEG) data space
# Note: this routine makes use of the ICA object
# as defied by MNE-Python
#
# ======================================================
def transform_mne_ica2data(sources, ica, idx_zero=None, idx_keep=None):
    """Back-transform ICA sources to data space with MNE-style rescaling.

    sources : array, shape [n_components, n_samples] (MNE convention)
    ica : ICA object from MNE-Python
    idx_zero : list of components to remove (optional)
    idx_keep : list of components to keep; overrides idx_zero (optional)
    return : data re-computed from the ICA sources, shape
             [n_channels, n_samples]
    """
    import numpy as np
    from scipy.linalg import pinv

    n_comp, n_samples = sources.shape
    n_chan = len(ica.pca_components_)

    # copy so the caller's mixing matrix is never modified
    mixing = ica.mixing_matrix_.copy()
    if idx_keep is not None:
        # 'idx_keep' wins over 'idx_zero'
        idx_zero = np.setdiff1d(np.arange(n_comp), idx_keep)
    if idx_zero is not None:
        mixing[:, idx_zero] = 0.0

    # ICA space -> PCA space (sources are stored transposed in MNE)
    pca_space = np.zeros((n_samples, n_chan))
    pca_space[:, :n_comp] = sources.T.dot(mixing.T)

    # PCA space -> data space; compatible with MNE-Python, but not with
    # scikit-learn or JuMEG.  Result is [n_chan, n_samples].
    data = (pca_space.dot(ica.pca_components_) + ica.pca_mean_).T

    # undo the pre-whitening applied before the ICA fit
    if ica.noise_cov is None:  # revert standardization
        data *= ica.pre_whitener_
    else:
        data = pinv(ica.pre_whitener_, cond=1e-14).dot(data)
    return data
| bsd-3-clause |
nliolios24/textrank | textrank.py | 1 | 1442 | import string
import nltk
import sys
import networkx as nx
import numpy as np
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# Sentence tokenizer (Punkt model) used to split raw text into sentences.
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

# Approximate maximum length of the generated summary.
word_count = 400

# Collect the sentences of every document named on the command line.
# FIX: the previous 'try: ... except Exception, e: raise e' was a no-op
# handler that (under Python 2) destroyed the original traceback, and the
# opened file handle was never closed.
sentences = []
for arg in sys.argv[1:]:
    with open(arg) as document:
        sentences += tokenizer.tokenize(document.read())
def normalize(sentence):
    """Lowercase a sentence, strip punctuation and drop English stopwords.

    Returns the remaining word tokens joined by single spaces.
    """
    tokens = RegexpTokenizer(r'\w+').tokenize(sentence.lower())
    # PERF FIX: build the stopword set once -- the original evaluated
    # stopwords.words('english') (a corpus read returning a list) for
    # every single token, making the filter quadratic.
    stop_words = set(stopwords.words('english'))
    return " ".join(w for w in tokens if w not in stop_words)
def textrank():
    """Score every sentence with TextRank (PageRank over TF-IDF similarity).

    Returns a list of (score, sentence) tuples sorted best-first; the
    returned sentences are the original (un-normalized) ones.
    """
    # BUG FIX: the original loop did 'for sentence in sentences:
    # sentence = normalize(sentence)', which only rebinds the loop
    # variable -- the normalized text was silently discarded and the
    # vectorizer ran on the raw sentences.
    cleaned = [normalize(sentence) for sentence in sentences]
    # TF-IDF weighted bag-of-words built from the normalized sentences.
    matrix = CountVectorizer().fit_transform(cleaned)
    normalized = TfidfTransformer().fit_transform(matrix)
    # Sentence-to-sentence cosine similarity (rows are L2-normalized).
    similarity_graph = normalized * normalized.T
    nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
    scores = nx.pagerank(nx_graph)
    # Rank the *original* sentences by their PageRank score.
    return sorted(((scores[i], s) for i, s in enumerate(sentences)),
                  reverse=True)
def summarize():
    """Print a summary built from the highest-ranked sentences.

    Python 2 only: relies on list-returning zip(), xrange() and the
    print statement.
    """
    # textrank() returns (score, sentence) pairs sorted best-first;
    # zip(*...)[1] extracts the sentences in rank order.
    summary_list = zip(*textrank())[1]
    summary = ''
    for i in xrange(len(summary_list)):
        # NOTE(review): this compares *characters* of the summary against
        # word_count (400), not words -- confirm whether that is intended.
        if len(summary) < word_count:
            summary += ' ' + summary_list[i]
    print summary

if __name__ == '__main__':
    summarize()
| mit |
dr-bigfatnoob/effort | datasets/cleaned/maxwell.py | 1 | 2885 | from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from datasets.dataset import Dataset, Meta, read_pandas_dataframe
from datasets.cleaned import data_to_use
class Maxwell(Dataset):
    """Maxwell software-effort dataset wrapped in the project's Dataset API.

    The decisions are 23 project attributes (application/hardware type,
    T01..T15 complexity ratings, project size, ...); the single objective
    is the development effort.
    """
    def __init__(self):
        Dataset.__init__(self, data=Maxwell.data(),
                         dec_meta=Maxwell.decision_meta(),
                         obj_meta=Maxwell.objective_meta())

    @staticmethod
    def decision_meta():
        # Metadata for the decision (input) attributes: discrete factors
        # plus the continuous project size (last entry).
        return [
            Meta(index=0, name="App", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=1, name="Har", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=2, name="Dba", type=Meta.DISC, values=range(0, 5), is_obj=False),
            Meta(index=3, name="Ifc", type=Meta.DISC, values=range(1, 3), is_obj=False),
            Meta(index=4, name="Source", type=Meta.DISC, values=range(1, 3), is_obj=False),
            Meta(index=5, name="Telonuse", type=Meta.DISC, values=range(0, 2), is_obj=False),
            Meta(index=6, name="Nlan", type=Meta.DISC, values=range(1, 5), is_obj=False),
            Meta(index=7, name="T01", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=8, name="T02", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=9, name="T03", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=10, name="T04", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=11, name="T05", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=12, name="T06", type=Meta.DISC, values=range(1, 5), is_obj=False),
            Meta(index=13, name="T07", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=14, name="T08", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=15, name="T09", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=16, name="T10", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=17, name="T11", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=18, name="T12", type=Meta.DISC, values=range(2, 6), is_obj=False),
            Meta(index=19, name="T13", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=20, name="T14", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=21, name="T15", type=Meta.DISC, values=range(1, 6), is_obj=False),
            Meta(index=22, name="Size", type=Meta.CONT, values=range(48, 3643), is_obj=False)]

    @staticmethod
    def objective_meta():
        # NOTE(review): unlike decision_meta(), this passes 'range=' rather
        # than 'values=' to Meta -- confirm that Meta accepts a 'range'
        # keyword, otherwise this call raises a TypeError.
        return [Meta(index=23, name="Effort", type=Meta.CONT, range=(583, 63694), is_obj=True)]

    @staticmethod
    def data():
        # Raw Maxwell table as a pandas dataframe (source file has no header).
        return read_pandas_dataframe(data_to_use.data_maxwell(), read_header=False)
# Smoke test when run as a script: print the size of the loaded table.
if __name__ == "__main__":
    print(len(Maxwell.data()[0]))
| mit |
havok2063/cookiecutter-marvin | {{cookiecutter.repo_name}}/docs/sphinx/conf.py | 1 | 10487 | # -*- coding: utf-8 -*-
#
# Marvin documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 10 08:50:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# Importing matplotlib here with agg to prevent tkinter error in readthedocs
import matplotlib
matplotlib.use('agg')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../python/'))
# set the version
import {{cookiecutter.package_name}}
package_version = {{cookiecutter.package_name}}.__version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'sphinxcontrib.httpdomain', 'sphinxcontrib.autohttp.flask',
'sphinxcontrib.autohttp.flaskqref',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = '{{cookiecutter.package_title}}'
copyright = '{0}, {1}'.format('{{cookiecutter.year}}', '{{cookiecutter.full_name}}')
author = '{{cookiecutter.full_name}}'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = package_version
# The full version, including alpha/beta/rc tags.
release = package_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_rtd_theme.get_html_theme_path()
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = u'Marvin v2.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['localtoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}Docs'.format(project)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, '{{cookiecutter.package_name}}', u'{0} Documentation'.format(project),
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = 'groupwise'
modindex_common_prefix = ['{{cookiecutter.package_name}}.']
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = """
.. |numpy_array| replace:: Numpy array
.. _numpy_array: http://example.com/
"""
| bsd-3-clause |
Aditya8795/Image-Processing- | DCT.py | 2 | 4996 |
import io
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from scipy import fftpack
from urllib.request import urlopen
import IPython
# [Source](http://bugra.github.io/work/notes/2014-07-12/discre-fourier-cosine-transform-dft-dct-image-compression/)
# [My Notes](https://docs.google.com/document/d/1yIDTEsXFkLLV5sLmPHtV8-X3GZ2Oa-X2zYvS1BaXMpw/edit?usp=sharing)
#image_url='http://i.imgur.com/8vuLtqi.png'
image_url='file:///C:/Users/Aditya/Pictures/crazygirl.jpg'
# reads the image from url using PIL and converting it into a numpy array after converted grayscale image.
def get_image_from_url(image_url='http://i.imgur.com/8vuLtqi.png', size=(128, 128)):
    """Download an image, resize it and return it as a float grayscale array.

    Parameters
    ----------
    image_url : str
        URL (http or file scheme) of the image to load.
    size : tuple of int
        Target (width, height) passed to PIL's resize.

    Returns
    -------
    img : numpy array of float64
        Grayscale ('L' mode) pixel intensities of the resized image.
    """
    file_descriptor = urlopen(image_url)
    image_file = io.BytesIO(file_descriptor.read())
    image = Image.open(image_file)
    img_color = image.resize(size, 1)
    img_grey = img_color.convert('L')
    # FIX: the 'np.float' alias was removed in NumPy 1.24; the builtin
    # 'float' (equivalent to np.float64 here) is the supported spelling.
    img = np.array(img_grey, dtype=float)
    return img
def get_2D_dct(img):
    """Return the 2D (type-II, ortho-normalized) DCT of an image."""
    # Separable transform: DCT along one axis, then along the other.
    first_pass = fftpack.dct(img.T, norm='ortho')
    return fftpack.dct(first_pass.T, norm='ortho')
def get_2d_idct(coefficients):
    """Invert a 2D ortho-normalized DCT (inverse of get_2D_dct)."""
    # Apply the 1D inverse transform along each axis in turn.
    half_inverted = fftpack.idct(coefficients.T, norm='ortho')
    return fftpack.idct(half_inverted.T, norm='ortho')
def get_reconstructed_image(raw):
    """Clip a float image to [0, 255] and wrap it in an 8-bit PIL image."""
    clipped = raw.clip(0, 255).astype('uint8')
    return Image.fromarray(clipped)
# Load the demo image and compute its full 2D DCT.
pixels = get_image_from_url(image_url=image_url, size=(256, 256))
dct_size = pixels.shape[0]
dct = get_2D_dct(pixels)
reconstructed_images = []

# Reconstruct the image from progressively larger top-left sub-blocks of
# DCT coefficients, i.e. adding higher-frequency content step by step.
for ii in range(dct_size):
    dct_copy = dct.copy()
    dct_copy[ii:,:] = 0
    dct_copy[:,ii:] = 0

    # Reconstructed image
    r_img = get_2d_idct(dct_copy);
    reconstructed_image = get_reconstructed_image(r_img);

    # Create a list of images
    reconstructed_images.append(reconstructed_image);

# Scatter plot of all coefficient magnitudes (log scale) in scan order.
plt.figure(figsize=(16, 12));
plt.scatter(range(dct.ravel().size), np.log10(np.abs(dct.ravel())), c='#348ABD', alpha=.3);
'''
The point is instead of comparing to only Root Mean Squared Error(RMMS) to learn where to stop in the coefficients,
one could check better metrics which consider visual fidelity or even perceived quality to find
the sweet spot between compression ratio and image quality.
It is easy to get very large coefficients and reject very small coefficients in the reconstructed image
but not very easy to either include or reject middle values based on their solely amplitudes.
In these coefficients, one needs to look at the frequencies that they belong to,
if they are in somehow high frequency range, then it would be rejected
whereas if they belong to lower frequency range, it may introduce noticeable and large artifacts into the signal.
----
so basically small coefficients means that the basis image corresponding to that coefficient does not play
a large part in defining the original image itself thus those can be neglected.
but if the values are all middlish with all the basis images contributing about the same to the original image
then we need to turn our eyes to the frequencies of the coefficients which basically means we check out whether
they are talking about the the slow changes and constant colors (low frequency information)
or the edges and fast transition information (high frequency information).
this can be clearly seen as u,v increases the basis images start to have greater gradients.
'''
plt.title('DCT Coefficient Amplitude vs. Order of Coefficient');
plt.xlabel('Order of DCT Coefficients');
plt.ylabel('DCT Coefficients Amplitude in log scale');
'''
If we look at the first first 2500 coefficients in a 50x50 grid, then we could see that a lot of the coefficients are actually very small
comparing to the few very large ones. This not only provides a good compaction for the image(less coefficients means high compaction rate),
but also provides a good compromise between compression and image quality.
{Generally, very low frequencies have a higher ratio of magnitude
orders and similar to very high frequencies.} ???!!
* less coefficients means you get to convey a ok approximation of the image with far less space.
'''
# Heat map of the 50x50 lowest-frequency coefficient magnitudes.
plt.matshow(np.abs(dct[:50, :50]), cmap=plt.cm.Paired);
plt.title('First 2500 coefficients in Grid');
'''
see This explains what this guy says here https://youtu.be/_bltj_7Ne2c?t=869
see the coefficients in the code is the values in the Transform matrix (after we operate dct on the image)
Now each coefficient holds info on the amount of similarity between the image and the corresponding basis image
(corresponding as in the coefficient itself is a function of u,v as is the basis functions)
now with the FIRST 50 coefficients itself the image is reconstructed to a fair (awesome degree)
'''
# 8x8 grid of the first 64 progressively better reconstructions.
fig = plt.figure(figsize=(16, 16))
for ii in range(64):
    plt.subplot(8, 8, ii + 1)
    plt.imshow(reconstructed_images[ii], cmap=plt.cm.gray)
    plt.grid(False);
    plt.xticks([]);
    plt.yticks([]);

plt.show()
| mit |
Achuth17/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Y along the first axis

    We estimate the weights to assign to each point in Y[i] to recover
    the point X[i]. The barycenter weights sum to 1.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_dim)

    Z : array-like, shape (n_samples, n_neighbors, n_dim)

    reg: float, optional
        amount of regularization to add for the problem to be
        well-posed in the case of n_neighbors > n_dim

    Returns
    -------
    B : array-like, shape (n_samples, n_neighbors)

    Notes
    -----
    See developers note for more information.
    """
    X = check_array(X, dtype=FLOAT_DTYPES)
    Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)

    n_samples, n_neighbors = X.shape[0], Z.shape[1]
    B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
    v = np.ones(n_neighbors, dtype=X.dtype)

    # this might raise a LinalgError if G is singular and has trace
    # zero
    for i, A in enumerate(Z.transpose(0, 2, 1)):
        C = A.T - X[i]  # broadcasting
        G = np.dot(C, C.T)
        trace = np.trace(G)
        # Tikhonov regularization keeps the local Gram matrix invertible
        # when n_neighbors > n_dim (G is rank deficient in that case).
        if trace > 0:
            R = reg * trace
        else:
            R = reg
        G.flat[::Z.shape[1] + 1] += R
        # FIX: 'sym_pos=True' was deprecated and then removed from
        # scipy.linalg.solve (SciPy >= 1.11); 'assume_a="pos"' is the
        # equivalent, supported spelling.
        w = solve(G, v, assume_a='pos')
        B[i, :] = w / np.sum(w)
    return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
    """Computes the barycenter weighted graph of k-Neighbors for points in X

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : int
        Number of neighbors for each sample.

    reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Only relevant if mode='barycenter'. If None, use the
        default.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    See also
    --------
    sklearn.neighbors.kneighbors_graph
    sklearn.neighbors.radius_neighbors_graph
    """
    # query one extra neighbor: the closest neighbor of a training point
    # is the point itself, which is dropped below
    nn = NearestNeighbors(n_neighbors + 1).fit(X)
    X = nn._fit_X
    n_samples = X.shape[0]
    neighbor_ind = nn.kneighbors(X, return_distance=False)[:, 1:]
    weights = barycenter_weights(X, X[neighbor_ind], reg=reg)
    # each row i holds exactly n_neighbors entries in the CSR layout
    indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    return csr_matrix((weights.ravel(), neighbor_ind.ravel(), indptr),
                      shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite

    k : integer
        Number of eigenvalues/vectors to return

    k_skip : integer, optional
        Number of low eigenvalues to skip.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition.  For this method, M must be an array
            or matrix type.  This method should be avoided for
            large problems.

    tol : float, optional
        Tolerance for 'arpack' method.
        Not used if eigen_solver=='dense'.

    max_iter : maximum number of iterations for 'arpack' method
        not used if eigen_solver=='dense'

    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.
    """
    if eigen_solver == 'auto':
        # Heuristic: shift-invert ARPACK only pays off for large matrices
        # when few eigenvectors are requested.
        if M.shape[0] > 200 and k + k_skip < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'

    if eigen_solver == 'arpack':
        random_state = check_random_state(random_state)
        # random starting vector for the Arnoldi iteration
        v0 = random_state.rand(M.shape[0])
        try:
            # sigma=0.0: shift-invert mode targets the eigenvalues closest
            # to zero, i.e. the (approximate) null space of M.
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                tol=tol, maxiter=max_iter,
                                                v0=v0)
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved. method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)

        # drop the k_skip smallest (trivial) eigenpairs
        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            # eigh needs a dense array; densify sparse input
            M = M.toarray()
        # eigvals selects the (k_skip)-th through (k + k_skip - 1)-th
        # smallest eigenvalues of the symmetric matrix M
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        # re-order by magnitude so near-zero eigenvectors come first
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
        X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
        max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
        random_state=None):
    """Perform a Locally Linear Embedding analysis on the data.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse array, precomputed tree, or NearestNeighbors
        object.

    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold.

    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data
        arpack : use arnoldi iteration in shift-invert mode.
            For this method, M may be a dense matrix, sparse matrix,
            or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is
            best to try several random seeds in order to check results.
        dense : use standard dense matrix operations for the eigenvalue
            decomposition.  For this method, M must be an array
            or matrix type.  This method should be avoided for
            large problems.

    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : integer
        maximum number of iterations for the arpack solver.

    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian  : use the Hessian eigenmap method.  This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2.
                   see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]_

    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'

    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'

    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations.  Defaults to numpy.random.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.

    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)
    # one extra neighbor is requested because each point is its own
    # nearest neighbor and is excluded below
    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
    nbrs.fit(X)
    X = nbrs._fit_X
    N, d_in = X.shape
    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("n_neighbors must be less than number of points")
    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")
    M_sparse = (eigen_solver != 'dense')
    if method == 'standard':
        W = barycenter_kneighbors_graph(
            nbrs, n_neighbors=n_neighbors, reg=reg)
        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            # add I to the diagonal, completing M = (I-W)'(I-W)
            M.flat[::M.shape[0] + 1] += 1
    elif method == 'hessian':
        # dp = number of independent entries of a symmetric
        # (n_components x n_components) Hessian
        dp = n_components * (n_components + 1) // 2
        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        # NOTE: np.float was removed from NumPy (1.24); the builtin float
        # is an identical dtype specification.
        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=float)
        Yi[:, 0] = 1
        M = np.zeros((N, N), dtype=float)
        use_svd = (n_neighbors > d_in)
        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)
            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]
            Yi[:, 1:1 + n_components] = U[:, :n_components]
            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
                                                 * U[:, k:n_components])
                j += n_components - k
            Q, R = qr(Yi)
            w = Q[:, n_components + 1:]
            S = w.sum(0)
            # guard against division by (near) zero column sums
            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)
        if M_sparse:
            M = csr_matrix(M)
    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])
        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)
        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs,
                                        full_matrices=True)
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]
        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        # (note: this deliberately rebinds the `reg` parameter)
        reg = 1E-3 * evals.sum(1)
        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]
        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]
        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points. This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)
        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = np.cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues
        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=float)
        for i in range(N):
            s_i = s_range[i]
            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h
            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
                  + (1 - alpha_i) * w_reg[i, :, None])
            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i
        if M_sparse:
            M = csr_matrix(M)
    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]
        M = np.zeros((N, N))
        use_svd = (n_neighbors > d_in)
        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)
            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]
            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)
            GiGiT = np.dot(Gi, Gi.T)
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1
    return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
                      tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
    """Locally Linear Embedding.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    n_neighbors : integer
        number of neighbors to consider for each point.
    n_components : integer
        number of coordinates for the manifold
    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.
    eigen_solver : string, {'auto', 'arpack', 'dense'}
        'auto' picks the best method for the input; 'arpack' uses Arnoldi
        iteration in shift-invert mode (can be unstable for some problems -
        try several random seeds); 'dense' uses standard dense eigenvalue
        routines and should be avoided for large problems.
    tol : float, optional
        Tolerance for 'arpack' method. Not used if eigen_solver=='dense'.
    max_iter : integer
        maximum number of iterations for the arpack solver.
        Not used if eigen_solver=='dense'.
    method : string ('standard', 'hessian', 'modified' or 'ltsa')
        'standard' is the classic LLE algorithm [1]; 'hessian' is Hessian
        eigenmapping [2] and requires
        ``n_neighbors > n_components * (1 + (n_components + 1) / 2``;
        'modified' is MLLE [3]; 'ltsa' is local tangent space alignment [4].
    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping. Only used if method == 'hessian'.
    modified_tol : float, optional
        Tolerance for modified LLE. Only used if method == 'modified'.
    neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
        algorithm for the nearest-neighbors search, passed through to the
        neighbors.NearestNeighbors instance.
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for
        arpack iterations. Defaults to numpy.random.

    Attributes
    ----------
    embedding_vectors_ : array-like, shape [n_components, n_samples]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_vectors_`
    nbrs_ : NearestNeighbors object
        Stores the fitted nearest neighbors instance, including the
        BallTree or KDtree if applicable.

    References
    ----------
    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """

    def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
                 eigen_solver='auto', tol=1E-6, max_iter=100,
                 method='standard', hessian_tol=1E-4, modified_tol=1E-12,
                 neighbors_algorithm='auto', random_state=None):
        # store constructor parameters unmodified (estimator convention)
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.reg = reg
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.method = method
        self.hessian_tol = hessian_tol
        self.modified_tol = modified_tol
        self.neighbors_algorithm = neighbors_algorithm
        self.random_state = random_state

    def _fit_transform(self, X):
        # build and fit the neighbors index, then run the embedding on it
        self.nbrs_ = NearestNeighbors(self.n_neighbors,
                                      algorithm=self.neighbors_algorithm)
        rng = check_random_state(self.random_state)
        X = check_array(X)
        self.nbrs_.fit(X)
        embedding, reconstruction_error = locally_linear_embedding(
            self.nbrs_, self.n_neighbors, self.n_components,
            eigen_solver=self.eigen_solver, tol=self.tol,
            max_iter=self.max_iter, method=self.method,
            hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
            random_state=rng, reg=self.reg)
        self.embedding_ = embedding
        self.reconstruction_error_ = reconstruction_error

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Compute the embedding vectors for data X and transform X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]
            training set.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """Transform new points into embedding space.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        X_new : array, shape = [n_samples, n_components]

        Notes
        -----
        Because of scaling performed by this method, it is discouraged to use
        it together with methods that are not scale-invariant (like SVMs)
        """
        check_is_fitted(self, "nbrs_")
        X = check_array(X)
        ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                    return_distance=False)
        weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                     reg=self.reg)
        X_new = np.empty((X.shape[0], self.n_components))
        # reconstruct each new point from its neighbors' embedding vectors
        for i, (nbr_idx, w) in enumerate(zip(ind, weights)):
            X_new[i] = np.dot(self.embedding_[nbr_idx].T, w)
        return X_new
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/linear_model/plot_robust_fit.py | 147 | 3050 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non corrupt new data is used to judge
the quality of the prediction.
What we can see that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
and RANSAC because it does not attempt to completely filter the outliers
but lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np

from sklearn.linear_model import (
    LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline

np.random.seed(42)

# clean data: a sine sampled at normally-distributed x values
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]

# held-out, non-corrupted test data used to score each estimator
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]

# corrupt every third sample: small deviants (value 3) and large (value 10),
# separately in y and in X
y_errors = y.copy()
y_errors[::3] = 3

X_errors = X.copy()
X_errors[::3] = 3

y_errors_large = y.copy()
y_errors_large[::3] = 10

X_errors_large = X.copy()
X_errors_large[::3] = 10

# estimators under comparison; OLS is the non-robust baseline
estimators = [('OLS', LinearRegression()),
              ('Theil-Sen', TheilSenRegressor(random_state=42)),
              ('RANSAC', RANSACRegressor(random_state=42)),
              ('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3

# x positions at which each fitted model is evaluated for plotting
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
        ('Modeling Errors Only', X, y),
        ('Corrupt X, Small Deviants', X_errors, y),
        ('Corrupt y, Small Deviants', X, y_errors),
        ('Corrupt X, Large Deviants', X_errors_large, y),
        ('Corrupt y, Large Deviants', X, y_errors_large)]:
    plt.figure(figsize=(5, 4))
    plt.plot(this_X[:, 0], this_y, 'b+')

    for name, estimator in estimators:
        # degree-3 polynomial features, then the (robust) linear estimator
        model = make_pipeline(PolynomialFeatures(3), estimator)
        model.fit(this_X, this_y)
        # score against the clean test set, not the corrupted training data
        mse = mean_squared_error(model.predict(X_test), y_test)
        y_plot = model.predict(x_plot[:, np.newaxis])
        plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
                 linewidth=lw, label='%s: error = %.3f' % (name, mse))

    legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
    legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
                        prop=dict(size='x-small'))
    plt.xlim(-4, 10.2)
    plt.ylim(-2, 10.2)
    plt.title(title)
plt.show()
| bsd-3-clause |
dmnfarrell/mhcpredict | epitopepredict/sequtils.py | 1 | 22463 | #!/usr/bin/env python
"""
Sequence utilities and genome annotation methods
Created November 2013
Copyright (C) Damien Farrell
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, print_function
import sys, os, shutil, string, types
import csv, glob, pickle, operator
import time, re
from collections import OrderedDict
import numpy as np
import pandas as pd
from Bio import SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
from . import utilities
# Feature qualifier/location keys extracted by features_to_dataframe();
# the order here defines the dataframe column order.
featurekeys = ['type','protein_id','locus_tag','gene','db_xref',
               'product', 'note', 'translation','pseudo','pseudogene','start','end','strand']
# Default colour cycle for plotting feature types.
typecolors = ['blue','green','brown','orange','purple','lightblue','yellow','red']
def draw_genome_map(infile, filename=None):
    """Draw a whole circular genome map from a genbank file.

    Tracks: CDS features, tRNA features and a GC-content line graph.
    Writes a PNG image and returns its filename.
    """
    from Bio.Graphics import GenomeDiagram
    from Bio.SeqUtils import GC
    from reportlab.lib import colors
    genome = SeqIO.read(infile, 'genbank')
    diagram = GenomeDiagram.Diagram('test')
    cds_track = diagram.new_track(4, greytrack=1, name='CDS', scale=0)
    trna_track = diagram.new_track(3, greytrack=1, name='tRNA', scale=0)
    gc_track = diagram.new_track(2, greytrack=0, name='GC content', scale=0)
    cds_set = cds_track.new_set('feature')
    trna_set = trna_track.new_set('feature')
    gc_set = gc_track.new_set('graph')
    # GC content evaluated per feature, plotted at the feature start
    graphdata = [(f.location.start, GC(f.extract(genome.seq)))
                 for f in genome.features]
    gc_set.new_graph(graphdata, 'GC content', style='line', colour=colors.black)
    for feat in genome.features:
        if feat.type == 'CDS':
            cds_set.add_feature(feat, label=False, colour=colors.green)
        elif feat.type == 'tRNA':
            trna_set.add_feature(feat, label=True, colour=colors.red)
    diagram.draw(format='circular', orientation='landscape',
                 tracklines=0, pagesize='A4', fragments=5, circular=1)
    if filename is None:
        filename = 'genediagram.png'
    diagram.write(filename, "PNG")
    return filename
def distance_tree(filename=None, seqs=None, ref=None):
    """Draw a basic phylogenetic tree for an alignment.

    Args:
        filename: newick tree file to read; ignored when seqs is given
        seqs: optional sequences - aligned with clustal first, which
              writes 'temp.dnd' as a side effect
        ref: optional name of the record to root the tree with
    Returns:
        the Bio.Phylo tree object
    """
    from Bio import Phylo
    # bug fix: matplotlib was never imported in this module, so `plt`
    # below raised NameError; import it locally here.
    import matplotlib.pyplot as plt
    if seqs is not None:
        # clustal writes the guide tree to temp.dnd as a side effect
        clustal_alignment(None, seqs)
        filename = 'temp.dnd'
    tree = Phylo.read(filename, 'newick')
    if ref is not None:
        tree.root_with_outgroup(ref)
    f = plt.figure(figsize=(8, 8))
    ax = f.add_subplot(111)
    # set_axis_bgcolor was removed from matplotlib; set_facecolor is the
    # current equivalent
    ax.set_facecolor('white')
    Phylo.draw(tree, axes=ax)
    return tree
def ete_tree(aln):
    """Show an ete tree for alleles, highlighting matched nodes.

    Reads the guide tree from 'temp.dnd' and displays it interactively.
    """
    from ete2 import Tree, PhyloTree, TreeStyle, NodeStyle

    t = Tree('temp.dnd')
    ts = TreeStyle()
    ts.show_leaf_name = True
    ts.mode = "c"
    ts.arc_start = -180
    ts.arc_span = 180
    cutoff = 0.25

    def func(node):
        # predicate for highlighting; the metric-based test is disabled
        if node.name == 'NoName':  # or not node.name in metric:
            return False
        # if metric[node.name] <= cutoff:
        #     return True

    # bug fix: filter() returns a lazy iterator on Python 3, so the original
    # len(matches) raised TypeError; materialise as a list instead.
    matches = [n for n in t.traverse() if func(n)]
    print (len(matches), "nodes have distance <=%s" %cutoff)
    nst1 = NodeStyle()
    nst1["bgcolor"] = "Yellow"
    for n in matches:
        n.set_style(nst1)
    nst2 = NodeStyle()
    nst2["bgcolor"] = "LightGreen"
    # hlanodes = [t.get_leaves_by_name(name=r)[0] for r in refalleles]
    # for n in hlanodes:
    #     n.set_style(nst2)
    t.show(tree_style=ts)
    return
def remote_blast(db, query, maxseqs=50, evalue=0.001, **kwargs):
    """Run blastp remotely against an NCBI database.

    Args:
        query: fasta file with sequence to blast
        db: database to use - nr, refseq_protein, pdb, swissprot
    Results are written to '<query>_blast.txt'.
    """
    from Bio.Blast.Applications import NcbiblastpCommandline
    outfile = os.path.splitext(query)[0] + '_blast.txt'
    outfmt = '"6 qseqid sseqid qseq sseq pident qcovs length mismatch gapopen qstart qend sstart send evalue bitscore stitle"'
    cline = NcbiblastpCommandline(query=query, db=db, max_target_seqs=maxseqs,
                                  outfmt=outfmt, evalue=evalue, out=outfile,
                                  remote=True, **kwargs)
    cline()
    return
def local_blast(database, query, output=None, maxseqs=50, evalue=0.001,
                compress=False, cmd='blastp', cpus=2, show_cmd=False, **kwargs):
    """Blast a local database, writing tabular results to a file.

    Args:
        database: local blast db name
        query: fasta file of query sequences
        output: result file; defaults to '<query>_blast.txt'
        cmd: blast program to run (default 'blastp')
        cpus: number of threads for the blast run

    Note: results are written to ``output`` and nothing is returned; use
    get_blast_results() to load them (the previous docstring incorrectly
    claimed a dataframe was returned).
    """
    if output is None:
        output = os.path.splitext(query)[0]+'_blast.txt'
    from Bio.Blast.Applications import NcbiblastxCommandline
    outfmt = '"6 qseqid sseqid qseq sseq pident qcovs length mismatch gapopen qstart qend sstart send evalue bitscore stitle"'
    cline = NcbiblastxCommandline(query=query, cmd=cmd, db=database,
                                  max_target_seqs=maxseqs,
                                  outfmt=outfmt, out=output,
                                  evalue=evalue, num_threads=cpus, **kwargs)
    if show_cmd == True:
        print (cline)
    stdout, stderr = cline()
    return
def get_blast_results(filename):
    """Load blast results into a dataframe.

    Assumes the tab-separated column layout written by local_blast().

    Returns:
        dataframe of blast hits
    """
    columns = ['qseqid','sseqid','qseq','sseq','pident','qcovs','length','mismatch','gapopen',
               'qstart','qend','sstart','send','evalue','bitscore','stitle']
    return pd.read_csv(filename, names=columns, sep='\t')
def blast_sequences(database, seqs, labels=None, **kwargs):
    """Blast a set of sequences against a local or remote blast database.

    Args:
        database: local or remote blast db name;
            'nr', 'refseq_protein', 'pdb', 'swissprot' are valid remote dbs
        seqs: sequences to query, list of strings or Bio.SeqRecords
        labels: list of id names for sequences, optional but recommended
    Returns:
        pandas dataframe with top blast results
    """
    remotedbs = ['nr','refseq_protein','pdb','swissprot']
    if labels is None:
        labels = seqs
    recs = []
    for seq, name in zip(seqs, labels):
        # bug fix: use isinstance() rather than a direct type comparison so
        # SeqRecord subclasses are not needlessly re-wrapped
        if isinstance(seq, SeqRecord):
            rec = seq
            name = seq.id
        else:
            rec = SeqRecord(Seq(seq), id=name)
        recs.append(rec)
    SeqIO.write(recs, 'tempseq.fa', "fasta")
    if database in remotedbs:
        remote_blast(database, 'tempseq.fa', **kwargs)
    else:
        local_blast(database, 'tempseq.fa', **kwargs)
    df = get_blast_results(filename='tempseq_blast.txt')
    return df
def fasta_to_dataframe(infile, header_sep=None, key='locus_tag', seqkey='translation'):
    """Get fasta proteins into a dataframe.

    Args:
        infile: fasta file path
        header_sep: optional separator; ids are truncated at its first
            occurrence
        key: column name for the sequence id
        seqkey: column name for the sequence string
    Returns:
        dataframe with one row per record
    """
    recs = SeqIO.parse(infile,'fasta')
    keys = [key,seqkey,'description']
    data = [(r.name,str(r.seq),str(r.description)) for r in recs]
    df = pd.DataFrame(data,columns=(keys))
    df['type'] = 'CDS'
    #fix bad names
    if header_sep not in ['',None]:
        df[key] = df[key].apply(lambda x: x.split(header_sep)[0],1)
    # bug fix: '|' is a regex alternation matching the empty string, so the
    # regex-default str.replace inserted '_' between every character;
    # treat the pattern as a literal instead
    df[key] = df[key].str.replace('|','_', regex=False)
    return df
def convert_sequence_format(infile, outformat='embl'):
    """Convert a sequence file to another format using SeqIO.

    The input format is inferred from the file extension; the output file
    shares the input's stem with the new format as extension.
    """
    stem, ext = os.path.splitext(infile)
    informat = ext[1:]
    if informat == 'fa':
        informat = 'fasta'
    print ('input format: %s' %informat)
    print ('output format: %s' %outformat)
    outfile = stem + '.' + outformat
    n_records = SeqIO.convert(infile, informat, outfile, outformat)
    print ("Converted %i records" %n_records)
    return
def get_cds(df):
    """Return only the CDS rows that carry a translation."""
    coding = df[df.type == 'CDS']
    return coding[coding.translation.notnull()]
def fasta_format_from_feature(feature):
    """Get a fasta formatted sequence string from a genome feature.

    Returns '' if the feature has no translation qualifier.
    """
    name = feature.qualifiers['locus_tag'][0]
    # bug fix: dict.has_key() does not exist on Python 3; use `in`
    if 'translation' not in feature.qualifiers:
        return ''
    seq = feature.qualifiers['translation'][0]
    rec = SeqRecord(Seq(seq),id=name,
                    description=feature.qualifiers['product'][0])
    fastafmt = rec.format("fasta")
    return fastafmt
def dataframe_to_seqrecords(df, seqkey='sequence', idkey='id'):
    """Convert a dataframe to a list of Bio.SeqRecord objects."""
    return [SeqRecord(Seq(row[seqkey]), id=row[idkey])
            for _, row in df.iterrows()]
def dataframe_to_fasta(df, seqkey='translation', idkey='locus_tag',
                       descrkey='description',
                       outfile='out.faa'):
    """Write genbank features from a dataframe to a fasta file.

    Returns the output filename.
    """
    has_descr = descrkey in df.columns
    records = []
    for _, row in df.iterrows():
        descr = row[descrkey] if has_descr else ''
        records.append(SeqRecord(Seq(row[seqkey]), id=row[idkey],
                                 description=descr))
    SeqIO.write(records, outfile, "fasta")
    return outfile
def features_to_dataframe(recs, cds=False, select='all'):
    """Get genome records from a biopython features object into a dataframe
    returns a dataframe with a row for each cds/entry.
    Args:
        recs: seqrecords object
        cds: only return cds
        select: 'first' record or 'all'
    """
    if select == 'first':
        recs = [recs[0]]
    res = []
    for rec in recs:
        feats=[]
        for (item, f) in enumerate(rec.features):
            # merge the feature's own attributes with its qualifiers so both
            # can be looked up uniformly below
            # NOTE(review): this mutates f.__dict__ in place — confirm intended
            x = f.__dict__
            q = f.qualifiers
            #featurekeys = list(q.keys()) + ['type']
            #print (featurekeys)
            x.update(q)
            d = {}
            # 'locus' is dropped later since it is not in featurekeys —
            # TODO confirm whether it was meant to be kept
            d['locus'] = rec.name
            d['start'] = f.location.start
            d['end'] = f.location.end
            d['strand'] = f.location.strand
            # qualifier values are usually single-element lists; unwrap them
            for i in featurekeys:
                if i in x:
                    if type(x[i]) is list:
                        d[i] = x[i][0]
                    else:
                        d[i] = x[i]
            feats.append(d)
        # columns restricted to the module-level featurekeys list
        df = pd.DataFrame(feats, columns=featurekeys)
        #print (df.columns)
        res.append(df)
    final = pd.concat(res)
    # sequence length; astype('str') makes missing translations count as 'nan'
    final['length'] = final.translation.astype('str').str.len()
    # fall back to the gene name where locus_tag is missing
    final = check_tags(final)
    if cds == True:
        final = get_cds(final)
        final['order'] = range(1,len(final)+1)
    if len(final) == 0:
        print ('ERROR: genbank file return empty data, check that the file contains protein sequences '\
               'in the translation qualifier of each protein feature.' )
    return final
def genbank_to_dataframe(infile, cds=False):
    """Load records from a genbank file into a dataframe, one row per
    cds/entry."""
    records = list(SeqIO.parse(infile, 'genbank'))
    return features_to_dataframe(records, cds)
def embl_to_dataframe(infile, cds=False):
    """Load records from an embl file into a dataframe, one row per
    cds/entry."""
    records = list(SeqIO.parse(infile, 'embl'))
    return features_to_dataframe(records, cds)
def check_tags(df):
    """Fill in missing locus_tag values using the gene name so tags are
    never empty."""
    def fill(row):
        if pd.isnull(row.locus_tag):
            return row.gene
        return row.locus_tag
    df['locus_tag'] = df.apply(fill, 1)
    return df
def features_summary(df):
    """Summarise a genbank feature dataframe.

    Args:
        df: dataframe as produced by features_to_dataframe()
    Returns:
        dict of summary counts (and mean sequence length when translations
        are present)
    """
    def is_hypothetical(val):
        # bug fix: the original returned from the loop's else branch on the
        # first iteration, so only the 'hypothetical' keyword was ever tested
        val = val.lower()
        kwds = ['hypothetical', 'conserved protein', 'unknown protein']
        return any(k in val for k in kwds)

    coding = df[df.type == 'CDS']
    trna = df[df.type == 'tRNA']
    products = coding[coding['product'].notnull()]
    cdstrans = coding[coding.translation.notnull()]
    hypo = products[products['product'].apply(is_hypothetical)]
    pseudo = df[(df.type == 'gene') & (df.pseudo.notnull())]
    notags = df[df.locus_tag.isnull()]
    repeats = df[df.type == 'repeat_region']
    s = {}
    s['total features'] = len(df)
    s['coding sequences'] = len(coding)
    s['cds with translations'] = len(cdstrans)
    s['cds with gene names'] = len(coding.gene.dropna())
    s['hypothetical'] = len(hypo)
    s['pseudogenes'] = len(pseudo)
    s['trna'] = len(trna)
    s['repeat_region'] = len(repeats)
    s['no locus tags'] = len(notags)
    if len(cdstrans) > 0:
        # mean protein length over CDS that actually carry a translation
        avlength = int(np.mean([len(i) for i in cdstrans.translation]))
        s['mean sequence length'] = avlength
    return s
def find_keyword(f):
    """Extract unique Rv-style locus identifiers from a text field.

    Only the first 100 characters are scanned; order of first appearance
    is preserved.
    """
    tokens = re.split('[ |/,.:]+', f[:100])
    found = []
    for tok in tokens:
        if tok.startswith('Rv'):
            found.append(tok.strip())
    return list(OrderedDict.fromkeys(found))
def index_genbank_features(gb_record, feature_type, qualifier):
    """Map each qualifier value of the given feature type to its feature
    index for easy access; warns on duplicate values."""
    index_map = dict()
    for idx, feat in enumerate(gb_record.features):
        if feat.type != feature_type:
            continue
        if qualifier not in feat.qualifiers:
            continue
        values = feat.qualifiers[qualifier]
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if value in index_map:
                # first occurrence wins; later duplicates are reported only
                print ("WARNING - Duplicate key %s for %s features %i and %i" \
                       % (value, feature_type, index_map[value], idx))
            else:
                index_map[value] = idx
    return index_map
def get_genes_by_location(genome, feature, within=20):
    """Return CDS features whose start lies within a given distance of the
    given feature's start, ordered by increasing distance.

    Returns None when no feature qualifies.
    """
    anchor = feature.location.start
    hits = []
    for feat in genome.features:
        if feat.type != 'CDS':
            continue
        dist = abs(feat.location.start - anchor)
        if dist < within:
            hits.append((feat, dist))
    if not hits:
        return None
    hits.sort(key=operator.itemgetter(1))
    return [pair[0] for pair in hits]
def get_translation(feature, genome, cds=True):
    """Translate a CDS feature's nucleotide sequence.

    Args:
        feature: the genome feature to translate
        genome: record providing the full sequence via .seq
        cds: passed through to Seq.translate (enforce a complete CDS)
    Returns:
        (protein, error) tuple; protein is '' and error holds the exception
        when translation fails, otherwise error is None.
    """
    trtable = "Bacterial"
    seq = feature.extract(genome.seq)
    err = None
    try:
        protein = seq.translate(table=trtable, cds=cds, to_stop=True)
    except Exception as e:
        # bug fix: Python 3 unbinds the `except ... as e` target after the
        # block, so the original `return protein, e` raised NameError on the
        # failure path; capture the exception in an outer-scope name.
        protein = ''
        err = e
    return protein, err
def pairwise_alignment(rec1, rec2):
    """Local pairwise alignment of two sequences using BLOSUM62 scoring."""
    from Bio import pairwise2
    from Bio.SubsMat import MatrixInfo as matlist
    open_penalty = -10
    extend_penalty = -0.5
    return pairwise2.align.localds(rec1, rec2, matlist.blosum62,
                                   open_penalty, extend_penalty)
def clustal_alignment(filename=None, seqs=None, command="clustalw"):
    """Align sequences with clustalw and return the alignment.

    When no filename is given the sequences are written to 'temp.faa'
    first; clustal writes '<stem>.aln' which is then read back.
    """
    if filename is None:
        filename = 'temp.faa'
        SeqIO.write(seqs, filename, "fasta")
    stem = os.path.splitext(filename)[0]
    from Bio.Align.Applications import ClustalwCommandline
    cline = ClustalwCommandline(command, infile=filename)
    cline()
    return AlignIO.read(stem + '.aln', 'clustal')
def needle_alignment(seq1, seq2, outfile='needle.txt'):
    """Globally align 2 sequences with EMBOSS needle and return the
    alignment."""
    SeqIO.write(seq1, 'alpha.faa', "fasta")
    SeqIO.write(seq2, 'beta.faa', "fasta")
    from Bio.Emboss.Applications import NeedleCommandline
    cline = NeedleCommandline(asequence='alpha.faa', bsequence='beta.faa',
                              gapopen=30, gapextend=0.5, outfile=outfile)
    cline()
    return AlignIO.read('needle.txt', "emboss")
def muscle_alignment(filename=None, seqs=None):
    """Align sequences with muscle and return the alignment.

    When no filename is given the sequences are first written to
    'temp.faa'; the aligned output goes to '<stem>.txt'.
    """
    if filename is None:
        filename = 'temp.faa'
        SeqIO.write(seqs, filename, "fasta")
    stem = os.path.splitext(filename)[0]
    from Bio.Align.Applications import MuscleCommandline
    cline = MuscleCommandline(input=filename, out=stem + '.txt')
    cline()
    return AlignIO.read(stem + '.txt', 'fasta')
def show_alignment(aln, diff=False, offset=0):
    """
    Show a sequence alignment
        Args:
            aln: alignment
            diff: whether to show differences
            offset: subtracted from the printed column labels
    """
    ref = aln[0]
    l = len(aln[0])
    n=60
    # print the alignment in chunks of n columns
    chunks = [(i,i+n) for i in range(0, l, n)]
    for c in chunks:
        start,end = c
        lbls = np.arange(start,end,10)-offset
        print (('%-21s' %'name'),''.join([('%-10s' %i) for i in lbls]))
        print (('%21s' %ref.id[:20]), ref.seq[start:end])
        if diff == True:
            for a in aln[1:]:
                # Build the per-record difference string in its own name.
                # The original stored it in `diff` itself, clobbering the
                # boolean flag and silently disabling diff display from the
                # second chunk onwards.
                diffstr = ''
                for i,j in zip(ref,a):
                    if i != j:
                        diffstr += j
                    else:
                        diffstr += '-'
                name = a.id[:20]
                print (('%21s' %name), diffstr[start:end])
        else:
            for a in aln[1:]:
                name = a.id[:20]
                print (('%21s' %name), a.seq[start:end])
    return
def get_identity(aln):
    """Get sequence identity of a pairwise alignment over the matching region.

    Compares aln[1] against aln[0] column by column, skipping columns where
    either sequence has a gap. The "overlap" is the span between the first
    and last identical columns.

    Returns:
        (percent_identity, overlap_length); (0.0, 0) when no column is
        identical (the original crashed on None arithmetic in that case).
    """
    matches = 0
    col = 0
    record = aln[1]
    start = None; end = None  # use these to get local overlap
    for aa in record.seq:
        aa1 = aln[0].seq[col]
        if aa != '-' and aa1 != '-' and aa == aa1:
            if start is None:
                start = col
            end = col + 1
            matches += 1
        col += 1
    if start is None:
        # no identical column at all: avoid None arithmetic / zero division
        return 0.0, 0
    overlap = end - start
    percent = round(100.0 * matches / overlap, 1)
    return percent, overlap
def format_alignment(aln):
    """Format an alignment as plain text, 80 columns per chunk.

    Returns:
        a string with one '%15s <seq-slice>' line per record per chunk,
        chunks separated by a blank line.
    """
    t = ''
    for i in range(0, len(aln[0]), 80):
        for a in aln:
            # str(a.seq) works on both old and modern Biopython;
            # Seq.tostring() was removed from Biopython entirely.
            t += ('%15s' % a.id) + ' ' + str(a.seq)[i:i + 80] + '\n'
        t += '\n'
    return t
def alignment_to_dataframe(aln):
    """Convert a sequence alignment into a pandas DataFrame
    with name/seq/description columns, one row per record."""
    rows = []
    for rec in aln:
        rows.append([rec.id, str(rec.seq), rec.description])
    return pd.DataFrame(rows, columns=['name', 'seq', 'description'])
def get_feature_qualifier(f, qualifier):
    """Return the first value of a feature qualifier, lower-cased.

    Returns None when the qualifier is absent.
    """
    # dict.has_key() does not exist in Python 3; `in` works in both 2 and 3
    if qualifier in f.qualifiers:
        fq = f.qualifiers[qualifier][0].lower()
    else:
        fq = None
    return fq
def get_sequence(genome, name):
    """Look up the translation for locus tag `name` in a dataframe of
    genbank/sequence data and return the first match."""
    matches = genome.loc[genome['locus_tag'] == name, 'translation']
    return matches.iloc[0]
def fetch_protein_sequences(searchterm, filename='found.fa'):
    """
    Fetch protein seqs using ncbi esearch and save results to a
    fasta file.
    Args:
        searchterm: entrez search term
        filename: fasta file name to save results
    Returns:
        the fetched sequence records (list of SeqRecord); the
        de-duplicated set is what ends up written to `filename`
    """
    from Bio import Entrez
    from Bio import SeqIO
    Entrez.email = "A.N.Other@example.com"
    handle = Entrez.esearch(db="protein", term=searchterm, retmax=200)
    record = Entrez.read(handle)
    handle.close()
    # fetch the actual sequences for the returned ids
    handle = Entrez.efetch(db="protein", rettype="fasta", retmode="text",
                           id=record['IdList'])
    recs = list(SeqIO.parse(handle, "fasta"))
    handle.close()
    # Write and CLOSE before re-reading: the original left the handle open,
    # so the tail of the data could still be buffered when
    # fasta_to_dataframe read the file back.
    with open(filename, "w") as outfile:
        SeqIO.write(recs, outfile, "fasta")
    df = fasta_to_dataframe(filename)
    # remove redundancy and sequences with ambiguous residues
    df = df.drop_duplicates('translation')
    df = df[-df.translation.str.contains('X')]
    print ('%s non-redundant sequences retrieved' %len(df))
    # save the filtered set as the final fasta file
    dataframe_to_fasta(df, outfile=filename)
    return recs
def show_alignment_html(alnrows, seqs, width=80, fontsize=15, label='name'):
    """
    Get html display of sub-sequences on multiple protein alignment.
    Args:
        alnrows: a dataframe of aligned sequences
        seqs: sub-sequences/epitopes to draw if present
        label: key from dataframe to use as label for sequences
    Returns:
        html code
    """
    import matplotlib as mpl
    l=len(seqs[0])
    # find the start offsets of every sub-sequence in every aligned row;
    # found[i] lists the hit offsets for row i (str.find misses dropped)
    found = []
    for row in alnrows.seq:
        x = [row.find(s) for s in seqs]
        x = [i for i in x if i!=-1]
        #print x
        found.append(x)
    seqhtml=[]
    # flatten and uniquify all hit offsets so each gets a stable colour
    f=[]
    [f.extend(i) for i in found]
    f = sorted(list(set(f)))
    cmap = mpl.cm.get_cmap('Set3')
    c=1
    #unique color for each found sub-sequence
    colors={}
    for i in f:
        clr = cmap(float(c+0.1)/len(f))
        colors[i] = mpl.colors.rgb2hex(clr)
        c+=1
    seqhtml.append('<div style="font-family: monospace;letter-spacing: -.3em;font-size:%spx">' %fontsize)
    clr = ''
    chunks = []  # unused
    alnlength = len(alnrows.iloc[0].seq)
    # NOTE(review): `l` is reassigned from len(seqs[0]) to a fixed highlight
    # length of 11 columns here -- presumably an epitope length; confirm
    # whether this should instead remain len(seqs[0])
    l = 11
    for idx in range(0,alnlength,width):
        # `f` is reused here as the row index into `found`
        f=0
        seqhtml.append('<span style="letter-spacing:.2em;font-weight: bold">%s</span><br>' %idx)
        cidx=0
        for x,row in alnrows.iterrows():
            # rows with no sub-sequence hits are skipped entirely
            if len(found[f])==0:
                f+=1
                continue
            try:
                name = row[label]
            except:
                name = row.definition
            seq = row.seq
            for i in range(idx,idx+width):
                if i>alnlength-1: continue
                if i in found[f]:
                    # start highlighting at a hit offset...
                    cidx = i
                    clr = colors[cidx]
                elif i-cidx >= l:
                    # ...and stop l columns after the last hit start
                    clr = ''
                seqhtml.append('<span style="background-color:%s">%s </span>' %(clr,seq[i]))
            clr = ''
            seqhtml.append('<span>&nbsp&nbsp</span>')
            seqhtml.append('<span style="letter-spacing:.1em; font-weight: bold">%s </span>' %name)
            seqhtml.append('<br>')
            f+=1
    seqhtml = ''.join(seqhtml)
    return seqhtml
| apache-2.0 |
NeuPhysics/codebase | ipynb/matter/py-server/two-freq-trans-prob.py | 1 | 1925 |
# coding: utf-8
# In[ ]:
import numpy as np
from scipy.integrate import odeint
from scipy.integrate import ode
import matplotlib.pylab as plt
# Two-frequency neutrino transition-probability integration.
# All energies/frequencies are expressed in units of omegam.
endpoint = 10000000  # integration range
dx = 10.0  # step size
lam0 = 0.845258  # in unit of omegam, omegam = 3.66619*10^-17
dellam = np.array([0.00003588645221954444, 0.06486364865874367])  # deltalambda/omegam
ks = [1.0, 1.0/90]  # two k's
thm = 0.16212913985547778  # theta_m
psi0, x0 = [1.0+0.j, 0.0], 0
savestep = 1000
xlin = np.arange(dx, endpoint + 1*dx, dx)
psi = np.zeros([len(xlin), 2], dtype='complex_')
# Floor division: under Python 3 true division len(xlin)/savestep is a
# float, which np.zeros rejects as an array length.
xlinsave = np.zeros(len(xlin) // savestep)
psisave = np.zeros([len(xlinsave), 2], dtype='complex_')

def hamiltonian(x, deltalambda, k, thetam):
    """Interaction-picture 2x2 Hamiltonian with two perturbation frequencies
    (off-diagonal elements only; the two entries are complex conjugates)."""
    return [[ 0, 0.5* np.sin(2*thetam) * ( deltalambda[0] * np.sin(k[0]*x) + deltalambda[1] * np.sin(k[1]*x) ) * np.exp( 1.0j * ( - x - np.cos(2*thetam) * ( ( deltalambda[0]/k[0] * np.cos(k[0]*x) + deltalambda[1]/k[1] * np.cos(k[1]*x) ) ) ) ) ], [ 0.5* np.sin(2*thetam) * ( deltalambda[0] * np.sin(k[0]*x) + deltalambda[1] * np.sin(k[1]*x) ) * np.exp( -1.0j * ( - x - np.cos(2*thetam) * ( deltalambda[0] /k[0] * np.cos(k[0]*x) + deltalambda[1] /k[1] * np.cos(k[1]*x) ) ) ), 0 ]]

def deripsi(t, psi, deltalambda, k, thetam):
    """Right-hand side of the Schrodinger equation: dpsi/dt = -i H psi."""
    return -1.0j * np.dot(hamiltonian(t, deltalambda, k, thetam), [psi[0], psi[1]])

sol = ode(deripsi).set_integrator('zvode', method='bdf', atol=1e-8, with_jacobian=False)
sol.set_initial_value(psi0, x0).set_f_params(dellam, ks, thm)
flag = 0
flagsave = 0
while sol.successful() and sol.t < endpoint:
    sol.integrate(xlin[flag])
    # keep only every savestep-th sample to bound memory and output size
    if np.mod(flag, savestep) == 0:
        psisave[flagsave] = sol.y
        xlinsave[flagsave] = sol.t
        flagsave = flagsave + 1
    flag = flag + 1
    # print sol.t, sol.y
# transition probabilities |psi|^2, transposed to (component, sample)
prob = np.absolute(psisave)**2
probtran = np.transpose(prob)
np.save("probtran", probtran)
# In[ ]:
| mit |
supergis/QGIS | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 12 | 3376 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import QVariant
from qgis.core import QgsField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
    """Processing algorithm: histogram of a raster layer's cell values,
    written as an HTML plot plus a table of bin ranges and counts.
    (Python 2 codebase: uses xrange/unicode.)"""
    # parameter/output keys used by the processing framework
    INPUT = 'INPUT'
    PLOT = 'PLOT'
    TABLE = 'TABLE'
    BINS = 'BINS'
    def defineCharacteristics(self):
        # Declare the algorithm's name, group, inputs and outputs.
        self.name, self.i18n_name = self.trAlgorithm('Raster layer histogram')
        self.group, self.i18n_group = self.trAlgorithm('Graphics')
        self.addParameter(ParameterRaster(self.INPUT,
                                          self.tr('Input layer')))
        # number of histogram bins: min 2, no max, default 10
        self.addParameter(ParameterNumber(self.BINS,
                                          self.tr('Number of bins'), 2, None, 10))
        self.addOutput(OutputHTML(self.PLOT, self.tr('Histogram')))
        self.addOutput(OutputTable(self.TABLE, self.tr('Table')))
    def processAlgorithm(self, progress):
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT))
        nbins = self.getParameterValue(self.BINS)
        outputplot = self.getOutputValue(self.PLOT)
        outputtable = self.getOutputFromName(self.TABLE)
        values = raster.scanraster(layer, progress)
        # ALERT: this is potentially blocking if the layer is too big
        plt.close()
        # drop nodata cells (reported as None by scanraster)
        valueslist = []
        for v in values:
            if v is not None:
                valueslist.append(v)
        # n: counts per bin, bins: bin edges (len(bins) == len(n) + 1)
        (n, bins, values) = plt.hist(valueslist, nbins)
        fields = [QgsField('CENTER_VALUE', QVariant.Double),
                  QgsField('NUM_ELEM', QVariant.Double)]
        writer = outputtable.getTableWriter(fields)
        # NOTE(review): the first column is written as a 'lo-hi' string even
        # though the field is declared Double -- confirm intended
        for i in xrange(len(values)):
            writer.addRecord([unicode(bins[i]) + '-' + unicode(bins[i + 1]), n[i]])
        # save the figure and wrap it in a minimal HTML page
        plotFilename = outputplot + '.png'
        lab.savefig(plotFilename)
        f = open(outputplot, 'w')
        f.write('<html><img src="' + plotFilename + '"/></html>')
        f.close()
| gpl-2.0 |
wyfzeqw/Environmental-Influence-on-Crowd-Dynamics | Project/3_fsg.py | 1 | 1508 | import numpy as np
from pandas import *
import math
# a = np.arange(9).reshape((3,3))
# arr: per-frame pedestrian rows from the Matlab export; columns appear to
# be [x, y, pedestrian_index, frame] -- TODO confirm against the exporter
arr = np.loadtxt(open("csv files/matlab.csv","rb"),delimiter=",",skiprows=0)
# data: rows of [x, y, frame, pedestrian_index] (see fsg() usage below)
data = np.loadtxt(open("csv files/output_tindex.csv","rb"),delimiter=",",skiprows=0)
# slice the arr(matlab csv) with the start time and end time
# then drop the duplicates from the ndarray using panda
# array = DataFrame(arr[:,2:]).drop_duplicates().values
# print len(array)
# array: stationary-group info rows; columns [start_frame, end_frame, ...]
array = np.loadtxt(open("csv files/groupinfo.csv","rb"),delimiter=",",skiprows=0)
def distanceDelta(x, t):
    """Minimum Euclidean distance from point x (x[0], x[1]) to any row of
    the module-level `arr` whose frame column (index 3) equals t.
    Returns the sentinel 10000 when no row matches."""
    nearest = 10000
    frame_rows = np.array([row for row in arr if row[3] == t])
    for row in frame_rows:
        d = math.hypot(x[0] - row[0], x[1] - row[1])
        if d < nearest:
            nearest = d
    return nearest
# a=[x,y,frame,pede]
# b=[x,y,start,end]
# c=[0,20,40]
def fsg(Data, theta3, theta4):
    """Compute the stationary-group interaction term for each pedestrian row.

    Args:
        Data: rows of [x, y, frame, pedestrian_index]
        theta3: decay constant of the exp(-theta3/d**2) interaction term
            (the original shadowed this with a hard-coded 0.5)
        theta4: currently unused; kept for interface compatibility
    Collects and prints rows of
    [frame, pedestrian index, stationary group index, distance, value],
    where value is 1 when no usable distance exists.
    """
    output3 = []
    for a in Data:
        for b in array:
            if a[2] in range(int(b[0]), int(b[1]) + 1):
                distance = distanceDelta(a, b[1])
                # distance 0 would divide by zero below, and 10000 is the
                # "no neighbour found" sentinel from distanceDelta. The
                # original `distance == 0 and 10000` only ever tested 0.
                if distance == 0 or distance == 10000:
                    row = [a[2], a[3], b[1], distance, 1]
                else:
                    row = [a[2], a[3], b[1], distance,
                           math.exp(-theta3 / distance ** 2)]
                output3.append(row)
                print(row)
    # [frame, pedestrian index, stationary group index, distance, exp]
    # np.savetxt('csv files/4_fsgoutput.csv', output3, delimiter = ',')
fsg(data,0.5,0) | mit |
mattgiguere/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
# Load the diabetes regression dataset and standardize the features.
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0)  # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3  # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
    X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
# NOTE(review): "positve" typo lives in the printed string; left untouched
# since it is runtime output.
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
# Figure 1: Lasso (solid) vs Elastic-Net (dashed) coefficient paths.
plt.figure(1)
ax = plt.gca()
# NOTE(review): Axes.set_color_cycle was removed in matplotlib 2.x; modern
# matplotlib requires set_prop_cycle instead.
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
# Figure 2: unconstrained vs positivity-constrained Lasso paths.
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
              linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
# Figure 3: unconstrained vs positivity-constrained Elastic-Net paths.
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
              linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
           loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
abhitopia/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 51 | 12969 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(
expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(expected_values,
col,
actual_dict[col]))
class TensorFlowDataFrameTestCase(test.TestCase):
  """Tests for `TensorFlowDataFrame`."""
  def _make_test_csv(self):
    # Write a 100-row CSV with int/float/bool/string columns of random
    # values and return its path.
    f = tempfile.NamedTemporaryFile(
        dir=self.get_temp_dir(), delete=False, mode="w")
    w = csv.writer(f)
    w.writerow(["int", "float", "bool", "string"])
    for _ in range(100):
      intvalue = np.random.randint(-10, 10)
      floatvalue = np.random.rand()
      boolvalue = int(np.random.rand() > 0.3)
      stringvalue = "S: %.4f" % np.random.rand()
      row = [intvalue, floatvalue, boolvalue, stringvalue]
      w.writerow(row)
    f.close()
    return f.name
  def _make_test_csv_sparse(self):
    # Like _make_test_csv, but each cell is empty with ~50% probability so
    # readers must fall back to default values.
    f = tempfile.NamedTemporaryFile(
        dir=self.get_temp_dir(), delete=False, mode="w")
    w = csv.writer(f)
    w.writerow(["int", "float", "bool", "string"])
    for _ in range(100):
      # leave columns empty; these will be read as default value (e.g. 0 or NaN)
      intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
      floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
      boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
      stringvalue = (
          ("S: %.4f" % np.random.rand()) if np.random.rand() > 0.5 else "")
      row = [intvalue, floatvalue, boolvalue, stringvalue]
      w.writerow(row)
    f.close()
    return f.name
  def _make_test_tfrecord(self):
    # Write 100 tf.Examples; record i holds range(i % 3) in var_len_int and
    # [i, 2i] in fixed_len_float. Returns the file path.
    f = tempfile.NamedTemporaryFile(dir=self.get_temp_dir(), delete=False)
    w = tf_record.TFRecordWriter(f.name)
    for i in range(100):
      ex = example_pb2.Example()
      ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
      ex.features.feature["fixed_len_float"].float_list.value.extend(
          [float(i), 2 * float(i)])
      w.write(ex.SerializeToString())
    return f.name
  def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
                                       num_batches, batch_size):
    # Run `tensorflow_df` for num_batches and check each batch against the
    # corresponding (wrapping) rows of `pandas_df`.
    self.assertItemsEqual(
        list(pandas_df.columns) + ["index"], tensorflow_df.columns())
    for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
      row_numbers = [
          total_row_num % pandas_df.shape[0]
          for total_row_num in range(batch_size * batch_num, batch_size * (
              batch_num + 1))
      ]
      expected_df = pandas_df.iloc[row_numbers]
      _assert_df_equals_dict(expected_df, batch)
  def testInitFromPandas(self):
    """Test construction from Pandas DataFrame."""
    if not HAS_PANDAS:
      return
    pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
    tensorflow_df = df.TensorFlowDataFrame.from_pandas(
        pandas_df, batch_size=10, shuffle=False)
    batch = tensorflow_df.run_one_batch()
    np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
                                  "Expected index {}; got {}".format(
                                      pandas_df.index.values, batch["index"]))
    _assert_df_equals_dict(pandas_df, batch)
  def testBatch(self):
    """Tests `batch` method.
    `DataFrame.batch()` should iterate through the rows of the
    `pandas.DataFrame`, and should "wrap around" when it reaches the last row.
    """
    if not HAS_PANDAS:
      return
    pandas_df = pd.DataFrame({
        "albatross": range(10),
        "bluejay": 1,
        "cockatoo": range(0, 20, 2),
        "penguin": list("abcdefghij")
    })
    tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
    # Rebatch `df` into the following sizes successively.
    batch_sizes = [4, 7]
    num_batches = 3
    final_batch_size = batch_sizes[-1]
    for batch_size in batch_sizes:
      tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
    self._assert_pandas_equals_tensorflow(
        pandas_df,
        tensorflow_df,
        num_batches=num_batches,
        batch_size=final_batch_size)
  def testFromNumpy(self):
    # Each batch row should be a one-hot vector with the 1 at its own index.
    x = np.eye(20)
    tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
    for batch in tensorflow_df.run(30):
      for ind, val in zip(batch["index"], batch["value"]):
        expected_val = np.zeros_like(val)
        expected_val[ind] = 1
        np.testing.assert_array_equal(expected_val, val)
  def testFromCSV(self):
    if not HAS_PANDAS:
      return
    num_batches = 100
    batch_size = 8
    enqueue_size = 7
    data_path = self._make_test_csv()
    default_values = [0, 0.0, 0, ""]
    pandas_df = pd.read_csv(data_path)
    tensorflow_df = df.TensorFlowDataFrame.from_csv(
        [data_path],
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        shuffle=False,
        default_values=default_values)
    self._assert_pandas_equals_tensorflow(
        pandas_df,
        tensorflow_df,
        num_batches=num_batches,
        batch_size=batch_size)
  def testFromCSVLimitEpoch(self):
    batch_size = 8
    num_epochs = 17
    expected_num_batches = (num_epochs * 100) // batch_size
    data_path = self._make_test_csv()
    default_values = [0, 0.0, 0, ""]
    tensorflow_df = df.TensorFlowDataFrame.from_csv(
        [data_path],
        batch_size=batch_size,
        shuffle=False,
        default_values=default_values)
    result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
    actual_num_batches = len(result_batches)
    self.assertEqual(expected_num_batches, actual_num_batches)
    # TODO(soergel): figure out how to dequeue the final small batch
    expected_rows = 1696  # num_epochs * 100
    actual_rows = sum([len(x["int"]) for x in result_batches])
    self.assertEqual(expected_rows, actual_rows)
  def testFromCSVWithFeatureSpec(self):
    if not HAS_PANDAS:
      return
    num_batches = 100
    batch_size = 8
    data_path = self._make_test_csv_sparse()
    feature_spec = {
        "int": parsing_ops.FixedLenFeature(None, dtypes.int16, np.nan),
        "float": parsing_ops.VarLenFeature(dtypes.float16),
        "bool": parsing_ops.VarLenFeature(dtypes.bool),
        "string": parsing_ops.FixedLenFeature(None, dtypes.string, "")
    }
    pandas_df = pd.read_csv(data_path, dtype={"string": object})
    # Pandas insanely uses NaN for empty cells in a string column.
    # And, we can't use Pandas replace() to fix them because nan != nan
    # NOTE(review): DataFrame.set_value was removed in pandas 1.0; modern
    # pandas requires .at[i, "string"] = "" here.
    s = pandas_df["string"]
    for i in range(0, len(s)):
      if isinstance(s[i], float) and math.isnan(s[i]):
        pandas_df.set_value(i, "string", "")
    tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
        [data_path],
        batch_size=batch_size,
        shuffle=False,
        feature_spec=feature_spec)
    # These columns were sparse; re-densify them for comparison
    tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
    tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
    self._assert_pandas_equals_tensorflow(
        pandas_df,
        tensorflow_df,
        num_batches=num_batches,
        batch_size=batch_size)
  def testFromExamples(self):
    num_batches = 77
    enqueue_size = 11
    batch_size = 13
    data_path = self._make_test_tfrecord()
    features = {
        "fixed_len_float":
            parsing_ops.FixedLenFeature(
                shape=[2], dtype=dtypes.float32, default_value=[0.0, 0.0]),
        "var_len_int":
            parsing_ops.VarLenFeature(dtype=dtypes.int64)
    }
    tensorflow_df = df.TensorFlowDataFrame.from_examples(
        data_path,
        enqueue_size=enqueue_size,
        batch_size=batch_size,
        features=features,
        shuffle=False)
    # `test.tfrecord` contains 100 records with two features: var_len_int and
    # fixed_len_float. Entry n contains `range(n % 3)` and
    # `float(n)` for var_len_int and fixed_len_float,
    # respectively.
    num_records = 100
    def _expected_fixed_len_float(n):
      return np.array([float(n), 2 * float(n)])
    def _expected_var_len_int(n):
      return np.arange(n % 3)
    for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
      record_numbers = [
          n % num_records
          for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
      ]
      for i, j in enumerate(record_numbers):
        np.testing.assert_allclose(
            _expected_fixed_len_float(j), batch["fixed_len_float"][i])
      var_len_int = batch["var_len_int"]
      # SparseTensor comparison: look up each value's source record via its
      # batch-row index and check it against the expected dense row.
      for i, ind in enumerate(var_len_int.indices):
        val = var_len_int.values[i]
        expected_row = _expected_var_len_int(record_numbers[ind[0]])
        expected_value = expected_row[ind[1]]
        np.testing.assert_array_equal(expected_value, val)
  def testSplitString(self):
    batch_size = 8
    num_epochs = 17
    expected_num_batches = (num_epochs * 100) // batch_size
    data_path = self._make_test_csv()
    default_values = [0, 0.0, 0, ""]
    tensorflow_df = df.TensorFlowDataFrame.from_csv(
        [data_path],
        batch_size=batch_size,
        shuffle=False,
        default_values=default_values)
    a, b = tensorflow_df.split("string", 0.7)  # no rebatching
    total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
    a_result_batches = list(a.run(num_epochs=num_epochs))
    b_result_batches = list(b.run(num_epochs=num_epochs))
    self.assertEqual(expected_num_batches, len(total_result_batches))
    self.assertEqual(expected_num_batches, len(a_result_batches))
    self.assertEqual(expected_num_batches, len(b_result_batches))
    total_rows = sum([len(x["int"]) for x in total_result_batches])
    a_total_rows = sum([len(x["int"]) for x in a_result_batches])
    b_total_rows = sum([len(x["int"]) for x in b_result_batches])
    print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
    # TODO(soergel): figure out how to dequeue the final small batch
    expected_total_rows = 1696  # (num_epochs * 100)
    self.assertEqual(expected_total_rows, total_rows)
    self.assertEqual(1087, a_total_rows)  # stochastic but deterministic
    # self.assertEqual(int(total_rows * 0.7), a_total_rows)
    self.assertEqual(609, b_total_rows)  # stochastic but deterministic
    # self.assertEqual(int(total_rows * 0.3), b_total_rows)
    # The strings used for hashing were all unique in the original data, but
    # we ran 17 epochs, so each one should appear 17 times.  Each copy should
    # be hashed into the same partition, so there should be no overlap of the
    # keys.
    a_strings = set([s for x in a_result_batches for s in x["string"]])
    b_strings = set([s for x in b_result_batches for s in x["string"]])
    self.assertEqual(frozenset(), a_strings & b_strings)
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/estimators/estimator.py | 7 | 55607 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
  """Build the (input_fn, feed_fn) pair for the given training inputs.

  Args:
    x: Numpy, Pandas or Dask matrix or iterable.
    y: Numpy, Pandas or Dask matrix or iterable.
    input_fn: Pre-defined input function for training data.
    feed_fn: Pre-defined data feeder function.
    batch_size: Size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    A (input_fn, feed_fn) tuple; a user-supplied `input_fn` is returned
    unchanged, otherwise a data feeder is built around `x`/`y`.

  Raises:
    ValueError: Only one of `(x & y)` or `input_fn` must be provided.
  """
  _verify_input_args(x, y, input_fn, feed_fn, batch_size)
  if input_fn is not None:
    return input_fn, feed_fn
  feeder = data_feeder.setup_train_data_feeder(
      x,
      y,
      n_classes=None,
      batch_size=batch_size,
      shuffle=shuffle,
      epochs=epochs)
  return feeder.input_builder, feeder.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
  """Creates `FeatureColumn` objects for inputs defined by `input_fn`.

  All inputs are interpreted as dense, fixed-length float values. The
  tensors are built inside a throwaway local graph that is discarded
  afterwards.

  Args:
    input_fn: Input function returning a tuple of:
        features - Dictionary of string feature name to `Tensor` or `Tensor`.
        labels - `Tensor` of label values.

  Returns:
    List of `FeatureColumn` objects.
  """
  temp_graph = ops.Graph()
  with temp_graph.as_default():
    features = input_fn()[0]
    return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
  """Creates `FeatureColumn` objects for inputs defined by input `x`.

  All inputs are interpreted as dense, fixed-length float values.

  Args:
    x: Real-valued matrix of shape [n_samples, n_features...]. Can be
       iterator that returns arrays of features.

  Returns:
    List of `FeatureColumn` objects.
  """
  built_input_fn = _get_input_fn(
      x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)[0]
  return infer_real_valued_columns_from_input_fn(built_input_fn)
def _model_fn_args(fn):
  """Get argument names for function-like object.

  Args:
    fn: Function, or function-like object (e.g., result of `functools.partial`).

  Returns:
    `tuple` of string argument names not already bound by a partial.
  """
  _, fn = tf_decorator.unwrap(fn)
  partial_like = (
      hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'))
  if partial_like:
    # functools.partial and similar: drop positionally-bound arguments,
    # then filter out keyword-bound ones.
    bound_keywords = set(fn.keywords.keys())
    remaining = tf_inspect.getargspec(fn.func).args[len(fn.args):]
    return tuple(arg for arg in remaining if arg not in bound_keywords)
  # Plain function.
  return tuple(tf_inspect.getargspec(fn).args)
def _get_replica_device_setter(config):
  """Creates a replica device setter if required.

  Args:
    config: A RunConfig instance.

  Returns:
    A replica device setter, or None.
  """
  if config.num_ps_replicas <= 0:
    # No parameter servers configured: nothing to place, no setter needed.
    return None
  # Op types that should live on the parameter servers.
  ps_ops = [
      'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
      'MutableHashTableOfTensors', 'MutableDenseHashTable'
  ]
  if config.task_type:
    worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
  else:
    worker_device = '/job:worker'
  return device_setter.replica_device_setter(
      ps_tasks=config.num_ps_replicas, worker_device=worker_device,
      merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
def _make_metrics_ops(metrics, features, labels, predictions):
  """Add metrics based on `features`, `labels`, and `predictions`.

  `metrics` contains a specification for how to run metrics. It is a dict
  mapping friendly names to either `MetricSpec` objects, or directly to a metric
  function (assuming that `predictions` and `labels` are single tensors), or to
  `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
  `labels` to `metric` (assuming `labels` is a single tensor).

  Users are encouraged to use `MetricSpec` objects, which are more flexible and
  cleaner. They also lead to clearer errors.

  Args:
    metrics: A dict mapping names to metrics specification, for example
      `MetricSpec` objects.
    features: A dict of tensors returned from an input_fn as features/inputs.
    labels: A single tensor or a dict of tensors returned from an input_fn as
      labels.
    predictions: A single tensor or a dict of tensors output from a model as
      predictions.

  Returns:
    A dict mapping the friendly given in `metrics` to the result of calling the
    given metric function.

  Raises:
    ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Mostly, a dict is given
      but no pred_name specified.
  """
  metrics = metrics or {}
  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor_or_dict = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor_or_dict = labels[list(labels.keys())[0]]
  result = {}
  # Iterate in lexicographic order, so the graph is identical among runs.
  for name, metric in sorted(six.iteritems(metrics)):
    if isinstance(metric, metric_spec.MetricSpec):
      # Preferred path: MetricSpec knows how to wire itself up.
      result[name] = metric.create_metric_ops(features, labels, predictions)
      continue
    # TODO(b/31229024): Remove the rest of this loop
    logging.warning('Please specify metrics using MetricSpec. Using bare '
                    'functions or (key, fn) tuples is deprecated and support '
                    'for it will be removed on Oct 1, 2016.')
    if isinstance(name, tuple):
      # Multi-head metrics.
      # Here `name` is (friendly_name, prediction_key): the metric is applied
      # to predictions[prediction_key] and stored under friendly_name.
      if len(name) != 2:
        raise ValueError('Invalid metric for {}. It returned a tuple with '
                         'len {}, expected 2.'.format(name, len(name)))
      if not isinstance(predictions, dict):
        raise ValueError(
            'Metrics passed provide (name, prediction), '
            'but predictions are not dict. '
            'Metrics: %s, Predictions: %s.' % (metrics, predictions))
      # Here are two options: labels are single Tensor or a dict.
      if isinstance(labels, dict) and name[1] in labels:
        # If labels are dict and the prediction name is in it, apply metric.
        result[name[0]] = metric(predictions[name[1]], labels[name[1]])
      else:
        # Otherwise pass the labels to the metric.
        result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
    else:
      # Single head metrics.
      if isinstance(predictions, dict):
        raise ValueError(
            'Metrics passed provide only name, no prediction, '
            'but predictions are dict. '
            'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
      result[name] = metric(predictions, labels_tensor_or_dict)
  return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
                           dictionary,
                           current_global_step):
  """Writes a `dict` into summary file in given output directory.

  Only numeric values are written: Python/NumPy floats become float summaries
  and Python/NumPy ints become int summaries. `None` values, the
  'global_step' entry (recorded with the summary itself), and values of any
  other type are skipped, the latter with a warning.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  summary_writer = summary_io.SummaryWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    # Bind once instead of re-indexing the dict on every check below.
    value = dictionary[key]
    if value is None:
      continue
    if key == 'global_step':
      continue
    summary_value = summary_proto.value.add()
    summary_value.tag = key
    if isinstance(value, (np.float32, float)):
      summary_value.simple_value = float(value)
    elif isinstance(value, (np.int64, np.int32, int)):
      summary_value.simple_value = int(value)
    else:
      # `logging.warn` is a deprecated alias; use `logging.warning` for
      # consistency with the rest of this module.
      logging.warning(
          'Skipping summary for %s, must be a float, np.float32, np.int64, np.int32 or int.',
          key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
class BaseEstimator(
    sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
  """Abstract BaseEstimator class to train and evaluate TensorFlow models.

  Users should not instantiate or subclass this class. Instead, use `Estimator`.
  """
  __metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overriden with
  # learn_runner.EstimatorConfig.
  # TODO(wicke): Remove this once launcher takes over config functionality
  _Config = run_config.RunConfig # pylint: disable=invalid-name
  def __init__(self, model_dir=None, config=None):
    """Initializes a BaseEstimator instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model. If `None`, the model_dir in
        `config` will be used if set. If both are set, they must be same.
      config: A RunConfig instance.
    """
    # Create a run configuration.
    if config is None:
      self._config = BaseEstimator._Config()
      logging.info('Using default config.')
    else:
      self._config = config
    # Session configuration: default to soft placement when none is supplied.
    if self._config.session_config is None:
      self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    else:
      self._session_config = self._config.session_config
    # Model directory. Constructor arg and RunConfig must agree when both set.
    if (model_dir is not None) and (self._config.model_dir is not None):
      if model_dir != self._config.model_dir:
        # TODO(b/9965722): remove this suppression after it is no longer
        # necessary.
        # pylint: disable=g-doc-exception
        raise ValueError(
            "model_dir are set both in constructor and RunConfig, but with "
            "different values. In constructor: '{}', in RunConfig: "
            "'{}' ".format(model_dir, self._config.model_dir))
    self._model_dir = model_dir or self._config.model_dir
    if self._model_dir is None:
      # Fall back to a temporary directory so training can still proceed.
      self._model_dir = tempfile.mkdtemp()
      logging.warning('Using temporary folder as model directory: %s',
                      self._model_dir)
    if self._config.model_dir is None:
      self._config = self._config.replace(model_dir=self._model_dir)
    logging.info('Using config: %s', str(vars(self._config)))
    # Set device function depending if there are replicas or not.
    self._device_fn = _get_replica_device_setter(self._config)
    # Features and labels TensorSignature objects.
    # TODO(wicke): Rename these to something more descriptive
    self._features_info = None
    self._labels_info = None
    self._graph = None
  @property
  def config(self):
    # TODO(wicke): make RunConfig immutable, and then return it without a copy.
    return copy.deepcopy(self._config)
  @deprecated_args(
      SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
      ('y', None), ('batch_size', None)
  )
  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Trainable`.

    Raises:
      ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
      ValueError: If both `steps` and `max_steps` are not `None`.
    """
    if (steps is not None) and (max_steps is not None):
      raise ValueError('Can not provide both steps and max_steps.')
    _verify_input_args(x, y, input_fn, None, batch_size)
    if x is not None:
      # In-memory (x, y) path: delegate to the scikit-learn compatible wrapper.
      SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
      return self
    if max_steps is not None:
      # Skip training entirely if a checkpoint already reached max_steps.
      try:
        start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
        if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already saved.')
          return self
      except: # pylint: disable=bare-except
        pass
    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    if steps is not None or max_steps is not None:
      hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    loss = self._train_model(input_fn=input_fn, hooks=hooks)
    logging.info('Loss for final step: %s.', loss)
    return self
  @deprecated_args(
      SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
      ('y', None), ('batch_size', None)
  )
  def partial_fit(
      self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
      monitors=None):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This either can
    implement iterative training or out-of-core/online training.

    This is especially useful when the whole dataset is too big to
    fit in memory at the same time. Or when model is taking long time
    to converge, and you want to split up training into subparts.

    Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The training input samples for fitting the
        model. If set, `input_fn` must be `None`.
      y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of labels. The training label values
        (class labels in classification, real numbers in regression). If set,
        `input_fn` must be `None`.
      input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
        `None`.
      steps: Number of steps for which to train model. If `None`, train forever.
      batch_size: minibatch size to use on the input, defaults to first
        dimension of `x`. Must be `None` if `input_fn` is provided.
      monitors: List of `BaseMonitor` subclass instances. Used for callbacks
        inside the training loop.

    Returns:
      `self`, for chaining.

    Raises:
      ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
        provided.
    """
    logging.warning('The current implementation of partial_fit is not optimized'
                    ' for use in a loop. Consider using fit() instead.')
    return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
                    batch_size=batch_size, monitors=monitors)
  @deprecated_args(
      SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
      ('y', None), ('batch_size', None)
  )
  def evaluate(self,
               x=None,
               y=None,
               input_fn=None,
               feed_fn=None,
               batch_size=None,
               steps=None,
               metrics=None,
               name=None,
               checkpoint_path=None,
               hooks=None,
               log_progress=True):
    # pylint: disable=g-doc-args,g-doc-return-or-yield
    """See `Evaluable`.

    Raises:
      ValueError: If at least one of `x` or `y` is provided, and at least one of
        `input_fn` or `feed_fn` is provided.
        Or if `metrics` is not `None` or `dict`.
    """
    _verify_input_args(x, y, input_fn, feed_fn, batch_size)
    if x is not None:
      # In-memory path: delegate to the scikit-learn compatible wrapper.
      return SKCompat(self).score(x, y, batch_size, steps, metrics)
    if metrics is not None and not isinstance(metrics, dict):
      raise ValueError('Metrics argument should be None or dict. '
                       'Got %s.' % metrics)
    eval_results, global_step = self._evaluate_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        steps=steps,
        metrics=metrics,
        name=name,
        checkpoint_path=checkpoint_path,
        hooks=hooks,
        log_progress=log_progress)
    if eval_results is not None:
      eval_results.update({'global_step': global_step})
    return eval_results
  @deprecated_args(
      SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
      ('batch_size', None), ('as_iterable', True)
  )
  def predict(
      self, x=None, input_fn=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Returns predictions for given features.

    Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The training input samples for fitting the
        model. If set, `input_fn` must be `None`.
      input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
      batch_size: Override default batch size. If set, 'input_fn' must be
        'None'.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      A numpy array of predicted classes or regression values if the
      constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
      of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
      predictions if as_iterable is True.

    Raises:
      ValueError: If x and input_fn are both provided or both `None`.
    """
    _verify_input_args(x, None, input_fn, None, batch_size)
    if x is not None and not as_iterable:
      return SKCompat(self).predict(x, batch_size)
    input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
    return self._infer_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        outputs=outputs,
        as_iterable=as_iterable)
  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
    return load_variable(self.model_dir, name)
  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    return [name for name, _ in list_variables(self.model_dir)]
  @property
  def model_dir(self):
    # Directory where checkpoints and the graph are saved/loaded.
    return self._model_dir
  @deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
  def export(self,
             export_dir,
             input_fn=export._default_input_fn, # pylint: disable=protected-access
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             prediction_key=None,
             default_batch_size=1,
             exports_to_keep=None,
             checkpoint_path=None):
    """Exports inference graph into given dir.

    Args:
      export_dir: A string containing a directory to write the exported graph
        and checkpoints.
      input_fn: If `use_deprecated_input_fn` is true, then a function that given
        `Tensor` of `Example` strings, parses it into features that are then
        passed to the model. Otherwise, a function that takes no argument and
        returns a tuple of (features, labels), where features is a dict of
        string key to `Tensor` and labels is a `Tensor` that's currently not
        used (and so can be `None`).
      input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to a
        the raw `Example` strings `Tensor` that the exported model will take as
        input. Can only be `None` if you're using a custom `signature_fn` that
        does not use the first arg (examples).
      use_deprecated_input_fn: Determines the signature format of `input_fn`.
      signature_fn: Function that returns a default signature and a named
        signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
        for features and `Tensor` or `dict` of `Tensor`s for predictions.
      prediction_key: The key for a tensor in the `predictions` dict (output
        from the `model_fn`) to use as the `predictions` input to the
        `signature_fn`. Optional. If `None`, predictions will pass to
        `signature_fn` without filtering.
      default_batch_size: Default batch size of the `Example` placeholder.
      exports_to_keep: Number of exports to keep.
      checkpoint_path: the checkpoint path of the model to be exported. If it is
          `None` (which is default), will use the latest checkpoint in
          export_dir.

    Returns:
      The string path to the exported directory. NB: this functionality was
      added ca. 2016/09/25; clients that depend on the return value may need
      to handle the case where this function returns None because subclasses
      are not returning a value.
    """
    # pylint: disable=protected-access
    return export._export_estimator(
        estimator=self,
        export_dir=export_dir,
        signature_fn=signature_fn,
        prediction_key=prediction_key,
        input_fn=input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep,
        checkpoint_path=checkpoint_path)
  # NOTE(review): `abc.abstractproperty` on a method that takes arguments
  # looks like `abc.abstractmethod` was intended; it works here only because
  # subclasses override it with plain methods — confirm before changing.
  @abc.abstractproperty
  def _get_train_ops(self, features, labels):
    """Method that builds model graph and returns trainer ops.

    Expected to be overridden by sub-classes that require custom support.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      A `ModelFnOps` object.
    """
    pass
  @abc.abstractproperty
  def _get_predict_ops(self, features):
    """Method that builds model graph and returns prediction ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      A `ModelFnOps` object.
    """
    pass
  def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overriden by sub-classes that require custom support.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      A `ModelFnOps` object.
    """
    raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
  @deprecated(
      '2016-09-23',
      'The signature of the input_fn accepted by export is changing to be '
      'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
      'which makes this function useless. This will be removed after the '
      'deprecation date.')
  def _get_feature_ops_from_example(self, examples_batch):
    """Returns feature parser for given example batch using features info.

    This function requires `fit()` has been called.

    Args:
      examples_batch: batch of tf.Example

    Returns:
      features: `Tensor` or `dict` of `Tensor` objects.

    Raises:
      ValueError: If `_features_info` attribute is not available (usually
      because `fit()` has not been called).
    """
    if self._features_info is None:
      raise ValueError('Features information missing, was fit() ever called?')
    return tensor_signature.create_example_parser_from_signatures(
        self._features_info, examples_batch)
  def _check_inputs(self, features, labels):
    # On the first call, record feature/label signatures; on later calls,
    # validate the new tensors against the recorded signatures.
    if self._features_info is not None:
      logging.debug('Given features: %s, required signatures: %s.',
                    str(features), str(self._features_info))
      if not tensor_signature.tensors_compatible(features, self._features_info):
        raise ValueError('Features are incompatible with given information. '
                         'Given features: %s, required signatures: %s.' %
                         (str(features), str(self._features_info)))
    else:
      self._features_info = tensor_signature.create_signatures(features)
      logging.debug('Setting feature info to %s.', str(self._features_info))
    if labels is not None:
      if self._labels_info is not None:
        logging.debug('Given labels: %s, required signatures: %s.',
                      str(labels), str(self._labels_info))
        if not tensor_signature.tensors_compatible(labels, self._labels_info):
          raise ValueError('Labels are incompatible with given information. '
                           'Given labels: %s, required signatures: %s.' %
                           (str(labels), str(self._labels_info)))
      else:
        self._labels_info = tensor_signature.create_signatures(labels)
        logging.debug('Setting labels info to %s', str(self._labels_info))
  def _extract_metric_update_ops(self, eval_dict):
    """Separate update operations from metric value operations."""
    update_ops = []
    value_ops = {}
    for name, metric_ops in six.iteritems(eval_dict):
      if isinstance(metric_ops, (list, tuple)):
        if len(metric_ops) == 2:
          # Streaming-metric convention: (value_op, update_op).
          value_ops[name] = metric_ops[0]
          update_ops.append(metric_ops[1])
        else:
          logging.warning(
              'Ignoring metric {}. It returned a list|tuple with len {}, '
              'expected 2'.format(name, len(metric_ops)))
          value_ops[name] = metric_ops
      else:
        # A bare op/tensor is treated as a value op with no update step.
        value_ops[name] = metric_ops
    if update_ops:
      # Group all update ops so they can be run as a single op.
      update_ops = control_flow_ops.group(*update_ops)
    else:
      update_ops = None
    return update_ops, value_ops
  def _evaluate_model(self,
                      input_fn,
                      steps,
                      feed_fn=None,
                      metrics=None,
                      name='',
                      checkpoint_path=None,
                      hooks=None,
                      log_progress=True):
    # TODO(wicke): Remove this once Model and associated code are gone.
    if (hasattr(self._config, 'execution_mode') and
        self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
      return None, None
    # Check that model has been trained (if nothing has been set explicitly).
    if not checkpoint_path:
      latest_path = saver.latest_checkpoint(self._model_dir)
      if not latest_path:
        raise NotFittedError("Couldn't find trained model at %s."
                             % self._model_dir)
      checkpoint_path = latest_path
    # Setup output directory.
    eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                            'eval_' + name)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, labels = input_fn()
      self._check_inputs(features, labels)
      model_fn_results = self._get_eval_ops(features, labels, metrics)
      eval_dict = model_fn_results.eval_metric_ops
      update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
      # We need to copy the hook array as we modify it, thus [:].
      hooks = hooks[:] if hooks else []
      if feed_fn:
        hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
      if steps:
        hooks.append(
            evaluation.StopAfterNEvalsHook(
                steps, log_progress=log_progress))
      # Pick a key for the global step that does not collide with any
      # metric name supplied by the model (prepend underscores until unique).
      global_step_key = 'global_step'
      while global_step_key in eval_dict:
        global_step_key = '_' + global_step_key
      eval_dict[global_step_key] = global_step
      eval_results = evaluation.evaluate_once(
          checkpoint_path=checkpoint_path,
          master=self._config.evaluation_master,
          scaffold=model_fn_results.scaffold,
          eval_ops=update_op,
          final_ops=eval_dict,
          hooks=hooks,
          config=self._session_config)
      current_global_step = eval_results[global_step_key]
      _write_dict_to_summary(eval_dir, eval_results, current_global_step)
    return eval_results, current_global_step
  def _get_features_from_input_fn(self, input_fn):
    result = input_fn()
    if isinstance(result, (list, tuple)):
      # input_fn returned (features, labels); labels are ignored for inference.
      return result[0]
    return result
  def _infer_model(self,
                   input_fn,
                   feed_fn=None,
                   outputs=None,
                   as_iterable=True,
                   iterate_batches=False):
    # Check that model has been trained.
    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % self._model_dir)
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(self._config.tf_random_seed)
      contrib_framework.create_global_step(g)
      features = self._get_features_from_input_fn(input_fn)
      infer_ops = self._get_predict_ops(features)
      predictions = self._filter_predictions(infer_ops.predictions, outputs)
      mon_sess = monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              checkpoint_filename_with_path=checkpoint_path,
              scaffold=infer_ops.scaffold,
              config=self._session_config))
      if not as_iterable:
        # Single-shot: run the prediction ops once and return the result.
        with mon_sess:
          if not mon_sess.should_stop():
            return mon_sess.run(predictions, feed_fn() if feed_fn else None)
      else:
        # Generator path: the session is owned and closed by the generator.
        return self._predict_generator(mon_sess, predictions, feed_fn,
                                       iterate_batches)
  def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
    with mon_sess:
      while not mon_sess.should_stop():
        preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
        if iterate_batches:
          # Yield whole batches exactly as returned by the session.
          yield preds
        elif not isinstance(predictions, dict):
          for pred in preds:
            yield pred
        else:
          # Unpack a dict of batched outputs into one dict per example.
          first_tensor = list(preds.values())[0]
          if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
            batch_length = first_tensor.dense_shape[0]
          else:
            batch_length = first_tensor.shape[0]
          for i in range(batch_length):
            yield {key: value[i] for key, value in six.iteritems(preds)}
        if self._is_input_constant(feed_fn, mon_sess.graph):
          return
  def _is_input_constant(self, feed_fn, graph):
    # If there are no queue_runners, the input `predictions` is a
    # constant, and we should stop after the first epoch. If,
    # instead, there are queue_runners, eventually they should throw
    # an `OutOfRangeError`.
    if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      return False
    # data_feeder uses feed_fn to generate `OutOfRangeError`.
    if feed_fn is not None:
      return False
    return True
  def _filter_predictions(self, predictions, outputs):
    # Restrict a dict of prediction tensors to the requested output keys.
    if not outputs:
      return predictions
    if not isinstance(predictions, dict):
      raise ValueError(
          'outputs argument is not valid in case of non-dict predictions.')
    existing_keys = predictions.keys()
    predictions = {
        key: value
        for key, value in six.iteritems(predictions) if key in outputs
    }
    if not predictions:
      raise ValueError('Expected to run at least one output from %s, '
                       'provided %s.' % (existing_keys, outputs))
    return predictions
  def _train_model(self, input_fn, hooks):
    all_hooks = []
    self._graph = ops.Graph()
    with self._graph.as_default() as g, g.device(self._device_fn):
      random_seed.set_random_seed(self._config.tf_random_seed)
      global_step = contrib_framework.create_global_step(g)
      features, labels = input_fn()
      self._check_inputs(features, labels)
      model_fn_ops = self._get_train_ops(features, labels)
      ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
      # Always monitor for NaN loss and log loss/step periodically.
      all_hooks.extend([
          basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
          basic_session_run_hooks.LoggingTensorHook(
              {
                  'loss': model_fn_ops.loss,
                  'step': global_step
              },
              every_n_iter=100)
      ])
      all_hooks.extend(hooks)
      scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
      # Register a default sharded Saver unless the scaffold or the SAVERS
      # collection already provides one.
      if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
        ops.add_to_collection(
            ops.GraphKeys.SAVERS,
            saver.Saver(
                sharded=True,
                max_to_keep=self._config.keep_checkpoint_max,
                defer_build=True,
                save_relative_paths=True))
      chief_hooks = []
      if (self._config.save_checkpoints_secs or
          self._config.save_checkpoints_steps):
        saver_hook_exists = any([
            isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
            for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
                      model_fn_ops.training_chief_hooks)
        ])
        if not saver_hook_exists:
          chief_hooks = [
              basic_session_run_hooks.CheckpointSaverHook(
                  self._model_dir,
                  save_secs=self._config.save_checkpoints_secs,
                  save_steps=self._config.save_checkpoints_steps,
                  scaffold=scaffold)
          ]
      with monitored_session.MonitoredTrainingSession(
          master=self._config.master,
          is_chief=self._config.is_chief,
          checkpoint_dir=self._model_dir,
          scaffold=scaffold,
          hooks=all_hooks + model_fn_ops.training_hooks,
          chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
          save_checkpoint_secs=0, # Saving is handled by a hook.
          save_summaries_steps=self._config.save_summary_steps,
          config=self._session_config
      ) as mon_sess:
        loss = None
        # Train until a hook (StopAtStep, NanTensor, ...) requests a stop.
        while not mon_sess.should_stop():
          _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
      summary_io.SummaryWriterCache.clear()
      return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
to configure Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overriden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
  def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.
    Expected to be overriden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.
    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.
    Returns:
      `ModelFnOps` object.
    Raises:
      ValueError: if `metrics` don't match `labels`.
    """
    model_fn_ops = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)
    # _call_model_fn applied feature engineering only to its own local
    # copies; re-apply it here so the metric ops below are built against the
    # engineered features/labels, consistent with what the model saw.
    features, labels = self._feature_engineering_fn(features, labels)
    # Custom metrics should overwrite defaults.
    if metrics:
      model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
          metrics, features, labels, model_fn_ops.predictions))
    # Always report a streaming-mean loss metric unless the model_fn
    # already provided one under the same key.
    if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
      model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
          metrics_lib.streaming_mean(model_fn_ops.loss))
    return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overriden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
  def export_savedmodel(
      self, export_dir_base, serving_input_fn,
      default_output_alternative_key=None,
      assets_extra=None,
      as_text=False,
      checkpoint_path=None):
    """Exports inference graph as a SavedModel into given dir.
    Args:
      export_dir_base: A string containing a directory to write the exported
        graph and checkpoints.
      serving_input_fn: A function that takes no argument and
        returns an `InputFnOps`.
      default_output_alternative_key: the name of the head to serve when none is
        specified. Not needed for single-headed models.
      assets_extra: A dict specifying how to populate the assets.extra directory
        within the exported SavedModel. Each key should give the destination
        path (including the filename) relative to the assets.extra directory.
        The corresponding value gives the full path of the source file to be
        copied. For example, the simple case of copying a single file without
        renaming it is specified as
        `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
      as_text: whether to write the SavedModel proto in text format.
      checkpoint_path: The checkpoint path to export. If None (the default),
        the most recent checkpoint found within the model directory is chosen.
    Returns:
      The string path to the exported directory.
    Raises:
      ValueError: if an unrecognized export_type is requested.
    """
    if serving_input_fn is None:
      raise ValueError('serving_input_fn must be defined.')
    # Build a fresh graph dedicated to serving, independent of any
    # training graph the caller may have.
    with ops.Graph().as_default() as g:
      contrib_variables.create_global_step(g)
      # Call the serving_input_fn and collect the input alternatives.
      input_ops = serving_input_fn()
      input_alternatives, features = (
          saved_model_export_utils.get_input_alternatives(input_ops))
      # TODO(b/34388557) This is a stopgap, pending recording model provenance.
      # Record which features are expected at serving time. It is assumed that
      # these are the features that were used in training.
      for feature_key in input_ops.features.keys():
        ops.add_to_collection(
            constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
      # Call the model_fn and collect the output alternatives.
      model_fn_ops = self._call_model_fn(features, None,
                                         model_fn_lib.ModeKeys.INFER)
      output_alternatives, actual_default_output_alternative_key = (
          saved_model_export_utils.get_output_alternatives(
              model_fn_ops, default_output_alternative_key))
      # Build the SignatureDefs from all pairs of input and output alternatives
      signature_def_map = saved_model_export_utils.build_all_signature_defs(
          input_alternatives, output_alternatives,
          actual_default_output_alternative_key)
      if not checkpoint_path:
        # Locate the latest checkpoint
        checkpoint_path = saver.latest_checkpoint(self._model_dir)
      if not checkpoint_path:
        raise NotFittedError("Couldn't find trained model at %s."
                             % self._model_dir)
      export_dir = saved_model_export_utils.get_timestamped_export_dir(
          export_dir_base)
      # Prefer a model-provided saver (it may know about special variables);
      # otherwise fall back to a generic sharded saver.
      if (model_fn_ops.scaffold is not None and
          model_fn_ops.scaffold.saver is not None):
        saver_for_restore = model_fn_ops.scaffold.saver
      else:
        saver_for_restore = saver.Saver(sharded=True)
      # Restore trained variables into the serving graph, then write the
      # SavedModel from that live session.
      with tf_session.Session('') as session:
        saver_for_restore.restore(session, checkpoint_path)
        init_op = control_flow_ops.group(
            variables.local_variables_initializer(),
            resources.initialize_resources(resources.shared_resources()),
            lookup_ops.tables_initializer())
        # Perform the export
        builder = saved_model_builder.SavedModelBuilder(export_dir)
        builder.add_meta_graph_and_variables(
            session, [tag_constants.SERVING],
            signature_def_map=signature_def_map,
            assets_collection=ops.get_collection(
                ops.GraphKeys.ASSET_FILEPATHS),
            legacy_init_op=init_op)
        builder.save(as_text)
      # Add the extra assets
      if assets_extra:
        assets_extra_path = os.path.join(compat.as_bytes(export_dir),
                                         compat.as_bytes('assets.extra'))
        for dest_relative, source in assets_extra.items():
          dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
                                       compat.as_bytes(dest_relative))
          dest_path = os.path.dirname(dest_absolute)
          gfile.MakeDirs(dest_path)
          gfile.Copy(source, dest_absolute)
      return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
  """Scikit learn wrapper for TensorFlow Learn Estimator.

  Adapts an `Estimator` to the scikit-learn `fit`/`score`/`predict`
  interface operating on in-memory numpy arrays `x` and `y`.
  """

  def __init__(self, estimator):
    self._estimator = estimator

  def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
          monitors=None):
    """Trains the wrapped estimator on `x`/`y`; returns `self` for chaining."""
    input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
                                      batch_size=batch_size, shuffle=True,
                                      epochs=None)
    all_monitors = []
    if feed_fn:
      # The feed hook pushes numpy batches into the graph's placeholders.
      all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
    if monitors:
      all_monitors.extend(monitors)
    self._estimator.fit(input_fn=input_fn,
                        steps=steps,
                        max_steps=max_steps,
                        monitors=all_monitors)
    return self

  def score(self, x, y, batch_size=128, steps=None, metrics=None):
    """Evaluates the estimator on `x`/`y` and returns the metrics dict.

    Raises:
      ValueError: if `metrics` is neither None nor a dict.
    """
    input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
                                      feed_fn=None, batch_size=batch_size,
                                      shuffle=False, epochs=1)
    if metrics is not None and not isinstance(metrics, dict):
      raise ValueError('Metrics argument should be None or dict. '
                       'Got %s.' % metrics)
    eval_results, global_step = self._estimator._evaluate_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        steps=steps,
        metrics=metrics,
        name='score')
    if eval_results is not None:
      eval_results.update({'global_step': global_step})
    return eval_results

  def predict(self, x, batch_size=128, outputs=None):
    """Runs inference over `x`, concatenating per-batch results.

    Returns a single array for single-output models, or a dict mapping each
    output name to its concatenated array otherwise.
    """
    input_fn, feed_fn = _get_input_fn(
        x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
        shuffle=False, epochs=1)
    results = list(
        self._estimator._infer_model(
            input_fn=input_fn,
            feed_fn=feed_fn,
            outputs=outputs,
            as_iterable=True,
            iterate_batches=True))
    if not isinstance(results[0], dict):
      # Single-output model: batches are arrays; stitch them together.
      return np.concatenate(results, axis=0)
    # Multi-output model: concatenate each named output across batches.
    return {
        key: np.concatenate([output[key] for output in results], axis=0)
        for key in results[0]
    }
| mit |
manipopopo/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 10 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
# Location of the bundled example data, resolved relative to this module.
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
    csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
  """Trains, evaluates, and exports a multivariate model.

  After training, the exported SavedModel is reloaded and used to draw 100
  correlated samples: each step predicts a one-step-ahead Gaussian, draws a
  sample from it, and filters the sample back in so that later predictions
  are conditioned on everything sampled so far.

  Args:
    csv_file_name: Path to a CSV file with a time column followed by five
      value columns.
    export_directory: Where to write the SavedModel; a temporary directory
      is created when None.
    training_steps: Number of training iterations.

  Returns:
    A (times, observations) pair of numpy arrays covering both the observed
    data and the sampled continuation.
  """
  estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=[], num_features=5)
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      # Larger window sizes generally produce a better covariance matrix.
      reader, batch_size=4, window_size=64)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  # Evaluating over the whole dataset leaves `current_state` positioned at
  # the end of the observed series, ready for continuation.
  current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  values = [current_state["observed"]]
  times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
  # Export the model so we can do iterative prediction and filtering without
  # reloading model checkpoints.
  if export_directory is None:
    export_directory = tempfile.mkdtemp()
  input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
  export_location = estimator.export_savedmodel(
      export_directory, input_receiver_fn)
  with tf.Graph().as_default():
    numpy.random.seed(1)  # Make the example a bit more deterministic
    with tf.Session() as session:
      signatures = tf.saved_model.loader.load(
          session, [tf.saved_model.tag_constants.SERVING], export_location)
      for _ in range(100):
        # One-step-ahead Gaussian prediction from the current state.
        current_prediction = (
            tf.contrib.timeseries.saved_model_utils.predict_continuation(
                continue_from=current_state, signatures=signatures,
                session=session, steps=1))
        next_sample = numpy.random.multivariate_normal(
            # Squeeze out the batch and series length dimensions (both 1).
            mean=numpy.squeeze(current_prediction["mean"], axis=(0, 1)),
            cov=numpy.squeeze(current_prediction["covariance"], axis=(0, 1)))
        # Update model state so that future predictions are conditional on the
        # value we just sampled.
        filtering_features = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
                tf.contrib.timeseries.FilteringResults.TIMES],
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
                None, None, :]}
        current_state = (
            tf.contrib.timeseries.saved_model_utils.filter_continuation(
                continue_from=current_state,
                session=session,
                signatures=signatures,
                features=filtering_features))
        values.append(next_sample[None, None, :])
        times.append(current_state["times"])
  # Collapse the per-step batches into flat arrays for plotting.
  all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
  all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
  return all_times, all_observations
def main(unused_argv):
  """Runs the multivariate example end to end and plots the result."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  times, observations = multivariate_train_and_sample()
  # A dotted vertical line marks the boundary between observed data and the
  # iteratively sampled continuation.
  pyplot.axvline(1000, linestyle="dotted")
  pyplot.plot(times, observations)
  pyplot.show()


if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
q1ang/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Shared toy dataset for the tests below: 4 samples, 3 features, 3 classes.
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
     [9, 1, 1],
     [6, 1, 2],
     [0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
    """Build a fresh k-best feature selector scored by chi-squared."""
    selector = SelectKBest(score_func=chi2, k=k)
    return selector
def test_chi2():
    # Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
    assert_equal(chi2.get_support(indices=True), [0])
    assert_equal(chi2.transform(X), np.array(X)[:, [0]])
    chi2 = mkchi2(k=2).fit(X, y)
    assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
    # The sparse (CSR) code path must select the same features and produce
    # the same transformed output as the dense one.
    # np.float64 is used explicitly: the bare np.float alias was deprecated
    # in NumPy 1.20 and removed in 1.24.
    Xsp = csr_matrix(X, dtype=np.float64)
    chi2 = mkchi2(k=2).fit(Xsp, y)
    assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
    Xtrans = chi2.transform(Xsp)
    assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
    # == doesn't work on scipy.sparse matrices
    Xtrans = Xtrans.toarray()
    Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
    assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
    """chi2 must accept COO input, the format CountVectorizer and
    DictVectorizer return; finishing without an exception is the test."""
    X_coo = coo_matrix(X)
    selector = mkchi2(k=2)
    selector.fit_transform(X_coo, y)
def test_chi2_negative():
    """chi2 must reject inputs containing negative values with ValueError."""
    X_neg = [[0, 1], [-1e-20, 1]]
    y_neg = [0, 1]
    # Dense list, ndarray, and sparse CSR input should all be rejected alike.
    for variant in (X_neg, np.array(X_neg), csr_matrix(X_neg)):
        assert_raises(ValueError, chi2, variant, y_neg)
def test_chisquare():
    """The local _chisquare replacement must agree with scipy.stats.chisquare."""
    observed = np.array([[2., 2.],
                         [1., 1.]])
    expected = np.array([[1.5, 1.5],
                         [1.5, 1.5]])
    # scipy.stats.chisquare is deliberately called first: the local
    # _chisquare implementation overwrites its observed-counts argument.
    chi_ref, p_ref = scipy.stats.chisquare(observed, expected)
    chi_new, p_new = _chisquare(observed, expected)
    assert_array_almost_equal(chi_ref, chi_new)
    assert_array_almost_equal(p_ref, p_new)
| bsd-3-clause |
lensacom/sparkit-learn | splearn/tests/test_pipeline.py | 1 | 11950 | import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.utils.testing import (assert_array_equal, assert_equal,
assert_false, assert_raises, assert_true)
from splearn import DictRDD, ArrayRDD
from splearn.decomposition import SparkTruncatedSVD
from splearn.feature_extraction.text import SparkCountVectorizer
from splearn.feature_selection import SparkVarianceThreshold
from splearn.linear_model.logistic import SparkLogisticRegression
from splearn.pipeline import SparkFeatureUnion, SparkPipeline, make_sparkunion
from splearn.utils.testing import SplearnTestCase
# class PipelineTestCase(SplearnTestCase):
# def generate_junkfood(self, blocks=None):
# X = (
# "the pizza pizza beer copyright",
# "the pizza burger beer copyright",
# "the the pizza beer beer copyright",
# "the burger beer beer copyright",
# "the coke burger coke copyright",
# "the coke burger burger",
# )
# Z_rdd = self.sc.parallelize(X)
# Z = ArrayRDD(Z_rdd, bsize=blocks)
# return X, Z
class IncorrectT(object):
    """Estimator-like stub with parameters but no ``fit`` method.

    Used to check parameter dispatching and that pipelines reject objects
    that do not implement the estimator interface.
    """

    def __init__(self, a=None, b=None):
        # Store constructor arguments untouched, scikit-learn style.
        self.a = a
        self.b = b
class T(IncorrectT):
    """Minimal trainable stub: records fit params, exposes ``a``/``b``."""

    def fit(self, Z, **fit_params):
        # Keep the keyword arguments around so tests can inspect how the
        # pipeline dispatched them.
        self.fit_params = fit_params
        return self

    def get_params(self, deep=False):
        return dict(a=self.a, b=self.b)

    def set_params(self, **params):
        # Only 'a' is settable; a missing key raises, as in the original.
        self.a = params['a']
        return self
class TransfT(T):
    """Transformer stub: ``fit`` comes from T, ``transform`` is identity."""

    def transform(self, Z):
        # Pass the data through unchanged.
        return Z
class FitParamT(object):
    """Mock classifier that records whether ``fit`` was told to succeed.

    ``predict`` echoes the ``should_succeed`` flag passed to the last
    ``fit`` call, letting tests verify that fit parameters are forwarded
    through the pipeline.
    """

    def __init__(self):
        self.successful = False

    def fit(self, Z, should_succeed=False):
        # Remember the flag so tests can check fit-parameter routing.
        self.successful = should_succeed

    def predict(self, Z):
        return self.successful
class TestFeatureUnion(SplearnTestCase):
    """Checks SparkFeatureUnion against scikit-learn's FeatureUnion."""

    def test_same_result_withdictrdd(self):
        X, X_rdd = self.make_text_rdd(2)
        Y_rdd = ArrayRDD(self.sc.parallelize([None] * len(X), 4), bsize=2)
        Z = DictRDD([X_rdd, Y_rdd], columns=("X", "y"), bsize=2)
        loc_char = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
        dist_char = SparkCountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
        loc_word = CountVectorizer(analyzer="word")
        loc_word_2 = CountVectorizer(analyzer="word")
        dist_word = SparkCountVectorizer(analyzer="word")
        dist_word_2 = SparkCountVectorizer(analyzer="word")
        loc_union = FeatureUnion([
            ("chars", loc_char),
            ("words", loc_word),
            ("words2", loc_word_2)
        ])
        dist_union = SparkFeatureUnion([
            ("chars", dist_char),
            ("words", dist_word),
            ("words2", dist_word_2)
        ])
        # test same feature names
        loc_union.fit(X)
        dist_union.fit(Z)
        converted_union = dist_union.to_scikit()
        # assert_equal only compares its first two arguments (a third one is
        # treated as the failure message), so the converted union has to be
        # compared in a separate assertion.
        assert_equal(
            loc_union.get_feature_names(),
            dist_union.get_feature_names())
        assert_equal(
            loc_union.get_feature_names(),
            converted_union.get_feature_names())
        # test same results
        Z_transformed = sp.vstack(dist_union.transform(Z)[:, 'X'].collect())
        assert_array_equal(loc_union.transform(X).toarray(), Z_transformed.toarray())
        assert_array_equal(loc_union.transform(X).toarray(),
                           converted_union.transform(X).toarray())
        # test same results with fit_transform
        X_transformed = loc_union.fit_transform(X)
        X_converted_transformed = converted_union.fit_transform(X)
        Z_transformed = sp.vstack(dist_union.fit_transform(Z)[:, 'X'].collect())
        assert_array_equal(X_transformed.toarray(), Z_transformed.toarray())
        assert_array_equal(X_transformed.toarray(),
                           X_converted_transformed.toarray())
        # test same results in parallel
        loc_union_par = FeatureUnion([
            ("chars", loc_char),
            ("words", loc_word)
        ], n_jobs=2)
        dist_union_par = SparkFeatureUnion([
            ("chars", dist_char),
            ("words", dist_word)
        ], n_jobs=2)
        loc_union_par.fit(X)
        dist_union_par.fit(Z)
        converted_union = dist_union_par.to_scikit()
        X_transformed = loc_union_par.transform(X)
        Z_transformed = sp.vstack(dist_union_par.transform(Z)[:, 'X'].collect())
        assert_array_equal(X_transformed.toarray(), Z_transformed.toarray())
        assert_array_equal(X_transformed.toarray(),
                           converted_union.transform(X).toarray())

    def test_same_result(self):
        X, Z = self.make_text_rdd(2)
        loc_char = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
        dist_char = SparkCountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
        loc_word = CountVectorizer(analyzer="word")
        dist_word = SparkCountVectorizer(analyzer="word")
        loc_union = FeatureUnion([
            ("chars", loc_char),
            ("words", loc_word)
        ])
        dist_union = SparkFeatureUnion([
            ("chars", dist_char),
            ("words", dist_word)
        ])
        # test same feature names
        loc_union.fit(X)
        dist_union.fit(Z)
        assert_equal(
            loc_union.get_feature_names(),
            dist_union.get_feature_names()
        )
        # test same results
        X_transformed = loc_union.transform(X)
        Z_transformed = sp.vstack(dist_union.transform(Z).collect())
        assert_array_equal(X_transformed.toarray(), Z_transformed.toarray())
        # test same results with fit_transform
        X_transformed = loc_union.fit_transform(X)
        Z_transformed = sp.vstack(dist_union.fit_transform(Z).collect())
        assert_array_equal(X_transformed.toarray(), Z_transformed.toarray())
        # test same results in parallel
        loc_union_par = FeatureUnion([
            ("chars", loc_char),
            ("words", loc_word)
        ], n_jobs=2)
        dist_union_par = SparkFeatureUnion([
            ("chars", dist_char),
            ("words", dist_word)
        ], n_jobs=2)
        loc_union_par.fit(X)
        dist_union_par.fit(Z)
        X_transformed = loc_union_par.transform(X)
        Z_transformed = sp.vstack(dist_union_par.transform(Z).collect())
        assert_array_equal(X_transformed.toarray(), Z_transformed.toarray())

    def test_same_result_weight(self):
        # Transformer weights must be honored identically on both sides.
        X, Z = self.make_text_rdd(2)
        loc_char = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
        dist_char = SparkCountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
        loc_word = CountVectorizer(analyzer="word")
        dist_word = SparkCountVectorizer(analyzer="word")
        loc_union = FeatureUnion([
            ("chars", loc_char),
            ("words", loc_word)
        ], transformer_weights={"words": 10})
        dist_union = SparkFeatureUnion([
            ("chars", dist_char),
            ("words", dist_word)
        ], transformer_weights={"words": 10})
        loc_union.fit(X)
        dist_union.fit(Z)
        X_transformed = loc_union.transform(X)
        Z_transformed = sp.vstack(dist_union.transform(Z).collect())
        assert_array_equal(X_transformed.toarray(), Z_transformed.toarray())

    def test_make_union(self):
        # make_sparkunion should auto-name steps from the class names.
        svd = SparkTruncatedSVD()
        mock = TransfT()
        fu = make_sparkunion(svd, mock)
        names, transformers = list(zip(*fu.transformer_list))
        assert_equal(names, ("sparktruncatedsvd", "transft"))
        assert_equal(transformers, (svd, mock))

    # def test_params_are_forwarded(self):
    #     transformer1 = T()
    #     transformer2 = T()
    #     pipe = SparkFeatureUnion([('t1', transformer1),
    #                               ('t2', transformer2)])
    #     print(pipe.get_params(deep=True))
    #     expected = dict(t1__a=None, t1__b=None,
    #                     t2__a=None, t2__b=None,
    #                     t1=transformer1, t2=transformer2,
    #                     **pipe.get_params(deep=False)
    #                     )
    #     print(expected)
    #     assert_equal(pipe.get_params(deep=True), expected)
    #     # Check that params are set
    #     pipe.set_params(t1__a=0.1)
    #     assert_equal(transformer1.a, 0.1)
    #     assert_equal(transformer1.b, None)
    #     assert_equal(transformer2.a, None)
    #     assert_equal(transformer2.b, None)
    #     # Check that params are set
    #     _, _, Z = self.make_classification(2, 10000, 2000)
    #     pipe.fit(Z, t1__a=0.2, t2__a=0.3)
    #     assert_equal(transformer1.fit_params, {'a': 0.2})
    #     assert_equal(transformer2.fit_params, {'a': 0.3})
class TestPipeline(SplearnTestCase):
    """Checks SparkPipeline construction and parity with sklearn Pipeline."""

    def test_pipeline_init(self):
        # Test the various init parameters of the pipeline.
        assert_raises(TypeError, SparkPipeline)
        # Check that we can't instantiate pipelines with objects without fit
        # method
        pipe = assert_raises(TypeError, SparkPipeline, [('svc', IncorrectT)])
        # Smoke test with only an estimator
        clf = T()
        pipe = SparkPipeline([('svc', clf)])
        assert_equal(pipe.get_params(deep=True),
                     dict(svc__a=None, svc__b=None, svc=clf,
                          **pipe.get_params(deep=False)
                          ))
        # Check that params are set
        pipe.set_params(svc__a=0.1)
        assert_equal(clf.a, 0.1)
        assert_equal(clf.b, None)
        # Smoke test the repr:
        repr(pipe)
        # Test with two objects
        vect = SparkCountVectorizer()
        # NOTE(review): 'filter' shadows the builtin; harmless in this test.
        filter = SparkVarianceThreshold()
        pipe = SparkPipeline([('vect', vect), ('filter', filter)])
        # Check that we can't use the same stage name twice
        assert_raises(ValueError, SparkPipeline,
                      [('vect', vect), ('vect', vect)])
        # Check that params are set
        pipe.set_params(vect__min_df=0.1)
        assert_equal(vect.min_df, 0.1)
        # Smoke test the repr:
        repr(pipe)
        # Check that params are not set when naming them wrong
        assert_raises(ValueError, pipe.set_params, filter__min_df=0.1)
        # Test clone
        pipe2 = clone(pipe)
        assert_false(pipe.named_steps['vect'] is pipe2.named_steps['vect'])
        # Check that apart from estimators, the parameters are the same
        params = pipe.get_params(deep=True)
        params2 = pipe2.get_params(deep=True)
        for x in pipe.get_params(deep=False):
            params.pop(x)
        for x in pipe2.get_params(deep=False):
            params2.pop(x)
        # Remove estimators that where copied
        params.pop('vect')
        params.pop('filter')
        params2.pop('vect')
        params2.pop('filter')
        assert_equal(params, params2)

    def test_pipeline_same_results(self):
        # Fit the same threshold+logistic pipeline locally and on Spark and
        # require the predictions to agree on at least ~90% of samples.
        X, y, Z = self.make_classification(2, 10000, 2000)
        loc_clf = LogisticRegression()
        loc_filter = VarianceThreshold()
        loc_pipe = Pipeline([
            ('threshold', loc_filter),
            ('logistic', loc_clf)
        ])
        dist_clf = SparkLogisticRegression()
        dist_filter = SparkVarianceThreshold()
        dist_pipe = SparkPipeline([
            ('threshold', dist_filter),
            ('logistic', dist_clf)
        ])
        dist_filter.fit(Z)
        loc_pipe.fit(X, y)
        dist_pipe.fit(Z, logistic__classes=np.unique(y))
        assert_true(np.mean(np.abs(
            loc_pipe.predict(X) -
            np.concatenate(dist_pipe.predict(Z[:, 'X']).collect())
        )) < 0.1)
| apache-2.0 |
AstroVPK/libcarma | examples/MBHBCARMAFit.py | 2 | 5298 | import math
import numpy as np
import copy
import random
import psutil
import os
import sys
import pdb
import matplotlib.pyplot as plt
import matplotlib.cm as colormap
import brewer2mpl
try:
import kali.mbhbcarma
except ImportError:
print 'Cannot import kali.mbhbcarma! kali is not setup. Setup kali by sourcing bin/setup.sh'
sys.exit(1)
try:
import kali.carma
except ImportError:
print 'Cannot import kali.carma! kali is not setup. Setup kali by sourcing bin/setup.sh'
sys.exit(1)
plt.ion()
# --- Run configuration and RNG seeds (fixed for reproducibility). ---
BREAK = True
MULT = 50.0
BURNSEED = 731647386
DISTSEED = 219038190
NOISESEED = 87238923
SAMPLESEED = 36516342
ZSSEED = 384789247
WALKERSEED = 738472981
MOVESEED = 131343786
XSEED = 2348713647
# --- Physical constants; masses in units of 10^6 solar masses. ---
EarthMass = 3.0025138e-12 # 10^6 MSun
SunMass = 1.0e-6 # 10^6 MSun
EarthOrbitRadius = 4.84814e-6 # AU
SunOrbitRadius = 4.84814e-6*(EarthMass/SunMass) # AU
Period = 31557600.0/86164.090530833 # Day
EarthOrbitEccentricity = 0.0167
G = 6.67408e-11
c = 299792458.0
pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679
twoPi = 2.0*pi
Parsec = 3.0857e16
Day = 86164.090530833
Year = 31557600.0
DayInYear = Year/Day
SolarMass = 1.98855e30
DivergingList = ['BrBG', 'PRGn', 'PiYG', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral']
BASEPATH = '/home/vish/Documents/Research/MBHBCARMA/'
# --- Model orders and sampler settings: CARMA(p, q) with p=1, q=0. ---
p = 1
q = 0
r = kali.mbhbcarma.MBHBCARMATask(p, q).r
dt = 2.0
duration = 20000.0
N2S = 1.0e-18
NWALKERS = 320
NSTEPS = 2000
# --- Simulate a plain (un-beamed) CARMA light curve and fit it. ---
rho_carma = np.array([-1.0/200.0, 1.0])
theta_carma = kali.carma.coeffs(p, q, rho_carma)
newTask_carma = kali.carma.CARMATask(p, q, nwalkers=NWALKERS, nsteps=NSTEPS)
res_carma = newTask_carma.set(dt, theta_carma)
newLC_carma = newTask_carma.simulate(duration=duration, fracNoiseToSignal=N2S, burnSeed=BURNSEED,
                                     distSeed=DISTSEED, noiseSeed=NOISESEED)
newTask_carma.observe(newLC_carma, noiseSeed=NOISESEED)
lnprior_carma = newTask_carma.logPrior(newLC_carma)
lnlikelihood_carma = newTask_carma.logLikelihood(newLC_carma)
lnposterior_carma = newTask_carma.logPosterior(newLC_carma)
print " LnPrior (CARMA): %+e"%(lnprior_carma)
print "LnLikelihood (CARMA): %+e"%(lnlikelihood_carma)
print " LnPosterior (CARMA): %+e"%(lnposterior_carma)
newTask_carma.fit(newLC_carma)
# Pick the walker/step with the highest posterior as the best-fit parameters.
bestLoc_carma = np.where(newTask_carma.LnPosterior == np.max(newTask_carma.LnPosterior))
bestWalker_carma = bestLoc_carma[0][0]
bestStep_carma = bestLoc_carma[1][0]
bestTheta_carma = newTask_carma.Chain[:, bestWalker_carma, bestStep_carma]
bestTask_carma = kali.carma.CARMATask(p, q, nwalkers=NWALKERS, nsteps=NSTEPS)
bestRes_carma = bestTask_carma.set(dt, bestTheta_carma)
bestLC_carma = bestTask_carma.simulate(duration=duration, fracNoiseToSignal=N2S,
                                       burnSeed=BURNSEED, distSeed=DISTSEED, noiseSeed=NOISESEED)
bestTask_carma.observe(bestLC_carma, noiseSeed=NOISESEED)
# --- Repeat with the binary (beamed) MBHB+CARMA model: orbital parameters
# --- followed by the flux level and the CARMA coefficients from above.
theta_mbhbcarma = np.array([0.01, 0.02, 4.0*DayInYear, 0.0, 0.0, 90.0, 0.0, newLC_carma.mean,
                            theta_carma[0], theta_carma[1]])
newTask_mbhbcarma = kali.mbhbcarma.MBHBCARMATask(p, q, nwalkers=NWALKERS, nsteps=NSTEPS)
res_mbhbcarma = newTask_mbhbcarma.set(dt, theta_mbhbcarma)
newLC_mbhbcarma = newTask_mbhbcarma.simulate(duration=duration, fracNoiseToSignal=N2S,
                                             burnSeed=BURNSEED, distSeed=DISTSEED, noiseSeed=NOISESEED)
newTask_mbhbcarma.observe(newLC_mbhbcarma, noiseSeed=NOISESEED)
lnprior_mbhbcarma = newTask_mbhbcarma.logPrior(newLC_mbhbcarma)
lnlikelihood_mbhbcarma = newTask_mbhbcarma.logLikelihood(newLC_mbhbcarma)
lnposterior_mbhbcarma = newTask_mbhbcarma.logPosterior(newLC_mbhbcarma)
print " LnPrior (MBHBCARMA): %+e"%(lnprior_mbhbcarma)
print "LnLikelihood (MBHBCARMA): %+e"%(lnlikelihood_mbhbcarma)
print " LnPosterior (MBHBCARMA): %+e"%(lnposterior_mbhbcarma)
newTask_mbhbcarma.fit(newLC_mbhbcarma)
bestLoc_mbhbcarma = np.where(newTask_mbhbcarma.LnPosterior == np.max(newTask_mbhbcarma.LnPosterior))
bestWalker_mbhbcarma = bestLoc_mbhbcarma[0][0]
bestStep_mbhbcarma = bestLoc_mbhbcarma[1][0]
bestTheta_mbhbcarma = newTask_mbhbcarma.Chain[:, bestWalker_mbhbcarma, bestStep_mbhbcarma]
bestTask_mbhbcarma = kali.mbhbcarma.MBHBCARMATask(p, q, nwalkers=NWALKERS, nsteps=NSTEPS)
bestRes_mbhbcarma = bestTask_mbhbcarma.set(dt, bestTheta_mbhbcarma)
bestLC_mbhbcarma = bestTask_mbhbcarma.simulate(duration=duration, fracNoiseToSignal=N2S,
                                               burnSeed=BURNSEED, distSeed=DISTSEED, noiseSeed=NOISESEED)
bestTask_mbhbcarma.observe(bestLC_mbhbcarma, noiseSeed=NOISESEED)
# --- Plot truth vs. recovered light curves (ColorBrewer paired colors). ---
newLC_carma.name = r'Un-beamed'
newLC_carma.plot(clearFig=False, colorx='#e41a1c', colory='#fbb4ae')
bestLC_carma.name = r'Un-beamed \& recovered'
bestLC_carma.plot(clearFig=False, colorx='#984ea3', colory='#decbe4')
newLC_mbhbcarma.name = r'Beamed'
newLC_mbhbcarma.plot(clearFig=False, colorx='#377eb8', colory='#b3cde3')
bestLC_mbhbcarma.name = r'Beamed \& recovered'
bestLC_mbhbcarma.plot(clearFig=False, colorx='#4daf4a', colory='#ccebc5')
# Diagnostics for the period (dim 2) and flux (dim 7) chain dimensions.
newTask_mbhbcarma.plotscatter(dimx=2, dimy=7, truthx=4.0*DayInYear, truthy=newLC_mbhbcarma.mean,
                              labelx=r'$T$ (d)', labely=r'$F$')
newTask_mbhbcarma.plotwalkers(dim=2, truth=4.0*DayInYear, label=r'$T$ (d)')
# Drop into the debugger so the interactive figures stay open.
pdb.set_trace()
| gpl-2.0 |
hitlonewind/PR-experiment | Classifrer/classifer.py | 1 | 4186 | print(__doc__)
#coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
# Display names paired (by position) with the classifier instances below.
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
         "Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
         "Naive Bayes", "QDA"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    MLPClassifier(alpha=1),
    AdaBoostClassifier(),
    GaussianNB(),
    QuadraticDiscriminantAnalysis()]
# Build a third, (noisily) linearly separable dataset to go with moons and
# circles.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.4, random_state=42)
    # Mesh over the feature space (plus a margin) for decision surfaces.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        if ds_cnt == 0:
            ax.set_title(name)
        # Annotate each panel with its test accuracy (leading zero dropped).
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1
plt.tight_layout()
plt.show() | mit |
vortex-ape/scikit-learn | examples/svm/plot_svm_anova.py | 7 | 2046 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running a
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# #############################################################################
# Import some data to play with
X, y = load_digits(return_X_y=True)
# Throw away data, to be in the curse of dimension settings
X = X[:200]
y = y[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
# (uniform noise in [0, 2); chi2 requires the features to be non-negative)
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
# #############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = SelectPercentile(chi2)
clf = Pipeline([('anova', transform), ('svc', SVC(gamma="auto"))])
# #############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
    clf.set_params(anova__percentile=percentile)
    # Compute cross-validation score using 1 CPU
    this_scores = cross_val_score(clf, X, y, cv=5, n_jobs=1)
    score_means.append(this_scores.mean())
    score_stds.append(this_scores.std())
# Error bars show the std of the 5-fold CV scores at each percentile.
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
    'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
evature/android | EvaSDK/evasdk/src/main/jni/webrtc/modules/video_coding/codecs/test/plot_webrtc_test_logs.py | 3 | 13317 | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Plots statistics from WebRTC integration test logs.
Usage: $ python plot_webrtc_test_logs.py filename.txt
"""
import numpy
import sys
import re
import matplotlib.pyplot as plt
# Log events.
EVENT_START = 'RUN ] CodecSettings/PlotVideoProcessorIntegrationTest.'
EVENT_END = 'OK ] CodecSettings/PlotVideoProcessorIntegrationTest.'
# Metrics to plot, tuple: (name to parse in file, label to use when plotting).
BITRATE = ('Target Bitrate', 'target bitrate (kbps)')
WIDTH = ('Width', 'width')
HEIGHT = ('Height', 'height')
FILENAME = ('Filename', 'clip')
CODEC_TYPE = ('Codec type', 'Codec')
ENCODER_IMPLEMENTATION_NAME = ('Encoder implementation name', 'enc name')
DECODER_IMPLEMENTATION_NAME = ('Decoder implementation name', 'dec name')
CODEC_IMPLEMENTATION_NAME = ('Codec implementation name', 'codec name')
CORES = ('#CPU cores used', 'CPU cores used')
DENOISING = ('Denoising', 'denoising')
RESILIENCE = ('Resilience', 'resilience')
ERROR_CONCEALMENT = ('Error concealment', 'error concealment')
QP = ('Average QP', 'avg QP')
PSNR = ('PSNR avg', 'PSNR (dB)')
SSIM = ('SSIM avg', 'SSIM')
ENC_BITRATE = ('Encoding bitrate', 'encoded bitrate (kbps)')
FRAMERATE = ('Frame rate', 'fps')
NUM_FRAMES = ('Number of processed frames', 'num frames')
NUM_DROPPED_FRAMES = ('Number of dropped frames', 'num dropped frames')
NUM_FRAMES_TO_TARGET = ('Number of frames to approach target rate',
'frames to reach target rate')
ENCODE_TIME = ('Encoding time', 'encode time (us)')
ENCODE_TIME_AVG = ('Encoding time', 'encode time (us) avg')
DECODE_TIME = ('Decoding time', 'decode time (us)')
DECODE_TIME_AVG = ('Decoding time', 'decode time (us) avg')
FRAME_SIZE = ('Frame sizes', 'frame size (bytes)')
FRAME_SIZE_AVG = ('Frame sizes', 'frame size (bytes) avg')
AVG_KEY_FRAME_SIZE = ('Average key frame size', 'avg key frame size (bytes)')
AVG_NON_KEY_FRAME_SIZE = ('Average non-key frame size',
'avg non-key frame size (bytes)')
# Settings.
SETTINGS = [
WIDTH,
HEIGHT,
FILENAME,
NUM_FRAMES,
ENCODE_TIME,
DECODE_TIME,
FRAME_SIZE,
]
# Settings, options for x-axis.
X_SETTINGS = [
CORES,
FRAMERATE,
DENOISING,
RESILIENCE,
ERROR_CONCEALMENT,
BITRATE, # TODO(asapersson): Needs to be last.
]
# Settings, options for subplots.
SUBPLOT_SETTINGS = [
CODEC_TYPE,
ENCODER_IMPLEMENTATION_NAME,
DECODER_IMPLEMENTATION_NAME,
CODEC_IMPLEMENTATION_NAME,
] + X_SETTINGS
# Results.
RESULTS = [
PSNR,
SSIM,
ENC_BITRATE,
NUM_DROPPED_FRAMES,
NUM_FRAMES_TO_TARGET,
ENCODE_TIME_AVG,
DECODE_TIME_AVG,
QP,
AVG_KEY_FRAME_SIZE,
AVG_NON_KEY_FRAME_SIZE,
]
METRICS_TO_PARSE = SETTINGS + SUBPLOT_SETTINGS + RESULTS
Y_METRICS = [res[1] for res in RESULTS]
# Parameters for plotting.
FIG_SIZE_SCALE_FACTOR_X = 1.6
FIG_SIZE_SCALE_FACTOR_Y = 1.8
GRID_COLOR = [0.45, 0.45, 0.45]
def ParseSetting(filename, setting):
  """Parses setting from file.

  Scans the log for test runs delimited by EVENT_START/EVENT_END markers and
  collects the distinct values observed for `setting` across all runs.

  Args:
    filename: The name of the file.
    setting: Name of setting to parse (e.g. width).

  Returns:
    A list holding parsed settings, e.g. ['width: 128.0', 'width: 160.0'] """
  settings = []
  settings_file = open(filename)
  # Outer loop: advance to the start marker of the next test-run event.
  while True:
    line = settings_file.readline()
    if not line:
      break
    if re.search(r'%s' % EVENT_START, line):
      # Parse event.
      parsed = {}
      # Inner loop: accumulate metrics until the matching end marker (or EOF).
      while True:
        line = settings_file.readline()
        if not line:
          break
        if re.search(r'%s' % EVENT_END, line):
          # Add parsed setting to list.
          # Deduplicated: each observed value is recorded once, in file order.
          if setting in parsed:
            s = setting + ': ' + str(parsed[setting])
            if s not in settings:
              settings.append(s)
          break
        TryFindMetric(parsed, line, settings_file)
  settings_file.close()
  return settings
def ParseMetrics(filename, setting1, setting2):
"""Parses metrics from file.
Args:
filename: The name of the file.
setting1: First setting for sorting metrics (e.g. width).
setting2: Second setting for sorting metrics (e.g. CPU cores used).
Returns:
A dictionary holding parsed metrics.
For example:
metrics[key1][key2][measurement]
metrics = {
"width: 352": {
"CPU cores used: 1.0": {
"encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
"PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
"bitrate (kbps)": [50, 100, 300, 500, 1000]
},
"CPU cores used: 2.0": {
"encode time (us)": [0.718005, 0.806925, 0.909726, 0.931835, 0.953642],
"PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
"bitrate (kbps)": [50, 100, 300, 500, 1000]
},
},
"width: 176": {
"CPU cores used: 1.0": {
"encode time (us)": [0.857897, 0.91608, 0.959173, 0.971116, 0.980961],
"PSNR (dB)": [30.243646, 33.375592, 37.574387, 39.42184, 41.437897],
"bitrate (kbps)": [50, 100, 300, 500, 1000]
},
}
} """
metrics = {}
# Parse events.
settings_file = open(filename)
while True:
line = settings_file.readline()
if not line:
break
if re.search(r'%s' % EVENT_START, line):
# Parse event.
parsed = {}
while True:
line = settings_file.readline()
if not line:
break
if re.search(r'%s' % EVENT_END, line):
# Add parsed values to metrics.
key1 = setting1 + ': ' + str(parsed[setting1])
key2 = setting2 + ': ' + str(parsed[setting2])
if key1 not in metrics:
metrics[key1] = {}
if key2 not in metrics[key1]:
metrics[key1][key2] = {}
for label in parsed:
if label not in metrics[key1][key2]:
metrics[key1][key2][label] = []
metrics[key1][key2][label].append(parsed[label])
break
TryFindMetric(parsed, line, settings_file)
settings_file.close()
return metrics
def TryFindMetric(parsed, line, settings_file):
  """Tries to parse any metric in METRICS_TO_PARSE out of `line`.

  On a match, the value is stored in `parsed` under the metric's label.
  When the value is not inline (multi-line stats format), the next three
  lines are consumed from `settings_file` and stored under
  '<label> min', '<label> max' and '<label> avg'.
  """
  for metric in METRICS_TO_PARSE:
    name = metric[0]
    label = metric[1]
    if re.search(r'%s' % name, line):
      found, value = GetMetric(name, line)
      if not found:
        # TODO(asapersson): Change format.
        # Try find min, max, average stats.
        found, minimum = GetMetric("Min", settings_file.readline())
        if not found:
          return
        found, maximum = GetMetric("Max", settings_file.readline())
        if not found:
          return
        found, average = GetMetric("Average", settings_file.readline())
        if not found:
          return
        parsed[label + ' min'] = minimum
        parsed[label + ' max'] = maximum
        parsed[label + ' avg'] = average
      # NOTE(review): on the min/max/avg path this stores value == -1 under
      # the plain label (value still holds the failed inline parse result) --
      # confirm downstream only reads the ' min'/' max'/' avg' keys here.
      parsed[label] = value
      return
def GetMetric(name, string):
  """Extracts the value following '<name> :' or '<name> =' in `string`.

  Returns:
    (True, float) for numeric values, (True, str) for word values,
    (False, -1) when no parseable value for `name` is present.
  """
  # Numeric value first (e.g. bitrate = 98.8253).
  number_match = re.search(r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name, string)
  if number_match is not None:
    return StringToFloat(number_match.group(1))
  # Fall back to a single alphanumeric token (e.g. codec type : VP8).
  word_match = re.search(r'%s\s*[:=]\s*(\w+)' % name, string)
  if word_match is not None:
    return True, word_match.group(1)
  return False, -1
def StringToFloat(value):
  """Converts `value` to float.

  Returns:
    (True, float) on success, (False, -1) if `value` is not parseable.
  """
  try:
    value = float(value)
  except ValueError:
    # Python 2 print statement; this module is not Python 3 compatible.
    print "Not a float, skipped %s" % value
    return False, -1
  return True, value
def Plot(y_metric, x_metric, metrics):
"""Plots y_metric vs x_metric per key in metrics.
For example:
y_metric = 'PSNR (dB)'
x_metric = 'bitrate (kbps)'
metrics = {
"CPU cores used: 1.0": {
"PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
"bitrate (kbps)": [50, 100, 300, 500, 1000]
},
"CPU cores used: 2.0": {
"PSNR (dB)": [25.546029, 29.465518, 34.723535, 36.428493, 38.686551],
"bitrate (kbps)": [50, 100, 300, 500, 1000]
},
}
"""
for key in sorted(metrics):
data = metrics[key]
if y_metric not in data:
print "Failed to find metric: %s" % y_metric
continue
y = numpy.array(data[y_metric])
x = numpy.array(data[x_metric])
if len(y) != len(x):
print "Length mismatch for %s, %s" % (y, x)
continue
label = y_metric + ' - ' + str(key)
plt.plot(x, y, label=label, linewidth=1.5, marker='o', markersize=5,
markeredgewidth=0.0)
def PlotFigure(settings, y_metrics, x_metric, metrics, title):
"""Plots metrics in y_metrics list. One figure is plotted and each entry
in the list is plotted in a subplot (and sorted per settings).
For example:
settings = ['width: 128.0', 'width: 160.0']. Sort subplot per setting.
y_metrics = ['PSNR (dB)', 'PSNR (dB)']. Metric to plot per subplot.
x_metric = 'bitrate (kbps)'
"""
plt.figure()
plt.suptitle(title, fontsize='large', fontweight='bold')
settings.sort()
rows = len(settings)
cols = 1
pos = 1
while pos <= rows:
plt.rc('grid', color=GRID_COLOR)
ax = plt.subplot(rows, cols, pos)
plt.grid()
plt.setp(ax.get_xticklabels(), visible=(pos == rows), fontsize='large')
plt.setp(ax.get_yticklabels(), fontsize='large')
setting = settings[pos - 1]
Plot(y_metrics[pos - 1], x_metric, metrics[setting])
if setting.startswith(WIDTH[1]):
plt.title(setting, fontsize='medium')
plt.legend(fontsize='large', loc='best')
pos += 1
plt.xlabel(x_metric, fontsize='large')
plt.subplots_adjust(left=0.06, right=0.98, bottom=0.05, top=0.94, hspace=0.08)
def GetTitle(filename, setting):
title = ''
if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]:
codec_types = ParseSetting(filename, CODEC_TYPE[1])
for i in range(0, len(codec_types)):
title += codec_types[i] + ', '
if setting != CORES[1]:
cores = ParseSetting(filename, CORES[1])
for i in range(0, len(cores)):
title += cores[i].split('.')[0] + ', '
if setting != FRAMERATE[1]:
framerate = ParseSetting(filename, FRAMERATE[1])
for i in range(0, len(framerate)):
title += framerate[i].split('.')[0] + ', '
if (setting != CODEC_IMPLEMENTATION_NAME[1] and
setting != ENCODER_IMPLEMENTATION_NAME[1]):
enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1])
for i in range(0, len(enc_names)):
title += enc_names[i] + ', '
if (setting != CODEC_IMPLEMENTATION_NAME[1] and
setting != DECODER_IMPLEMENTATION_NAME[1]):
dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1])
for i in range(0, len(dec_names)):
title += dec_names[i] + ', '
filenames = ParseSetting(filename, FILENAME[1])
title += filenames[0].split('_')[0]
num_frames = ParseSetting(filename, NUM_FRAMES[1])
for i in range(0, len(num_frames)):
title += ' (' + num_frames[i].split('.')[0] + ')'
return title
def ToString(input_list):
  """Formats every entry of `input_list` as a numbered menu string."""
  # ('', '') is a sentinel metric that matches no real entry, so nothing
  # gets filtered out of the menu.
  return ToStringWithoutMetric(input_list, ('', ''))
def ToStringWithoutMetric(input_list, metric):
  """Formats `input_list` as a numbered menu, leaving out `metric`.

  Each kept entry's label (second tuple element) becomes one line;
  numbering only advances for entries that are kept.
  """
  lines = []
  number = 1
  for entry in input_list:
    if entry == metric:
      continue
    lines.append("%s. %s\n" % (number, entry[1]))
    number += 1
  return "".join(lines)
def GetIdx(text_list):
  """Prompts the user with the menu in `text_list`; returns zero-based index.

  The user types a one-based menu number. raw_input is Python 2 only.
  """
  return int(raw_input(text_list)) - 1
def main():
filename = sys.argv[1]
# Setup.
idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS))
if idx_metric == -1:
# Plot all metrics. One subplot for each metric.
# Per subplot: metric vs bitrate (per resolution).
cores = ParseSetting(filename, CORES[1])
setting1 = CORES[1]
setting2 = WIDTH[1]
sub_keys = [cores[0]] * len(Y_METRICS)
y_metrics = Y_METRICS
x_metric = BITRATE[1]
else:
resolutions = ParseSetting(filename, WIDTH[1])
idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS))
if X_SETTINGS[idx] == BITRATE:
idx = GetIdx("Plot per:\n%s" % ToStringWithoutMetric(SUBPLOT_SETTINGS,
BITRATE))
idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx])
# Plot one metric. One subplot for each resolution.
# Per subplot: metric vs bitrate (per setting).
setting1 = WIDTH[1]
setting2 = METRICS_TO_PARSE[idx_setting][1]
sub_keys = resolutions
y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
x_metric = BITRATE[1]
else:
# Plot one metric. One subplot for each resolution.
# Per subplot: metric vs setting (per bitrate).
setting1 = WIDTH[1]
setting2 = BITRATE[1]
sub_keys = resolutions
y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys)
x_metric = X_SETTINGS[idx][1]
metrics = ParseMetrics(filename, setting1, setting2)
# Stretch fig size.
figsize = plt.rcParams["figure.figsize"]
figsize[0] *= FIG_SIZE_SCALE_FACTOR_X
figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y
plt.rcParams["figure.figsize"] = figsize
PlotFigure(sub_keys, y_metrics, x_metric, metrics,
GetTitle(filename, setting2))
plt.show()
if __name__ == '__main__':
main()
| mit |
hmendozap/auto-sklearn | test/test_pipeline/components/data_preprocessing/test_scaling.py | 1 | 2446 | import unittest
import numpy as np
import sklearn.datasets
from autosklearn.pipeline.components.data_preprocessing.rescaling import RescalingChoice
from autosklearn.pipeline.util import get_dataset
class ScalingComponentTest(unittest.TestCase):
def _test_helper(self, Preprocessor, dataset=None, make_sparse=False):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=make_sparse)
original_X_train = X_train.copy()
configuration_space = Preprocessor.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = Preprocessor(random_state=1,
**{hp_name: default[hp_name] for hp_name in
default if default[hp_name] is not None})
preprocessor = preprocessor.choice
transformer = preprocessor.fit(X_train, Y_train)
return transformer.transform(X_train), original_X_train
def test_boston_is_not_scaled(self):
data = sklearn.datasets.load_boston()['data']
self.assertGreaterEqual(np.max(data), 100)
def test_default_configuration(self):
transformations = []
for i in range(10):
transformation, original = self._test_helper(RescalingChoice,
dataset='boston')
# The maximum is around 1.95 for the transformed array...
self.assertLessEqual(np.max(transformation), 2)
self.assertFalse((original == transformation).all())
transformations.append(transformation)
if len(transformations) > 1:
self.assertTrue(
(transformations[-1] == transformations[-2]).all())
def test_default_configuration_with_sparse_data(self):
preprocessing = self._test_helper(RescalingChoice, dataset='boston',
make_sparse=True)
transformation, original = preprocessing
self.assertEqual(original.getnnz(), transformation.getnnz())
self.assertAlmostEqual(1, transformation.max(), places=6)
self.assertTrue(~np.allclose(original.data, transformation.data))
@unittest.skip("Does not work at the moment.")
def test_preprocessing_dtype(self):
super(ScalingComponentTest, self)._test_helper(
RescalingChoice)
| bsd-3-clause |
saketkc/bio-tricks | meme_parser/meme_processory.py | 1 | 2759 | #!/usr/bin/env python
"""
Process meme.txt files to
generate conservation plots
"""
import argparse
import csv
import sys
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
from Bio import motifs
def plot_meme_against_phylo(meme_record, phylo):
    # NOTE(review): only sets the global seaborn style; `meme_record` and
    # `phylo` are never used -- this looks like an unfinished stub. The
    # actual plotting against phylo scores happens inline in main().
    sns.set(style="darkgrid")
def position_wise_profile(counts_dict, length):
    """Transposes a motif counts matrix into a per-position list of dicts.

    Args:
        counts_dict: Mapping of letter -> list of per-position counts
            (e.g. a Bio.motifs counts matrix).
        length: Motif length; unused, kept for interface compatibility.

    Returns:
        A list with one {letter: count} dict per motif position.
    """
    # Pair each count with its letter, producing one row of (letter, count)
    # tuples per letter.
    rows = [[(letter, count) for count in counts]
            for letter, counts in counts_dict.items()]
    # zip(*rows) regroups the tuples by position. Materializing a real list
    # (instead of returning a bare map object) keeps the result reusable
    # under Python 3, where map() is a lazy one-shot iterator.
    return [dict(position) for position in zip(*rows)]
def find_max_occurence(profile, max_count=2):
    """Returns, for each position, the `max_count` most frequent letters.

    Each element of `profile` is a {letter: count} dict; the result keeps
    the top entries as (letter, count) tuples, in ascending count order.
    """
    return [sorted(position.items(), key=lambda item: item[1])[-max_count:]
            for position in profile]
def main(argv):
parser = argparse.ArgumentParser(description='Process meme files')
parser.add_argument('-i', '--meme', metavar='<meme_out>', help='Meme input file', required=True)
parser.add_argument('-m', '--motif', metavar='<motif_no>', help='Motif number', required=True, type=int)
parser.add_argument('-c', '--phylo', metavar='<phylo_out>', help='PhyloP conservation scores', required=True)
parsed = parser.parse_args(argv)
handle = open(parsed.meme)
records = motifs.parse(handle, 'meme')
record = records[parsed.motif-1]
phylo_data = csv.reader(open(parsed.phylo,'r'), delimiter='\t')
phylo_scores = []
for line in phylo_data:
phylo_scores.append(float(line[2]))
print "Motif length", record.length
print "phylo length", len(phylo_scores)
profile = position_wise_profile(record.counts, record.length)
max_occur = find_max_occurence(profile, max_count=1)
motif_scores = []
for position in max_occur:
motif_scores.append(position[0][1])
pr = pearsonr(np.array(motif_scores), np.array(phylo_scores))
print 'Pearson correlation: {}'.format(pr)
fig, ax = plt.subplots()
ax= sns.regplot(y=np.array(motif_scores), x=np.array(phylo_scores), scatter=True)
ax.set(ylabel="Count of most freq nucleotide", xlabel="PhyloP scores", title='CTCF | pearsonr = {}, p-val={}'.format(pr[0],pr[1]));
fig.savefig('{}_motif{}_scatter.png'.format(parsed.phylo, parsed.motif))
x = np.linspace(1,len(phylo_scores)+1,num=len(phylo_scores), endpoint=False)
f, (ax1, ax2) = plt.subplots(2, 1)
x1 = sns.barplot(x,y=np.array(motif_scores), ax=ax1)
x2 = sns.barplot(x,y=np.array(phylo_scores), ax=ax2)
x1.set(ylabel='Counts of most freq nucleotide', xlabel='Position in motif')
x2.set(ylabel='Phylop Score', xlabel='Position in motif')
f.tight_layout()
f.savefig('{}_motif{}_trend.png'.format(parsed.phylo, parsed.motif))
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
NelisVerhoef/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
jmd-dk/concept | concept/tests/fluid_vacuum/analyze.py | 1 | 4997 | # This file has to be run in pure Python mode!
# Imports from the CO𝘕CEPT code
from commons import *
from snapshot import load
import species
plt = get_matplotlib().pyplot
# Absolute path and name of the directory of this file
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(this_dir)
# Read in data from the snapshots
species.allow_similarly_named_components = True
fluids = {'particles simulations': [], 'fluid simulations': []}
times = []
for kind in ('particles', 'fluid'):
if kind == 'particles':
regex = '{}/output/{}/snapshot_t=*_converted*'.format(this_dir, kind)
elif kind == 'fluid':
regex = '{}/output/{}/snapshot_t=*'.format(this_dir, kind)
for fname in sorted(glob(regex),
key=lambda s: s[(s.index('=') + 1):]):
snapshot = load(fname, compare_params=False)
fluids[kind + ' simulations'].append(snapshot.components[0])
if kind == 'particles':
times.append(float(re.search('snapshot_t=(.*)' + unit_time, fname).group(1)))
N_snapshots = len(times)
gridsize = fluids['particles simulations'][0].gridsize
# Sort data chronologically
order = np.argsort(times)
times = [times[o] for o in order]
for kind in ('particles', 'fluid'):
fluids[kind + ' simulations'] = [fluids[kind + ' simulations'][o] for o in order]
# Use precise times
times = snapshot_times['t']
# Begin analysis
masterprint('Analysing {} data ...'.format(this_test))
# Plot
fig_file = this_dir + '/result.png'
fig, ax = plt.subplots(N_snapshots, sharex=True, sharey=True, figsize=(8, 3*N_snapshots))
x = [boxsize*i/gridsize for i in range(gridsize)]
ϱ = {'particles simulations': [], 'fluid simulations': []}
for kind, markersize in zip(('particles', 'fluid'), (15, 10)):
for ax_i, fluid, t in zip(ax, fluids[kind + ' simulations'], times):
ϱ[kind + ' simulations'].append(fluid.ϱ.grid_noghosts[:gridsize,
:gridsize,
:gridsize].mean((1, 2)))
ax_i.plot(x, ϱ[kind + ' simulations'][-1],
'.',
markersize=markersize,
alpha=0.7,
label=(kind.rstrip('s').capitalize() + ' simulation'),
)
ax_i.set_ylabel(
r'$\varrho$ $\mathrm{{[{}\,m_{{\odot}}\,{}^{{-3}}]}}$'
.format(
significant_figures(
1/units.m_sun,
3,
fmt='tex',
incl_zeros=False,
scientific=False,
),
unit_length,
)
)
ax_i.set_title(rf'$t={t:.3g}\,\mathrm{{{unit_time}}}$')
plt.xlim(0, boxsize)
plt.xlabel(rf'$x\,\mathrm{{[{unit_length}]}}$')
ax[0].legend(loc='best').get_frame().set_alpha(0.7)
plt.tight_layout()
plt.savefig(fig_file)
# Fluid elements in yz-slices should all have the same ϱ and J
tol_fac = 1e-6
for kind in ('particles', 'fluid'):
for fluid, t in zip(fluids[kind + ' simulations'], times):
for fluidscalar in fluid.iterate_fluidscalars():
grid = fluidscalar.grid_noghosts[:gridsize, :gridsize, :gridsize]
for i in range(gridsize):
yz_slice = grid[i, :, :]
if not isclose(np.std(yz_slice), 0,
rel_tol=0,
abs_tol=max((tol_fac*np.std(grid), 1e+1*gridsize**2*machine_ϵ))):
abort('Non-uniformities have emerged after {} {} '
'in yz-slices of fluid scalar variable {} '
'in {} simulation.\n'
'See "{}" for a visualization.'
.format(t, unit_time, fluidscalar, kind.rstrip('s'), fig_file))
# Compare ϱ's from the fluid and snapshot simulations
discontinuity_tol = 2
rel_tol = 0.1
for ϱ_fluid, ϱ_particles, t in zip(ϱ['fluid simulations'], ϱ['particles simulations'], times):
abs_tol = rel_tol*np.std(ϱ_particles)
slope_left = ϱ_particles - np.roll(ϱ_particles, -1)
slope_right = np.roll(ϱ_particles, +1) - ϱ_particles
discontinuities = abs(slope_right - slope_left)
discontinuities = [max(d) for d in zip(*[np.roll(discontinuities, r) for r in range(-3, 4)])]
if not all(isclose(ϱ_fluid_i, ϱ_particles_i,
rel_tol=0,
abs_tol=(discontinuity_tol*discontinuity + abs_tol),
) for ϱ_fluid_i, ϱ_particles_i, discontinuity in zip(ϱ_fluid,
ϱ_particles,
discontinuities)):
abort('Fluid did not evolve correctly up to t = {} {}.\n'
'See "{}" for a visualization.'
.format(t, unit_time, fig_file))
# Done analysing
masterprint('done')
| gpl-3.0 |
robintw/scikit-image | skimage/viewer/utils/core.py | 18 | 6556 | import warnings
import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warnings.warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
    """Initialize QApplication.

    The QApplication needs to be initialized before creating any QWidgets.

    Returns the process-wide application object (created on first call).
    """
    global QApp
    # Reuse an already-running application if one exists; Qt allows only
    # one QApplication per process.
    QApp = QtWidgets.QApplication.instance()
    if QApp is None:
        QApp = QtWidgets.QApplication([])
    return QApp
def is_event_loop_running(app=None):
    """Return True if the Qt event loop is running.

    Parameters
    ----------
    app : QApplication, optional
        Application to query; defaults to the global one.
    """
    if app is None:
        app = init_qtapp()
    # `start_qtapp` tags the application object with `_in_event_loop`;
    # an untagged application is treated as not running.
    return getattr(app, '_in_event_loop', False)
def start_qtapp(app=None):
    """Start Qt mainloop.

    Parameters
    ----------
    app : QApplication, optional
        Application whose event loop is started; defaults to the global one.
    """
    if app is None:
        app = init_qtapp()
    if not is_event_loop_running(app):
        # Tag the app so nested calls can tell the loop is active; exec_()
        # blocks here until the last window is closed.
        app._in_event_loop = True
        app.exec_()
        app._in_event_loop = False
    else:
        app._in_event_loop = True
class RequiredAttr(object):
    """A class attribute that must be set before use."""
    # Class-level registry shared by all descriptor instances:
    # maps (descriptor, owner instance) -> stored value.
    instances = dict()
    def __init__(self, init_val=None):
        # NOTE(review): the initial value is keyed on (self, None) -- the key
        # used for class-level access -- while instance access in __get__
        # looks up (self, obj). Reading through an instance before __set__
        # has been called therefore raises KeyError rather than returning
        # init_val; confirm whether that is intended.
        self.instances[self, None] = init_val
    def __get__(self, obj, objtype):
        value = self.instances[self, obj]
        if value is None:
            # A stored value of None means the attribute was never assigned.
            raise AttributeError('Required attribute not set')
        return value
    def __set__(self, obj, value):
        self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
class ClearColormap(LinearColormap):
    """Color map of a single color whose alpha varies linearly from 0 to 1.

    Parameters
    ----------
    rgb : tuple of float
        (red, green, blue) intensities, each held constant across the map.
    max_alpha : float
        Opacity at the top end of the map.
    name : str
        Name of the colormap.
    """
    def __init__(self, rgb, max_alpha=1, name='clear_color'):
        r, g, b = rgb
        # Color channels are constant; only the alpha channel ramps up.
        cg_speq = {'blue': [(0.0, b), (1.0, b)],
                   'green': [(0.0, g), (1.0, g)],
                   'red': [(0.0, r), (1.0, r)],
                   'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
        LinearColormap.__init__(self, name, cg_speq)
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
    """Return a figure and axes sized so the axes exactly frame `image`.

    Unlike pyplot.figimage, this actually plots onto an axes object, which
    fills the figure. Plotting the image onto an axes allows for subsequent
    overlays of axes artists.

    Parameters
    ----------
    image : array
        image to plot
    scale : float
        If scale is 1, the figure and axes have the same dimension as the
        image. Smaller values of `scale` will shrink the figure.
    dpi : int
        Dots per inch for figure. If None, use the default rcParam.
    """
    if dpi is None:
        dpi = mpl.rcParams['figure.dpi']
    kwargs.setdefault('interpolation', 'nearest')
    kwargs.setdefault('cmap', 'gray')
    height, width = np.atleast_3d(image).shape[:2]
    size_inches = np.array((width, height), dtype=float) / dpi * scale
    fig, ax = new_plot(figsize=size_inches, dpi=dpi)
    # Make the axes fill the whole figure: no margins around the image.
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
    ax.set_axis_off()
    ax.imshow(image, **kwargs)
    ax.figure.canvas.draw()
    return fig, ax
def update_axes_image(image_axes, image):
    """Push a new image array into an existing image plot.

    Sets the plot's data array and resizes its extent so a new image shape
    is displayed correctly.

    Parameters
    ----------
    image_axes : `matplotlib.image.AxesImage`
        Image axes to update.
    image : array
        Image array.
    """
    image_axes.set_array(image)
    height, width = image.shape[:2]
    image_axes.set_extent((0, width, height, 0))
| bsd-3-clause |
freemindhv/tq-python | docs/conf.py | 1 | 8134 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
# Resolve the directory containing this conf.py (works regardless of the
# current working directory Sphinx was launched from).
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# Run sphinx-apidoc on every build so the API .rst stubs stay in sync
# with the `tq` package.
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../tq")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
# apidoc.main() expects a full argv, including the program name as argv[0].
apidoc.main(cmd_line.split(" "))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): sphinx.ext.pngmath was deprecated in Sphinx 1.4 and removed in
# Sphinx 2.0 (sphinx.ext.imgmath is the replacement) -- confirm which Sphinx
# version this project pins before upgrading.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tq'
copyright = u'2014, freeMind'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Best-effort: take the release string from the installed `tq` package; if it
# cannot be imported (e.g. building docs without installing), keep the
# placeholder values set above.
try:
    from tq import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tq-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'tq Documentation',
u'freeMind', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# Cross-reference targets for sphinx.ext.intersphinx; the Python entry is
# pinned to the interpreter version building the docs.
# NOTE(review): several of these URLs are historical (plain http, old hosts) --
# verify they still serve an objects.inv inventory.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://sphinx.pocoo.org', None),
    'python': ('http://docs.python.org/' + python_version, None),
    'matplotlib': ('http://matplotlib.sourceforge.net', None),
    'numpy': ('http://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| gpl-2.0 |
huongttlan/bokeh | bokeh/tests/test_protocol.py | 42 | 3959 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
    """Unit tests for the scalar/array conversions done by
    bokeh.protocol.BokehJSONEncoder.default."""
    def setUp(self):
        from bokeh.protocol import BokehJSONEncoder
        self.encoder = BokehJSONEncoder()
    def test_fail(self):
        # Plain dicts are not handled by `default` and must raise.
        self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
    @skipIf(not is_pandas, "pandas does not work in PyPy.")
    def test_panda_series(self):
        s = pd.Series([1, 3, 5, 6, 8])
        self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
    def test_numpyarray(self):
        a = np.arange(5)
        self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
    def test_numpyint(self):
        # np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
        # ndarray/scalar .item() is the supported equivalent.
        npint = np.int64(1).item()
        self.assertEqual(self.encoder.default(npint), 1)
        self.assertIsInstance(self.encoder.default(npint), int)
    def test_numpyfloat(self):
        npfloat = np.float64(1.33)
        self.assertEqual(self.encoder.default(npfloat), 1.33)
        self.assertIsInstance(self.encoder.default(npfloat), float)
    def test_numpybool_(self):
        nptrue = np.bool_(True)
        self.assertEqual(self.encoder.default(nptrue), True)
        self.assertIsInstance(self.encoder.default(nptrue), bool)
    @skipIf(not is_pandas, "pandas does not work in PyPy.")
    def test_pd_timestamp(self):
        # pd.tslib was removed from pandas; pd.Timestamp is the public
        # constructor and behaves identically here.
        ts = pd.Timestamp('April 28, 1948')
        self.assertEqual(self.encoder.default(ts), -684115200000)
class TestSerializeJson(unittest.TestCase):
    """Round-trip tests for bokeh.protocol.serialize_json/deserialize_json."""
    def setUp(self):
        from bokeh.protocol import serialize_json, deserialize_json
        self.serialize = serialize_json
        self.deserialize = deserialize_json
    def test_with_basic(self):
        self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
    def test_with_np_array(self):
        a = np.arange(5)
        self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
    @skipIf(not is_pandas, "pandas does not work in PyPy.")
    def test_with_pd_series(self):
        s = pd.Series([0, 1, 2, 3, 4])
        self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
    def test_nans_and_infs(self):
        # Non-finite floats are serialized to the JS-compatible strings
        # 'NaN', 'Infinity' and '-Infinity'.
        arr = np.array([np.nan, np.inf, -np.inf, 0])
        serialized = self.serialize(arr)
        deserialized = self.deserialize(serialized)
        assert deserialized[0] == 'NaN'
        assert deserialized[1] == 'Infinity'
        assert deserialized[2] == '-Infinity'
        assert deserialized[3] == 0
    @skipIf(not is_pandas, "pandas does not work in PyPy.")
    def test_nans_and_infs_pandas(self):
        # Same contract as test_nans_and_infs, but going through a Series.
        arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
        serialized = self.serialize(arr)
        deserialized = self.deserialize(serialized)
        assert deserialized[0] == 'NaN'
        assert deserialized[1] == 'Infinity'
        assert deserialized[2] == '-Infinity'
        assert deserialized[3] == 0
    @skipIf(not is_pandas, "pandas does not work in PyPy.")
    def test_datetime_types(self):
        """should convert to millis
        """
        # Both datetime values and DatetimeIndex entries become epoch millis.
        idx = pd.date_range('2001-1-1', '2001-1-5')
        df = pd.DataFrame({'vals' :idx}, index=idx)
        serialized = self.serialize({'vals' : df.vals,
                                     'idx' : df.index})
        deserialized = self.deserialize(serialized)
        baseline = {u'vals': [978307200000,
                              978393600000,
                              978480000000,
                              978566400000,
                              978652800000],
                    u'idx': [978307200000,
                             978393600000,
                             978480000000,
                             978566400000,
                             978652800000]
                    }
        assert deserialized == baseline
if __name__ == "__main__":
    # Allow running this test module directly: python test_protocol.py
    unittest.main()
| bsd-3-clause |
moble/sympy | examples/intermediate/sample.py | 107 | 3494 | """
Utility functions for plotting sympy functions.
See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d
graphing functions using matplotlib.
"""
from sympy.core.sympify import sympify, SympifyError
from sympy.external import import_module
np = import_module('numpy')
def sample2d(f, x_args):
    """
    Samples a 2d function f over specified intervals and returns two
    arrays (X, Y) suitable for plotting with matlab (matplotlib)
    syntax. See examples\mplot2d.py.

    f is a function of one variable, such as x**2.
    x_args is an interval given in the form (var, min, max, n)

    Points where f cannot be evaluated numerically are returned as NaN.
    """
    try:
        f = sympify(f)
    except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
    except (TypeError, ValueError):
        # Unpacking a badly shaped argument raises TypeError/ValueError;
        # the original `except AttributeError` could never catch it.
        raise ValueError("x_args must be a tuple of the form (var, min, max, n)")
    x_l = float(x_max - x_min)
    x_d = x_l/float(x_n)
    X = np.arange(float(x_min), float(x_max) + x_d, x_d)
    Y = np.empty(len(X))
    for i in range(len(X)):
        try:
            Y[i] = float(f.subs(x, X[i]))
        except TypeError:
            # Assigning None into a float array raises TypeError;
            # NaN is the value matplotlib skips when plotting.
            Y[i] = np.nan
    return X, Y
def sample3d(f, x_args, y_args):
    """
    Samples a 3d function f over specified intervals and returns three
    2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib)
    syntax. See examples\mplot3d.py.

    f is a function of two variables, such as x**2 + y**2.
    x_args and y_args are intervals given in the form (var, min, max, n)
    """
    try:
        f = sympify(f)
    except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
    try:
        x, x_min, x_max, x_n = x_args
        y, y_min, y_max, y_n = y_args
    except (TypeError, ValueError):
        # Unpacking a badly shaped argument raises TypeError/ValueError;
        # the original `except AttributeError` could never catch it.
        raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")
    x_d = float(x_max - x_min)/float(x_n)
    x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)
    y_d = float(y_max - y_min)/float(y_n)
    y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)
    # The original carried a private copy of matplotlib.mlab.meshgrid that was
    # never called; np.meshgrid is used directly.
    X, Y = np.meshgrid(x_a, y_a)
    Z = np.ndarray((len(X), len(X[0])))
    for j in range(len(X)):
        for k in range(len(X[0])):
            try:
                Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))
            except (TypeError, NotImplementedError):
                # Non-evaluable points are flattened to 0 (historical choice,
                # kept for backward compatibility with existing plots).
                Z[j][k] = 0
    return X, Y, Z
def sample(f, *var_args):
    """
    Samples a 2d or 3d function over specified intervals and returns
    a dataset suitable for plotting with matlab (matplotlib) syntax.
    Dispatches to sample2d or sample3d based on the number of intervals.

    f is a function of one or two variables, such as x**2.
    var_args are intervals for each variable given in the form (var, min, max, n)
    """
    dimensions = len(var_args)
    if dimensions == 1:
        return sample2d(f, var_args[0])
    if dimensions == 2:
        return sample3d(f, var_args[0], var_args[1])
    raise ValueError("Only 2d and 3d sampling are supported at this time.")
| bsd-3-clause |
zhonghualiu/FaST-LMM | fastlmm/inference/glmm.py | 1 | 26306 | import scipy as SP
import scipy.stats as ST
import numpy as NP
from numpy import dot
from scipy.linalg import cholesky,solve_triangular
from fastlmm.external.util.math import check_definite_positiveness,check_symmetry,ddot,dotd,trace2
from fastlmm.external.util.math import stl, stu
from sklearn.base import BaseEstimator
from fastlmm import Pr
import sys
'''
Important! Always run test.py in the current folder for unit testing after
changes have been made.
'''
class GLMM(object):
    '''
    generalized linear mixed model having up to two linear kernels
    f ~ N(X*beta, sig02*K0 + sig12*K1 + sign2*I),
    y ~ Bern(link(f))
    where
    K0 = G0*G0^T
    K1 = G1*G1^T
    '''
    def __init__(self, penalty=None, penalizeBias=False, debug=False):
        '''
        Input:
        penalty : None, 'l1' or 'l2'
        penalizeBias : True or False

        NOTE(review): the original docstring also listed `approx` and `link`
        parameters that this constructor does not accept -- presumably they
        belong to a subclass; confirm before documenting them here.
        '''
        self._debug = debug
        # Data terms; must be provided through setX/sety/setG before use.
        self._X=None
        self._y=None
        # Variance components of the covariance sig02*K0 + sig12*K1 + sign2*I.
        self._sig02=0.0
        self._sig12=0.0
        self._sign2=0.0
        self._G0 = None
        self._G1 = None
        self._isK0Set=False
        self._isK1Set=False
        # Fixed-effect weights.
        self._beta=None
        # Label translation between the user's alphabet and internal {-1,+1}.
        self._mapIn2OutY=None
        self._mapOut2InY=None
        self._outYType=None
        assert penalty in set([None,'l1','l2'])
        assert penalty != 'l1', 'l1 penalizer is not fully implemented yet.'
        self._penalty = penalty
        self._penalizeBias = penalizeBias
        self._hasBias = False
        self._biasIndex = None
        # penalization weights
        self._lambdaBeta = 0.1
        self._lambdaSig02 = 0.1
        self._lambdaSig12 = 0.1
        self._lambdaSign2 = 0.1
        # Dirty counters: >0 means cached constants / the posterior
        # approximation must be rebuilt before the next use.
        self._updateConstantsCount = 1
        self._updateApproximationCount = 1
def setX(self, X):
'''
set the fixed effects X (covariates).
--------------------------------------------------------------------------
Input:
X : [N*D] 2-dimensional array of covariates
--------------------------------------------------------------------------
'''
assert type(X) is NP.ndarray, 'X must be a numpy.ndarray.'
assert NP.all(NP.isfinite(X) & ~NP.isnan(X)), 'X must have only numbers.'
assert len(X.shape)==2, 'X must be a 2-dimensional array.'
assert X.shape[0] > 0
assert X.shape[1] > 0
self._X = X
self._updateConstantsCount += 1
self._hasBias = False
for i in xrange(X.shape[1]):
if len(NP.unique(X[:,i])) == 0 and X[0,i] == 1.0:
self._hasBias = True
self._biasIndex = i
break
def sety(self, y):
'''
set the phenotype y.
--------------------------------------------------------------------------
Input:
y : [N] 1-dimensional array of phenotype values (-1.0 or 1.0)
--------------------------------------------------------------------------
'''
assert type(y) is NP.ndarray, 'y must be a numpy.ndarray.'
assert len(y.shape)==1, 'y must be a 1-dimensional array.'
assert y.shape[0] > 0
uniquey = list(set(y))
assert len(uniquey)==2, 'y must have two unique values.'
uniquey = sorted(uniquey)
self._mapIn2OutY = {-1.0:uniquey[0],+1.0:uniquey[1]}
self._mapOut2InY = {uniquey[0]:-1.0,uniquey[1]:+1.0}
self._outYType = y.dtype
self._y = NP.empty(y.shape[0])
self._y[y==uniquey[0]] = -1.0
self._y[y==uniquey[1]] = +1.0
self._updateConstantsCount += 1
def setG(self, G0, G1=None):
'''
set the kernels K0 and K1 from G0 and G1.
----------------------------------------------------------------------------
Input:
G0 : [N*k0] array of random effects
G1 : [N*k1] array of random effects (optional)
-----------------------------------------------------------------------------
'''
assert type(G0) is NP.ndarray, 'G0 must be a numpy.ndarray.'
assert NP.all(NP.isfinite(G0) & ~NP.isnan(G0)), 'G0 must have only numbers.'
assert len(G0.shape)==2, 'G0 must be a 2-dimensional array.'
assert G0.shape[0] > 0
assert G0.shape[1] > 0
self._G0 = G0
if G1 is not None:
assert type(G1) is NP.ndarray, 'G1 must be a numpy.ndarray.'
assert NP.all(NP.isfinite(G1) & ~NP.isnan(G1)), 'G1 must have only numbers.'
assert len(G1.shape)==2, 'G1 must be a 2-dimensional array.'
assert G1.shape[0] > 0
assert G1.shape[1] > 0
self._G1 = G1
self._isK0Set = G0 is not None
self._isK1Set = G1 is not None
self._updateConstantsCount += 1
    # Operation dot(A,K). Abstract: subclasses implement the kernel algebra.
    def _ldotK(self, A):
        pass
    # Operation dot(K,A). Abstract: subclasses implement the kernel algebra.
    def _rdotK(self, A):
        pass
    # Return K's diagonal. Abstract: subclasses implement the kernel algebra.
    def _dKn(self):
        pass
    def printDebug(self):
        # Debug hook; no-op in the base class.
        pass
    '''
    -------------------------------------------------------------------------------
    Begin of hyperparameters
    Each setter invalidates the cached posterior approximation (by bumping
    _updateApproximationCount) only when the value actually changes.
    '''
    @property
    def sig02(self):
        # Variance attached to kernel K0.
        return self._sig02
    @sig02.setter
    def sig02(self, v):
        assert NP.isscalar(v)
        assert NP.isfinite(v)
        if self._sig02 == v:
            return
        self._updateApproximationCount += 1
        self._sig02 = v
    @property
    def sig12(self):
        # Variance attached to kernel K1.
        return self._sig12
    @sig12.setter
    def sig12(self, v):
        assert NP.isscalar(v)
        assert NP.isfinite(v)
        if self._sig12 == v:
            return
        self._updateApproximationCount += 1
        self._sig12 = v
    @property
    def sign2(self):
        # Noise variance (coefficient of the identity kernel).
        return self._sign2
    @sign2.setter
    def sign2(self, v):
        assert NP.isscalar(v)
        assert NP.isfinite(v)
        if self._sign2 == v:
            return
        self._updateApproximationCount += 1
        self._sign2 = v
    @property
    def beta(self):
        # Fixed-effect weights; returned as a copy so callers cannot
        # mutate the model state behind its back.
        return self._beta.copy()
    @beta.setter
    def beta(self, v):
        assert NP.all(NP.isfinite(v))
        if NP.all(self._beta == v):
            return
        self._updateApproximationCount += 1
        self._beta = v.copy()
    '''
    End of hyperparameters
    -------------------------------------------------------------------------------
    '''
def _xMeanCov(self, xX, xG0, xG1):
'''
Computes the mean and covariance between the latent variables.
You can provide one or n latent variables.
----------------------------------------------------------------------------
Input:
xX : [D] array of fixed effects or
[n*D] array of fixed effects for each latent variable
xG0 : [k0] array of random effects or
[n*k0] array of random effects for each latent variable
xG1 : [k1] array of random effects or
[n*k1] array of random effects for each latent variable
-----------------------------------------------------------------------------
Output:
xmean : [n*D] means of the provided latent variables
xK01 : [N] or [n*N] covariance between provided and prior latent
variables
xkk : float or [n] covariance between provided latent variables
-----------------------------------------------------------------------------
'''
xmean = xX.dot(self._beta)
if xG0 is not None:
xK01 = self._sig02*(xG0.dot(self._G0.T))
else:
if len(xX.shape)==1:
xK01 = NP.zeros(self._N)
else:
xK01 = NP.zeros((xX.shape[0], self._N))
if len(xG0.shape)==1:
xkk = self._sig02 * xG0.dot(xG0) + self._sign2
else:
xkk = self._sig02 * dotd(xG0,xG0.T) + self._sign2
if xG1 is not None:
xK01 += self._sig12*(xG1.dot(self._G1.T))
if len(xG0.shape)==1:
xkk += self._sig12 * xG1.dot(xG1)
else:
xkk += self._sig12 * dotd(xG1,xG1.T)
return (xmean,xK01,xkk)
    def _predict(self, xmean, xK01, xkk, prob):
        # Hook: subclasses compute the predictive distribution here.
        pass
    def predict(self, xX, xG0, xG1=None, prob=True):
        '''
        Compute the probability of y=1.0, for each provided latent variable.
        It can instead return only the most probable class by setting prob=False.
        --------------------------------------------------------------------------
        Input:
        xX : [D] array of fixed effects or
        [n*D] array of fixed effects for each latent variable
        xG0 : [k0] array of random effects or
        [n*k0] array of random effects for each latent variable
        xG1 : [k1] array of random effects or
        [n*k1] array of random effects for each latent variable
        prob : whether you want the probabilities or not.
        --------------------------------------------------------------------------
        Output:
        Array of probabilities or the most probable classes.
        --------------------------------------------------------------------------
        '''
        self._updateConstants()
        # Shape checks: accept a single sample (1-d input) or a batch (2-d).
        if len(xX.shape)==1:
            assert xX.shape[0]==self._D
            assert xG0.shape[0]==self._G0.shape[1]
            if xG1 is not None:
                assert xG1.shape[0]==self._G1.shape[1]
        elif len(xX.shape)==2:
            assert xX.shape[1]==self._D
            assert xG0.shape[1]==self._G0.shape[1]
            if xG1 is not None:
                assert xG1.shape[1]==self._G1.shape[1]
        else:
            assert False
        (xmean,xK01,xkk) = self._xMeanCov(xX, xG0, xG1)
        ps = self._predict(xmean, xK01, xkk, prob)
        if prob==False:
            # Map hard class decisions back to the user's label alphabet.
            return self._in2outy(ps)
        return ps
def _in2outy(self, yin):
'''
Translates the labels used in this class (-1.0, 1.0) to the labels
being used by the user.
'''
yout = NP.empty(yin.shape[0], dtype=self._outYType)
yout[yin == -1] = self._mapIn2OutY[-1]
yout[yin == +1] = self._mapIn2OutY[+1]
return yout
    def _updateApproximation(self):
        # Hook: subclasses rebuild their posterior approximation here.
        pass
    def optimize(self, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        '''
        Minimize the cost, which is the negative of marginal loglikelihood plus the
        penalty, by adjusting the hyperparameters.
        --------------------------------------------------------------------------
        Input:
        optSig02, optSig12, optSign2, and optBeta can be True or False and
        are used to choose which hyperparameters are going to be optimized.
        --------------------------------------------------------------------------
        Strategy: one deterministic start followed by random restarts; stop
        after `niterNoImprov` consecutive restarts without improvement.
        '''
        self._updateConstants()
        tol = 1e-6
        niterNoImprov = 5
        lowerBound = 1e-6
        betaScale = 4.0
        # initial values
        if optSig02:
            self.sig02 = 1.0
        if optSig12:
            self.sig12 = lowerBound
        if optSign2:
            self.sign2 = lowerBound
        if optBeta:
            self.beta = NP.zeros(self._D)
        bestCost = self._optimize(lowerBound, optSig02, optSig12, optSign2, optBeta)
        bestSolution = self._wrap_hyp(True, True, True, True)
        i = 0
        ntries = 0
        while i < niterNoImprov:
            # Random restart: draw fresh variances/weights and re-optimize.
            if optSig02:
                self.sig02 = max(NP.random.chisquare(1), lowerBound)
            if optSig12:
                self.sig12 = max(NP.random.chisquare(1), lowerBound)
            if optSign2:
                self.sign2 = max(NP.random.chisquare(1), lowerBound)
            if optBeta:
                self.beta = NP.random.normal(scale=betaScale, size=self._D)
            cost = self._optimize(lowerBound, optSig02, optSig12, optSign2, optBeta)
            if cost < bestCost:
                bestCost = cost
                bestSolution = self._wrap_hyp(True, True, True, True)
                # NOTE(review): bestCost is updated before this comparison, so
                # abs(cost-bestCost) is always 0 and the counter always
                # increments -- likely the previous best was meant here;
                # confirm the intended restart logic.
                if abs(cost-bestCost) > tol:
                    i = 0
                else:
                    i += 1
            else:
                i += 1
            ntries += 1
        #if self._verbose:
        #    Pr.prin('Number of tries: {}.'.format(ntries))
        self._unwrap_hyp(bestSolution, True, True, True, True)
        # If K0 is null, marginal likelihood is independent on
        # self._sig02. Thus, let it be 0.0 by default.
        if not self._isK0Set:
            self.sig02 = 0.0
        # If K1 is null, marginal likelihood is independent on
        # self._sig12. Thus, let it be 0.0 by default.
        if not self._isK1Set:
            self.sig12 = 0.0
        self._check_sigmas_at_zero(lowerBound, optSig02, optSig12, optSign2, optBeta)
    def _optimize(self, lowerBound, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        # Objective and gradient for the bounded solver; both first write the
        # candidate point back into the model state via _unwrap_hyp.
        def func(x):
            self._unwrap_hyp(x, optSig02, optSig12, optSign2, optBeta)
            return -self.marginal_loglikelihood()
        def grad(x):
            self._unwrap_hyp(x, optSig02, optSig12, optSign2, optBeta)
            return -self._mll_gradient()
        x = self._wrap_hyp(optSig02, optSig12, optSign2, optBeta)
        # bounds on sig02, sig12, sign2, and w
        bounds = []
        maxsig2 = 50.0
        maxbeta = 50.0
        if optSig02:
            bounds.append((lowerBound,maxsig2))
        if optSig12:
            bounds.append((lowerBound,maxsig2))
        if optSign2:
            bounds.append((lowerBound,maxsig2))
        if optBeta:
            bounds += [(-maxbeta,maxbeta)]*self._D
        # Truncated-Newton with box constraints; aa/bb (nfeval, rc) unused.
        (xfinal,aa,bb) = SP.optimize.fmin_tnc(func, x, fprime=grad, bounds=bounds, disp=0)
        # (xfinal,cost,msgs) = SP.optimize.fmin_l_bfgs_b(func, x, fprime=grad, bounds=bounds,
        # disp=self._verbose)
        # disp=False)
        self._unwrap_hyp(xfinal, optSig02, optSig12, optSign2, optBeta)
        marg = self.marginal_loglikelihood()
        # marg = -cost
        #if self._verbose:
        # Pr.prin('Best hyperparameters. sig02 '+str(self._sig02)+' sig12 '+str(self._sig12)+' sign2 '+str(self._sign2)+' beta '+str(self._beta))
        # Pr.prin('marg '+str(marg))
        return -marg
    def _check_sigmas_at_zero(self, lowerBound, optSig02, optSig12, optSign2, optBeta):
        # The optimizer is bounded away from 0 by `lowerBound`; try snapping
        # each variance that landed on the bound to exactly 0 and keep the
        # snap only if the marginal loglikelihood does not decrease.
        pmargll = self.marginal_loglikelihood()
        psig02 = self._sig02
        if optSig02:
            if abs(self._sig02-lowerBound)<1e-7:
                self.sig02 = 0.0
        psig12 = self._sig12
        if optSig12:
            if abs(self._sig12-lowerBound)<1e-7:
                self.sig12 = 0.0
        psign2 = self._sign2
        if optSign2:
            if abs(self._sign2-lowerBound)<1e-7:
                self.sign2 = 0.0
        margll = self.marginal_loglikelihood()
        if margll < pmargll:
            # Snapping hurt the likelihood; restore the optimized values.
            self.sig02 = psig02
            self.sig12 = psig12
            self.sign2 = psign2
def _is_kernel_zero(self):
return self._sig02==0.0 and self._sig12==0.0 and self._sign2==0.0
    def marginal_loglikelihood(self):
        '''
        Returns (regular marginal loglikelihood) - penalty
        '''
        # _regular_marginal_loglikelihood is provided by subclasses.
        margll = self._regular_marginal_loglikelihood()
        if self._penalty is None:
            return margll
        if self._penalizeBias:
            assert self._hasBias, "You set to penalize bias but there isn't one."
            beta = self.beta
        else:
            # Penalize every weight except the intercept.
            beta = self._betaNoBias()
        if self._penalty == 'l1':
            assert False, 'Not implemented yet.'
            return margll - self._lambdaBeta * sum(abs(beta)) - self._lambdaSign2 * abs(self.sign2)
        elif self._penalty == 'l2':
            #return margll - self._lambdaBeta * NP.dot(beta,beta) - self._lambdaSign2 * self.sign2**2
            r = margll - self._lambdaBeta * NP.dot(beta,beta) - self._lambdaSign2 * self.sign2**2
            # Kernel variances are only penalized when their kernel is in use.
            if self._isK0Set:
                r -= self._lambdaSig02 * self.sig02**2
            if self._isK1Set:
                r -= self._lambdaSig12 * self.sig12**2
            return r
        assert False, 'Unknown penalty.'
def _betaNoBias(self):
beta = self.beta.copy()
if self._hasBias:
assert self._hasBias, "You set to penalize bias but there isn't one."
beta = NP.concatenate( (beta[:self._biasIndex], beta[self._biasIndex+1:]) )
return beta
    def _mll_gradient(self, optSig02=True, optSig12=True, optSign2=True, optBeta=True):
        '''
        Marginal loglikelihood gradient.
        The flat gradient is laid out as [sig02?, sig12?, sign2?, beta...],
        matching _wrap_hyp/_unwrap_hyp.
        '''
        # Unpenalized gradient; _rmll_gradient is provided by subclasses.
        g = self._rmll_gradient()
        if self._penalty is None or optBeta is False:
            return g
        # Index bookkeeping: booleans act as 0/1 offsets.
        if optSig02:
            sig02Index = 0
        if optSig12:
            sig12Index = 0+optSig02
        if optSign2:
            sign2Index = optSig02 + optSig12
        firstIndex = optSig02 + optSig12 + optSign2
        gs = NP.zeros(g.shape[0])
        if self._penalty == 'l1':
            assert False, 'Not implemented yet.'
        elif self._penalty == 'l2':
            # d/dw of lambda*w^2 = 2*lambda*w for each penalized parameter.
            gs[firstIndex:] = 2.0 * self._lambdaBeta * self.beta
            if optSign2:
                gs[sign2Index] = 2.0 * self._lambdaSign2 * self.sign2
            if self._isK0Set and optSig02:
                gs[sig02Index] = 2.0 * self._lambdaSig02 * self.sig02
            if self._isK1Set and optSig12:
                gs[sig12Index] = 2.0 * self._lambdaSig12 * self.sig12
        else:
            assert False, 'Unknown penalty.'
        return g - gs
def _wrap_hyp(self, optSig02, optSig12, optSign2, optBeta):
x = NP.empty(sum([optSig02, optSig12, optSign2]) + sum([optBeta])*self._D)
i = 0
if optSig02:
x[i] = self._sig02
i += 1
if optSig12:
x[i] = self._sig12
i += 1
if optSign2:
x[i] = self._sign2
i += 1
if optBeta:
x[i:] = self._beta
return x
def _unwrap_hyp(self, x, optSig02, optSig12, optSign2, optBeta):
i = 0
if optSig02:
self.sig02 = x[i]
i += 1
if optSig12:
self.sig12 = x[i]
i += 1
if optSign2:
self.sign2 = x[i]
i += 1
if optBeta:
self.beta = x[i:]
    def _calculateMean(self):
        # Mean of the latent prior: X * beta.
        return self._X.dot(self._beta)
    def _updateConstants(self):
        # Hook: subclasses refresh cached shapes/kernels here.
        pass
class GLMM_N3K1(GLMM):
    # GLMM specialization that stores K0/K1 as dense [N*N] matrices and does
    # the kernel algebra directly on them (presumably the N3/K1 in the name
    # refers to the resulting O(N^3)-style cost -- TODO confirm).
    def __init__(self, penalty=None, penalizeBias=False, debug=False):
        GLMM.__init__(self, penalty=penalty, penalizeBias=penalizeBias, debug=debug)
        # Dense kernels; either set via setK or built lazily in _updateConstants.
        self._K0 = None
        self._K1 = None
    def setK(self, K0, K1=None):
        '''
        set the Kernels K0 and K1.
        --------------------------------------------------------------------------
        Input:
        K0 : [N*N] array, random effects covariance (positive definite)
        K1 : [N*N] array, random effects covariance (positive definite)(optional)
        --------------------------------------------------------------------------
        '''
        assert type(K0) is NP.ndarray, 'K0 must be a numpy.ndarray.'
        assert NP.all(NP.isfinite(K0) & ~NP.isnan(K0)), 'K0 must have only numbers.'
        assert len(K0.shape)==2, 'K0 must be a 2-dimensional array.'
        assert K0.shape[0] > 0
        assert K0.shape[1] > 0
        assert check_symmetry(K0), 'K0 must be a symmetric matrix.'
        assert check_definite_positiveness(K0), 'K0 must be a definite positive matrix.'
        self._K0 = K0
        if K1 is not None:
            # Same validity checks for the optional second kernel.
            assert type(K1) is NP.ndarray, 'K1 must be a numpy.ndarray.'
            assert NP.all(NP.isfinite(K1) & ~NP.isnan(K1)), 'K1 must have only numbers.'
            assert len(K1.shape)==2, 'K1 must be a 2-dimensional array.'
            assert K1.shape[0] > 0
            assert K1.shape[1] > 0
            assert check_symmetry(K1), 'K1 must be a symmetric matrix.'
            assert check_definite_positiveness(K1), 'K1 must be a definite positive matrix.'
            self._K1 = K1
        self._isK0Set = K0 is not None
        self._isK1Set = K1 is not None
        # Cached constants must be rebuilt before the next use.
        self._updateConstantsCount += 1
def _updateConstants(self):
'''
Updates some constant members, which is needed when the user, for e.g., change
the effects.
'''
if self._updateConstantsCount == 0:
return
assert self._X is not None, 'You must set X.'
if self._G0 is not None and self._K0 is not None:
assert abs(self._K0-self._G0.dot(self._G0.T)).max() < NP.sqrt(NP.finfo(NP.float).eps), 'You have set both G0 and K0, but K0!=G0*G0^T.'
if self._G1 is not None and self._K1 is not None:
assert abs(self._K1-self._G1.dot(self._G1.T)).max() < NP.sqrt(NP.finfo(NP.float).eps), 'You have set both G1 and K1, but K1!=G1*G1^T.'
self._N = self._X.shape[0]
self._D = self._X.shape[1]
if self._K0 is None:
if self._G0 is None:
self._K0 = NP.zeros([self._N, self._N])
else:
self._K0 = self._G0.dot(self._G0.T)
if self._K1 is None:
if self._G1 is None:
self._K1 = NP.zeros([self._N, self._N])
else:
self._K1 = self._G1.dot(self._G1.T)
assert self._K0.shape[0]==self._X.shape[0], 'K0 (or G0) and X must have the same number of rows.'
assert self._K1.shape[0]==self._X.shape[0], 'K1 (or G1) and X must have the same number of rows.'
assert self._y is not None, 'You must set y.'
assert self._y.shape[0]==self._X.shape[0], 'X and y have incompatible sizes.'
self._updateApproximationCount += 1
self._updateConstantsCount = 0
# If A is dxN, we have O(dN^2) operations.
def _ldotK(self, A):
#TODO: pre-calculate in updateApproximationBegin
R = self._sig02 * dot(A, self._K0) + A * self._sign2
if self._K1 is not None:
R += self._sig12 * dot(A, self._K1)
return R
# If A is Nxd, we have O(dN^2) operations.
def _rdotK(self, A):
#TODO: pre-calculate in updateApproximationBegin
R = self._sig02 * dot(self._K0, A) + A * self._sign2
if self._K1 is not None:
R += self._sig12 * dot(self._K1, A)
return R
# Return K's diagonal, in O(N).
def _dKn(self):
#TODO: pre-calculate in updateApproximationBegin
d = self._sig02 * NP.diag(self._K0) + self._sign2
if self._K1 is not None:
d += self._sig12 * NP.diag(self._K1)
return d
def _calculateLn(self, K, D):
Bn = ddot(D, ddot(K, D, left=False), left=True)
Bn[NP.diag_indices_from(Bn)] += 1.0
Ln = cholesky(Bn, lower=True, check_finite=False)
return Ln
class GLMM_N1K3(GLMM):
    """GLMM specialization driven by low-rank factors G0/G1, so that every
    kernel product is evaluated through the factors (K = G G^T) and an
    [N*N] matrix is never materialized."""

    def __init__(self, penalty=None, penalizeBias=False, debug=False):
        GLMM.__init__(self, penalty=penalty, penalizeBias=penalizeBias, debug=debug)

    # If A is dxN, we have O(kdN) operations (assuming N>=k>=d).
    def _ldotK(self, A):
        # A @ K with K = sig02*G0 G0^T + sig12*G1 G1^T + sign2*I.
        out = self._sig02 * (A.dot(self._G0).dot(self._G0.T)) + A * self._sign2
        if self._G1 is not None:
            out += self._sig12 * (A.dot(self._G1).dot(self._G1.T))
        return out

    # If A is Nxd, we have O(kdN) operations (assuming N>=k>=d).
    def _rdotK(self, A):
        # K @ A for the same composite kernel, factored form.
        out = self._sig02 * (self._G0.dot(self._G0.T.dot(A))) + A * self._sign2
        if self._G1 is not None:
            out += self._sig12 * (self._G1.dot(self._G1.T.dot(A)))
        return out

    # Return K's diagonal, in O((k0+k1)*N).
    def _dKn(self):
        diag = self._sig02 * dotd(self._G0, self._G0.T) + self._sign2
        if self._G1 is not None:
            diag += self._sig12 * dotd(self._G1, self._G1.T)
        return diag

    def _updateConstants(self):
        """Refresh cached sizes and default factors after the user changes
        the effects; a no-op unless something was modified since the last
        call (tracked by ``_updateConstantsCount``)."""
        if self._updateConstantsCount == 0:
            return
        assert self._X is not None, 'You must set X.'
        self._N = self._X.shape[0]
        self._D = self._X.shape[1]
        # A missing factor defaults to a single zero column (a null effect).
        if self._G0 is None:
            self._G0 = NP.zeros([self._N, 1])
        if self._G1 is None:
            self._G1 = NP.zeros([self._N, 1])
        assert self._G0.shape[0]==self._X.shape[0], 'G0 and X must have the same number of rows.'
        assert self._G1.shape[0]==self._X.shape[0], 'G1 and X must have the same number of rows.'
        assert self._y is not None, 'You must set y.'
        assert self._y.shape[0]==self._X.shape[0], 'X and y have incompatible sizes.'
        self._updateApproximationCount += 1
        self._updateConstantsCount = 0

    def _calculateG01(self):
        # Stack the sigma-scaled factors side by side: [sqrt(s0)*G0, sqrt(s1)*G1].
        left = NP.sqrt(self._sig02) * self._G0
        right = NP.sqrt(self._sig12) * self._G1
        return NP.concatenate((left, right), axis=1)

    def _calculateLk(self, G01, D):
        # Lower Cholesky factor of Bk = I + G01^T D G01.
        Bk = dot(G01.T, ddot(D, G01, left=True))
        Bk[NP.diag_indices_from(Bk)] += 1.0
        return cholesky(Bk, lower=True, check_finite=False)
| apache-2.0 |
sknepneklab/SAMoS | analysis/plot_analysis_polar/plot_order_PRE.py | 1 | 3758 | # ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
#! /usr/bin/python
import sys, os, glob
import cPickle as pickle
import numpy as np
import scipy as sp
from scipy.io import savemat
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
#from matplotlib import rc
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
# setting global parameters
#matplotlib.rcParams['text.usetex'] = 'true'
matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['axes.linewidth'] = 2
matplotlib.rcParams['xtick.major.size'] = 8
matplotlib.rcParams['ytick.major.size'] = 8
matplotlib.rcParams['font.size']=20
matplotlib.rcParams['legend.fontsize']=14

# Anchor points for a custom red->green->blue colormap; each entry is
# (position, value_below, value_above) in the format expected by
# LinearSegmentedColormap.
cdict = {'red': [(0.0, 0.75, 0.75),
                 (0.3, 1.0, 1.0),
                 (0.5, 0.4, 0.0),
                 (1.0, 0.0, 0.0)],
         'green': [(0.0, 0.0, 0.0),
                   (0.25, 0.0, 0.5),
                   (0.5, 1.0, 1.0),
                   (0.75, 0.5, 0.0),
                   (1.0, 0.0, 0.0)],
         'blue': [(0.0, 0.0, 0.0),
                  (0.5, 0.0, 0.0),
                  (0.7, 1.0, 1.0),
                  (1.0, 0.25, 0.25)]}

basedir='/media/drogon/home/silke/Documents/Curved/Runs_rebuttal/'

# Order parameter n = |\frac{1}{N R v_0} \sum r \times v|
# Figure 1: polar order parameter versus sphere radius R at fixed v0.
fig=plt.figure(figsize=(10,7),linewidth=2.0)
ax=plt.gca()

RList=['5','8','12','16','20','40','60']
#testmap2=LinearSegmentedColormap('test',cdict,N=len(RList))
JList=['1']
vList=['0.5']

orderpar=np.zeros((len(RList),))
dorder=np.zeros((len(RList),))
rval=np.zeros((len(RList),))
for i in range(len(vList)):
    for j in range(len(RList)):
        print vList[i],RList[j]
        outfile2=basedir + '/axis_v0' + vList[i] + '_R' + RList[j] + '.dat'
        axis=sp.loadtxt(outfile2, unpack=True)[:,:]
        # NOTE(review): rows 3:6 appear to hold the mean rotation-axis vector
        # per time frame -- confirm against the code writing axis_*.dat.
        orderpar0=np.sqrt(axis[3,:]**2+axis[4,:]**2+axis[5,:]**2)
        # Time-average and standard deviation of the order parameter.
        orderpar[j]=np.mean(orderpar0)
        dorder[j]=np.std(orderpar0)
        rval[j]=float(RList[j])
    plt.errorbar(rval,orderpar,yerr=dorder,color=(0.8,0,0), linestyle='solid',marker='s',markersize=10,)

plt.xlabel(r'$R$')
plt.ylabel('p')
#plt.ylim(0,1.1)
plt.xlim(0,65)
#plt.legend(loc=4,ncol=1)
plt.title('Order parameter')

basedir='/media/drogon/home/silke/Documents/Curved/Runs_nuslices/'

# Order parameter n = |\frac{1}{N R v_0} \sum r \times v|
# Figure 2: order parameter versus noise strength nu, one curve per v0.
fig=plt.figure(figsize=(10,7),linewidth=2.0)
ax=plt.gca()

nuList=['0.001', '0.1','0.2', '0.3','0.5', '0.7','1','1.5', '2', '2.5', '3', '4', '5']
JList=['1']
vList=['0.01','0.1','0.5']
testmap2=LinearSegmentedColormap('test',cdict,N=len(vList))

# NOTE(review): these accumulators are reused across the v0 curves; each
# errorbar call happens after the inner loop has refilled them.
orderpar=np.zeros((len(nuList),))
dorder=np.zeros((len(nuList),))
nuval=np.zeros((len(nuList),))
for i in range(len(vList)):
    for j in range(len(nuList)):
        print vList[i],nuList[j]
        outfile2=basedir + '/axis_v0' + vList[i] + '_nu' + nuList[j] + '.dat'
        axis=sp.loadtxt(outfile2, unpack=True)[:,:]
        orderpar0=np.sqrt(axis[3,:]**2+axis[4,:]**2+axis[5,:]**2)
        orderpar[j]=np.mean(orderpar0)
        dorder[j]=np.std(orderpar0)
        nuval[j]=float(nuList[j])
    plt.errorbar(nuval,orderpar,yerr=dorder,color=testmap2(i), linestyle='solid',marker='s',markersize=10,)

#hmm=np.linspace(-3,1,10)
#plt.plot(hmm,hmm/hmm,linestyle='--',color='k')
plt.xlabel(r'$\nu$')
plt.ylabel('p')
#plt.ylim(0,1.1)
#plt.xlim(0,2.2)
plt.legend(loc=4,ncol=1)
plt.title('Order parameter')

plt.show()
| gpl-3.0 |
sanjayankur31/nest-simulator | pynest/examples/spatial/grid_iaf_irr.py | 20 | 1453 | # -*- coding: utf-8 -*-
#
# grid_iaf_irr.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create 12 freely placed iaf_psc_alpha neurons
-----------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt

nest.ResetKernel()

# 12 neurons at uniformly random positions inside a 2.0 x 1.5 extent.
pos = nest.spatial.free([nest.random.uniform(-0.75, 0.75), nest.random.uniform(-0.5, 0.5)], extent=[2., 1.5])

l1 = nest.Create('iaf_psc_alpha', 12, positions=pos)

nest.PrintNodes()

nest.PlotLayer(l1, nodesize=50)

# beautify
# Fix: the original called plt.axes() repeatedly; since matplotlib 3.4 each
# bare plt.axes() call ADDS a new (empty) axes to the figure, so the aspect
# ratio and tick settings never reached the layer plot. Grab the current
# axes once instead.
ax = plt.gca()
plt.axis([-1.0, 1.0, -0.75, 0.75])
ax.set_aspect('equal', 'box')
ax.set_xticks((-0.75, -0.25, 0.25, 0.75))
ax.set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('Extent: 2.0')
plt.ylabel('Extent: 1.5')
plt.show()
| gpl-2.0 |
williamgilpin/netsim | marktools/renorm_neq.py | 1 | 3232 | """
Given row-normalized stochastic matrices, functions to consolidate columns, rows,
and both rows and columns. Unlike the function renorm(), the row consolidation
does not use the steady-state state occupation probabilities but instead weighs
each row in a cluster equally---corresponding to local equilibration over some
timescale proportional to the smallest transition rate between two states that get
consolidated.
"""
from numpy import *
from scipy import *
from matplotlib.pyplot import *
from scipy.linalg import eig
def renorm_col(inmat, k=2, its=1):
    """
    Take a row-normalized transition matrix and
    k-tuple the columns together

    NOTE: THE COLUMNS ARE CLUSTERED BY THEIR ORIGINAL SORTING

    Parameters (also attributes)
    ----------
    inmat : array
        An graph adjacency matrix to be renormalized
    k : int
        The number of states to cluster in each step
    its : int
        The number of times to apply the
        renormalization operation

    Returns
    -------
    tmat : array
        A renormalized array
    """
    tmat = copy(inmat)
    for tt in range(its):
        sz = tmat.shape
        num_col = sz[1]
        # int(...) is required: ceil() returns a float and modern numpy
        # rejects non-integer array dimensions in zeros().
        nmat = zeros([sz[0], int(ceil(sz[1] / double(k)))])
        nsz = nmat.shape
        # Sum each full cluster of k consecutive columns, except the last one.
        for ii in range(nsz[1] - 1):
            nmat[:, ii] = sum(tmat[:, (k * ii):(k * (ii + 1))], axis=1)
        # The last cluster absorbs the leftover columns: k of them when the
        # column count divides evenly, num_col % k otherwise.
        if (mod(num_col, k) == 0):
            nmat[:, -1] = sum(tmat[:, -k:], axis=1)
        else:
            nmat[:, -1] = sum(tmat[:, -(mod(num_col, k)):], axis=1)
        tmat = nmat
    return tmat
def renorm_row(inmat, k=2, its=1):
    """
    Uniform weighting assumes local equilibration

    Take a row-normalized transition matrix and
    k-tuple the rows together, averaging each cluster of rows with equal
    weights (1/k, or 1/(n % k) for a short trailing cluster).

    NOTE: THE ROWS ARE CLUSTERED BY THEIR ORIGINAL SORTING

    Parameters (also attributes)
    ----------
    inmat : array
        An graph adjacency matrix to be renormalized
    k : int
        The number of states to cluster in each step
    its : int
        The number of times to apply the
        renormalization operation

    Returns
    -------
    tmat : array
        A renormalized array
    """
    # Work on the transpose so rows can be clustered with column arithmetic.
    tmat = copy(inmat).T
    for tt in range(its):
        sz = tmat.shape
        num_col = sz[1]
        # int(...) is required: ceil() returns a float and modern numpy
        # rejects non-integer array dimensions in zeros().
        nmat = zeros([sz[0], int(ceil(sz[1] / double(k)))])
        nsz = nmat.shape
        # Equal-weight average of each full cluster of k rows (uniform
        # weighting corresponds to assuming local equilibration).
        for ii in range(nsz[1] - 1):
            nmat[:, ii] = (1. / k) * sum(tmat[:, (k * ii):(k * (ii + 1))], axis=1)
        # Trailing cluster: average over k rows, or over the leftover
        # num_col % k rows when the count does not divide evenly.
        if (mod(num_col, k) == 0):
            nmat[:, -1] = (1. / k) * sum(tmat[:, -k:], axis=1)
        else:
            nmat[:, -1] = (1. / (mod(num_col, k))) * sum(tmat[:, -(mod(num_col, k)):], axis=1)
        tmat = nmat
    return tmat.T
def renorm_neq(inmat, k=2, its=1):
    """
    Renormalize both the rows and the columns of a square array by k-tuples.

    The row combination does not use the equilibrium distribution; it assumes
    local equilibration within each consolidated state, so every row in a
    cluster receives equal weight.
    """
    col_merged = renorm_col(inmat, k=k, its=its)
    return renorm_row(col_merged, k=k, its=its)
| mit |
ashapochka/saapy | setup.py | 1 | 3540 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Runtime dependencies; the inline comments document why each package is
# pulled in. The list is passed verbatim to install_requires below.
deps = [
    'invoke',  # task automation tool, see tasks.py for project task definitions
    # python optimization and utilities
    'cython',  # python to c optimization
    'toolz',  # functional programming support
    'recordclass',  # better than namedtuple
    'sortedcontainers',  # sorted analogs of python containers
    'appdirs',  # paths to standard directories
    'layeredconfig',  # application configuration support
    'pyyaml',  # yaml support
    'profilehooks',  # time profiling of the functions
    'ujson',  # faster json
    'cryptography',  # better security
    # REST clients
    'uritemplate',  # templates for uri and url
    'requests',  # enhanced http library
    'jira',  # JIRA client API
    'github3.py',  # Github client
    'boto',  # python client for AWS
    # various clients
    'gitpython',  # API to git repositories
    'antlr4-python3-runtime',
    # runtime to run code parser generated by antlr4 for python 3
    # number crunching
    'numpy',  # dependency from pymc
    'numexpr',  # fast numerical array computations
    'scipy',  # mathematics platform
    'pandas',  # tabular data crunching
    'feather-format',  # feather file format support for pandas data frames
    'sympy',  # symbolic math
    # statsmodels # statistical models
    # pymc # Bayesian estimation
    'simpy',  # simulations
    # machine learning
    'scikit-learn',  # machine learning support
    'keras',  # API for deep learning implementations such as TensorFlow
    'tensorflow',  # deep learning library
    # text crunching
    'pyIsEmail',  # email validation
    'babel',
    # text localization and formatting of numbers, percents, currencies
    'python-Levenshtein',  # fast string comparison
    'fuzzywuzzy',  # fuzzy string matching with heuristics based on difflib
    'pyenchant',  # spell checking
    'spacy',
    'gensim',
    'pyLDAvis',
    'textacy',
    # nltk # language analysis, also need to install nltk data as described
    # on nltk site
    # graphics, charts, document generation
    'python-docx',  # word document manipulations
    'matplotlib',  # graphing
    'seaborn',  # enhanced graphing on top of matplotlib
    'bokeh',  # better charts
    'nxviz',  # networkx graph vizualization based on matplotlib
    # xlwings # excel python scripting only win and osx
    # python-pptx # powerpoint support
    # pillow # graphics manipulation
    # tabulate # generate tables in text files from data structures like
    # pandas data frames or lists of lists
    # graph structures
    'networkx',  # support for graph based domain model
    'neo4j-driver',  # neo4j official driver
    # Jupyter and IPython support
    'jupyter',  # Jupyter notebook server
    'jupyter_contrib_nbextensions',
    'nbconvert',
    'pandas-profiling'  # pandas dataframe report generation
]

# TODO: by default should be README.rst, will .md work?
# NOTE(review): file reads below assume the platform default encoding --
# confirm the files are ASCII/UTF-8 on all target platforms.
with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    saapy_license = f.read()

setup(
    name='saapy',
    version='0.0.1',
    description='SAApy - System Architecture Assessment Toolset',
    long_description=readme,
    author='Andriy Shapochka',
    author_email='ashapochka@gmail.com',
    url='https://github.com/ashapochka/saapy',
    license=saapy_license,
    packages=find_packages(include=('saapy', 'saapy.*')),
    install_requires=deps
)
# TODO: enhance packaging and add to PYPI,
# ref https://github.com/pypa/sampleproject
| apache-2.0 |
poryfly/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
    """Monte Carlo approximation of an RBF-kernel feature map.

    Draws random Fourier features ("random kitchen sinks") so that inner
    products in the transformed space approximate the RBF kernel
    exp(-gamma * ||x - y||^2).

    Read more in the :ref:`User Guide <rbf_kernel_approx>`.

    Parameters
    ----------
    gamma : float
        Parameter of RBF kernel: exp(-gamma * x^2)

    n_components : int
        Number of Monte Carlo samples per original feature; equals the
        dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        If int, seed for the random number generator; if a RandomState
        instance, the generator itself.

    Notes
    -----
    See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
    Benjamin Recht, and "Weighted Sums of Random Kitchen Sinks: Replacing
    minimization with randomization in learning" by the same authors
    (http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf).
    """

    def __init__(self, gamma=1., n_components=100, random_state=None):
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Draw the random projection; its shape depends on X's n_features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : object
            The fitted transformer.
        """
        X = check_array(X, accept_sparse='csr')
        rng = check_random_state(self.random_state)
        n_features = X.shape[1]

        # Gaussian frequencies scaled for the requested kernel width, plus a
        # uniform phase offset per output dimension.
        self.random_weights_ = (np.sqrt(2 * self.gamma) * rng.normal(
            size=(n_features, self.n_components)))
        self.random_offset_ = rng.uniform(0, 2 * np.pi,
                                          size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Map X into the randomized feature space.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')

        X = check_array(X, accept_sparse='csr')
        features = safe_sparse_dot(X, self.random_weights_)
        features += self.random_offset_
        # cos applied in place, then rescaled so the feature map's inner
        # product is an unbiased estimate of the kernel.
        np.cos(features, features)
        features *= np.sqrt(2.) / np.sqrt(self.n_components)
        return features
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximates feature map of the "skewed chi-squared" kernel by Monte
    Carlo approximation of its Fourier transform.

    Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.

    Parameters
    ----------
    skewedness : float
        "skewedness" parameter of the kernel. Needs to be cross-validated.

    n_components : int
        number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    References
    ----------
    See "Random Fourier Approximations for Skewed Multiplicative Histogram
    Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.

    See also
    --------
    AdditiveChi2Sampler : A different approach for approximating an additive
        variant of the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
    """

    def __init__(self, skewedness=1., n_components=100, random_state=None):
        self.skewedness = skewedness
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X.

        Samples random projection according to n_features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X)
        random_state = check_random_state(self.random_state)
        n_features = X.shape[1]
        uniform = random_state.uniform(size=(n_features, self.n_components))
        # transform by inverse CDF of sech: the kernel's Fourier spectrum is
        # a hyperbolic-secant density, so inverse-CDF sampling of uniforms
        # yields the required random frequencies.
        self.random_weights_ = (1. / np.pi
                                * np.log(np.tan(np.pi / 2. * uniform)))
        self.random_offset_ = random_state.uniform(0, 2 * np.pi,
                                                   size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')

        # copy=True here makes the in-place updates below safe; the second
        # check_array call validates without copying again.
        X = as_float_array(X, copy=True)
        X = check_array(X, copy=False)
        if (X < 0).any():
            raise ValueError("X may not contain entries smaller than zero.")

        # Shift into the strictly positive domain, then move to log space.
        X += self.skewedness
        np.log(X, X)
        projection = safe_sparse_dot(X, self.random_weights_)
        projection += self.random_offset_
        # cos applied in place, then rescaled so the feature map's inner
        # product is an unbiased estimate of the kernel.
        np.cos(projection, projection)
        projection *= np.sqrt(2.) / np.sqrt(self.n_components)
        return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximate feature map for additive chi2 kernel.

    Uses sampling the fourier transform of the kernel characteristic
    at regular intervals.

    Since the kernel that is to be approximated is additive, the components of
    the input vectors can be treated separately.  Each entry in the original
    space is transformed into 2*sample_steps+1 features, where sample_steps is
    a parameter of the method. Typical values of sample_steps include 1, 2 and
    3.

    Optimal choices for the sampling interval for certain data ranges can be
    computed (see the reference). The default values should be reasonable.

    Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.

    Parameters
    ----------
    sample_steps : int, optional
        Gives the number of (complex) sampling points.
    sample_interval : float, optional
        Sampling interval. Must be specified when sample_steps not in {1,2,3}.

    Notes
    -----
    This estimator approximates a slightly different version of the additive
    chi squared kernel then ``metric.additive_chi2`` computes.

    See also
    --------
    SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
        the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.

    sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
        squared kernel.

    References
    ----------
    See `"Efficient additive kernels via explicit feature maps"
    <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
    A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
    2011
    """

    def __init__(self, sample_steps=2, sample_interval=None):
        self.sample_steps = sample_steps
        self.sample_interval = sample_interval

    def fit(self, X, y=None):
        """Set parameters."""
        X = check_array(X, accept_sparse='csr')
        if self.sample_interval is None:
            # See reference, figure 2 c)
            # (empirically good intervals for 1, 2 or 3 sampling steps).
            if self.sample_steps == 1:
                self.sample_interval_ = 0.8
            elif self.sample_steps == 2:
                self.sample_interval_ = 0.5
            elif self.sample_steps == 3:
                self.sample_interval_ = 0.4
            else:
                raise ValueError("If sample_steps is not in [1, 2, 3],"
                                 " you need to provide sample_interval")
        else:
            self.sample_interval_ = self.sample_interval
        return self

    def transform(self, X, y=None):
        """Apply approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)

        Returns
        -------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array of sparse matrix depends on
            the type of the input X.
        """
        msg = ("%(name)s is not fitted. Call fit to set the parameters before"
               " calling transform")
        check_is_fitted(self, "sample_interval_", msg=msg)

        X = check_array(X, accept_sparse='csr')
        sparse = sp.issparse(X)

        # check if X has negative values. Doesn't play well with np.log.
        if ((X.data if sparse else X) < 0).any():
            raise ValueError("Entries of X must be non-negative.")
        # zeroth component
        # 1/cosh = sech
        # cosh(0) = 1.0
        # Dispatch to the dense or sparse implementation; both compute the
        # same sampled-Fourier features.
        transf = self._transform_sparse if sparse else self._transform_dense
        return transf(X)

    def _transform_dense(self, X):
        # Work only on nonzero entries: log() of zero is undefined, and zero
        # inputs map to zero features in every component.
        non_zero = (X != 0.0)
        X_nz = X[non_zero]

        # Zeroth (real) component: sqrt(x * L), with L the sampling interval.
        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X_nz)
        step_nz = 2 * X_nz * self.sample_interval_

        # For each further sampling point j, append the real (cos) and
        # imaginary (sin) parts of the sampled Fourier feature; the sech
        # factor comes from the chi2 kernel's spectrum.
        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)

    def _transform_sparse(self, X):
        # CSR structure (indices/indptr) is shared by every output component,
        # since zeros stay zero under the feature map.
        indices = X.indices.copy()
        indptr = X.indptr.copy()

        data_step = np.sqrt(X.data * self.sample_interval_)
        X_step = sp.csr_matrix((data_step, indices, indptr),
                               shape=X.shape, dtype=X.dtype, copy=False)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X.data)
        step_nz = 2 * X.data * self.sample_interval_

        # Same cos/sin pairs as the dense path, built on the nonzero data.
        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            data_step = factor_nz * np.cos(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

            data_step = factor_nz * np.sin(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

        return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
    """Approximate a kernel map using a subset of the training data.

    Constructs an approximate feature map for an arbitrary kernel
    using a subset of the data as basis.

    Read more in the :ref:`User Guide <nystroem_kernel_approx>`.

    Parameters
    ----------
    kernel : string or callable, default="rbf"
        Kernel map to be approximated. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.

    n_components : int
        Number of features to construct.
        How many data points will be used to construct the mapping.

    gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential chi2 and
        sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    random_state : {int, RandomState}, optional
        If int, random_state is the seed used by the random number generator;
        if RandomState instance, random_state is the random number generator.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Subset of training points used to construct the feature map.

    component_indices_ : array, shape (n_components)
        Indices of ``components_`` in the training set.

    normalization_ : array, shape (n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.

    References
    ----------
    * Williams, C.K.I. and Seeger, M.
      "Using the Nystroem method to speed up kernel machines",
      Advances in neural information processing systems 2001

    * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
      "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
      Comparison",
      Advances in Neural Information Processing Systems 2012

    See also
    --------
    RBFSampler : An approximation to the RBF kernel using random Fourier
        features.

    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
    """

    def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
                 kernel_params=None, n_components=100, random_state=None):
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        self.kernel_params = kernel_params
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit estimator to data.

        Samples a subset of training points, computes kernel
        on these and computes normalization matrix.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Training data.
        """
        X = check_array(X, accept_sparse='csr')
        rnd = check_random_state(self.random_state)
        n_samples = X.shape[0]

        # get basis vectors
        if self.n_components > n_samples:
            # XXX should we just bail?
            n_components = n_samples
            warnings.warn("n_components > n_samples. This is not possible.\n"
                          "n_components was set to n_samples, which results"
                          " in inefficient evaluation of the full kernel.")

        else:
            n_components = self.n_components
        n_components = min(n_samples, n_components)
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:n_components]
        basis = X[basis_inds]

        basis_kernel = pairwise_kernels(basis, metric=self.kernel,
                                        filter_params=True,
                                        **self._get_kernel_params())

        # sqrt of kernel matrix on basis vectors
        # (S is clipped away from zero so the pseudo-inverse square root is
        # numerically stable for near-singular basis kernels)
        U, S, V = svd(basis_kernel)
        S = np.maximum(S, 1e-12)
        self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
        self.components_ = basis
        # Fix: store only the indices of the points actually kept as the
        # basis. The previous code stored the full permutation ``inds``
        # (length n_samples), contradicting the documented shape
        # (n_components,) of ``component_indices_``.
        self.component_indices_ = basis_inds
        return self

    def transform(self, X):
        """Apply feature map to X.

        Computes an approximate feature map using the kernel
        between some training points and X.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Data to transform.

        Returns
        -------
        X_transformed : array, shape=(n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X, accept_sparse='csr')

        # Kernel between new points and the basis, whitened by the
        # normalization matrix computed in fit().
        kernel_params = self._get_kernel_params()
        embedded = pairwise_kernels(X, self.components_,
                                    metric=self.kernel,
                                    filter_params=True,
                                    **kernel_params)
        return np.dot(embedded, self.normalization_.T)

    def _get_kernel_params(self):
        # Callable kernels receive only the user-supplied kernel_params;
        # string kernels additionally get gamma/degree/coef0.
        params = self.kernel_params
        if params is None:
            params = {}
        if not callable(self.kernel):
            params['gamma'] = self.gamma
            params['degree'] = self.degree
            params['coef0'] = self.coef0

        return params
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.