repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
CarterBain/AlephNull | tests/risk/answer_key.py | 3 | 11125 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
def col_letter_to_index(col_letter):
# Supports multi-letter column names (e.g. 'AA', 'AB') by treating
# the letters as base-26 digits (A=1 ... Z=26).
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
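# Illustrative examples (added for clarity, not in the original source): with the
# base-26 interpretation above, col_letter_to_index('A') == 1,
# col_letter_to_index('D') == 4 and col_letter_to_index('AB') == 26 + 2 == 28;
# DataIndex below subtracts 1 to get the 0-based column index expected by xlrd.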
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'r') as f:
md5 = hashlib.md5()
while True:
buf = f.read(1024)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
Get the latest answer key from a publicly available location.
Logic for determining what and when to download is as follows:
- If there is no local spreadsheet file, then get the latest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
--- If the spreadsheet's checksum does not match the latest, then download
the spreadsheet for the latest checksum and replace the local spreadsheet file.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
It is possible that md5s could collide; if that is ever the case, we should
find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'w') as f:
f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the workbook.
The python-excel libraries use a 0-based index, while the spreadsheet in a GUI
uses a 1-based index.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
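# Worked example (added for clarity, not in the original file): the entry
# DataIndex('Sim Period', 'D', 4, 255) defined below refers to cells D4:D255;
# its col_index is col_letter_to_index('D') - 1 == 3 and its row_start_index is 3,
# the 0-based coordinates passed to xlrd in AnswerKey.get_raw_values().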
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'P', 8, 19),
'3-Month': DataIndex('s_p', 'Q', 10, 19),
'6-month': DataIndex('s_p', 'R', 13, 19),
'year': DataIndex('s_p', 'S', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'T', 8, 19),
'3-Month': DataIndex('s_p', 'U', 10, 19),
'6-month': DataIndex('s_p', 'V', 13, 19),
'year': DataIndex('s_p', 'W', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'V', 23, 34),
'3-Month': DataIndex('Sim Period', 'W', 25, 34),
'6-month': DataIndex('Sim Period', 'X', 28, 34),
'year': DataIndex('Sim Period', 'Y', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AD', 23, 34),
'3-Month': DataIndex('Sim Period', 'AE', 25, 34),
'6-month': DataIndex('Sim Period', 'AF', 28, 34),
'year': DataIndex('Sim Period', 'AG', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BB', 23, 34),
'3-Month': DataIndex('Sim Period', 'BC', 25, 34),
'6-month': DataIndex('Sim Period', 'BD', 28, 34),
'year': DataIndex('Sim Period', 'BE', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'AX', 23, 34),
'3-Month': DataIndex('Sim Period', 'AY', 25, 34),
'6-month': DataIndex('Sim Period', 'AZ', 28, 34),
'year': DataIndex('Sim Period', 'BA', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'Y', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AB', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AC', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return map(value_parser, self.get_raw_values(data_index))
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
})
| apache-2.0 |
yvlasov/ConProbIN | try-ml/plot_regression.py | 1 | 1435 | #!/usr/bin/python
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(seed=0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
print(X)
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| mit |
Aasmi/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
ua-snap/downscale | snap_scripts/baseline_climatologies/cru_cl20_climatology_preprocess_2km.py | 1 | 7559 | # # # PREPROCESS CRU CL20 1961-1990 CLIMATOLOGY DATA (http://www.cru.uea.ac.uk/cru/data/hrg/tmc)
# # author: Michael Lindgren (malindgren@alaska.edu) -- Sept. 2016
# # # #
import numpy as np
def xyz_to_grid( x, y, z, xi, yi, method='linear', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
matplotlib.mlab.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using `numpy.meshgrid` / `numpy.mgrid`
order (xi, yi)
method = 'linear' # hardwired currently due to multiprocessing bug with scipy griddata
'''
import numpy as np
from matplotlib.mlab import griddata
return griddata( x, y, z, xi, yi, interp=method ).astype( output_dtype )
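# Note (hypothetical alternative, not used by this script): matplotlib.mlab.griddata
# was removed in matplotlib 3.x; a roughly equivalent call with scipy would be
#   from scipy.interpolate import griddata as scipy_griddata
#   zi = scipy_griddata((x, y), z, (xi, yi), method=method).astype(output_dtype)
# keeping in mind that scipy fills points outside the convex hull with NaN
# instead of returning a masked array.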
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray( lon )
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
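# Illustrative example (values assumed from the grid built in __main__ below):
# with x = np.linspace(160, 300, cols) and y = np.linspace(90, 0, rows),
# transform_from_latlon(y, x) yields Affine(dx, 0.0, 160.0, 0.0, -dy, 90.0),
# i.e. an upper-left origin, north-up affine transform.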
def regrid( x ):
return xyz_to_grid( **x )
if __name__ == '__main__':
import os, rasterio
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from pathos.mp_map import mp_map
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess CRU CL2.0 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-p", "--base_path", action='store', dest='base_path', type=str, help="path to parent directory with a subdirector(ies)y storing the data" )
parser.add_argument( "-cru", "--cru_filename", action='store', dest='cru_filename', type=str, help="string path to the .tar.gz file location, downloaded from the CRU site" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string abbreviated name of the variable being processed." )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="string path to a template raster dataset to match the CRU CL2.0 to." )
# parse and unpack the args
args = parser.parse_args()
base_path = args.base_path
cru_filename = args.cru_filename
variable = args.variable
template_raster_fn = args.template_raster_fn
# # # # FOR TESTING # # # #
# base_path = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/cru_cl20'
# cru_filename = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS20/grid_10min_pre.dat.gz'
# variable = 'pre'
# template_raster_fn = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data/templates/akcan_2km/tas_mean_C_ar5_IPSL-CM5A-LR_rcp26_01_2006.tif'
# # # # # # # # # # # # # #
# build an output path to store the data generated with this script
output_path = os.path.join( base_path, 'climatologies','cru_cl20','2km', variable )
if not os.path.exists( output_path ):
os.makedirs( output_path )
months = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
colnames = [ 'lat', 'lon' ] + months
out_colnames = colnames
if variable == 'pre':
colnames = colnames + [ 'cv{}'.format(m) for m in months ]
if variable == 'elv':
colnames = ['lat','lon','01']
out_colnames = colnames
months_lookup = { count+1:month for count, month in enumerate( months ) }
cru_df = pd.read_csv( cru_filename, delim_whitespace=True, compression='gzip', header=None, names=colnames )
# slice to the pre values only. We don't want the cv values for now.
cru_df = cru_df[ out_colnames ]
# manually flip to PCLL for interpolation
cru_df['lon'][ cru_df['lon'] < 0 ] = cru_df['lon'][ cru_df['lon'] < 0 ] + 360
cru_df['geometry'] = cru_df.apply( lambda x: Point( x.lon, x.lat), axis=1 )
cru_shp = gpd.GeoDataFrame( cru_df, geometry='geometry', crs={'init':'EPSG:4326'} )
# set bounds to interpolate over
# xmin, ymin, xmax, ymax = (0,-90, 360, 90)
xmin, ymin, xmax, ymax = (160, 0, 300, 90) # smaller than global and larger than what we need.
# number of rows/cols: arcminutes per degree (60) multiplied by the extent in degrees
rows = 60 * ( ymax - ymin )
cols = 60 * ( xmax - xmin )
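# added note: for the extent above this gives rows = 60 * 90 = 5400 and
# cols = 60 * 140 = 8400 interpolation nodes, a finer mesh than the native
# 10-arcminute CRU spacing of 6 cells per degree.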
# build the output grid
x = np.linspace( xmin, xmax, cols )
y = np.linspace( ymax, ymin, rows ) # [ NEW March 2018 ]... flipped ymax/min order in this operation to be north-up...
xi, yi = np.meshgrid( x, y )
if variable == 'elv':
args_list = [{'x':np.array(cru_df['lon']),'y':np.array(cru_df['lat']),'z':np.array(cru_df['01']),'xi':xi,'yi':yi}]
else:
args_list = [ {'x':np.array(cru_df['lon']),'y':np.array(cru_df['lat']),'z':np.array(cru_df[month]),'xi':xi,'yi':yi} for month in months ]
# run interpolation in parallel
interped_grids = mp_map( regrid, args_list, nproc=12 )
# stack and give a proper nodata value
arr = np.array([ i.data for i in interped_grids ])
arr[ np.isnan(arr) ] = -9999
pcll_affine = transform_from_latlon( y, x )
meta = {'transform': pcll_affine,
'count': 1,
'crs': {'init':'epsg:4326'},
'driver': u'GTiff',
'dtype': 'float32',
'height': rows,
'nodata': -9999,
'width': cols,
'compress':'lzw'}
# TESTING WRITE BELOW
# with rasterio.open( 'test_pcll_cru_cl20_f.tif','w', **meta ) as out:
# out.write( arr[0,...], 1 )
# # # # # # # # # #
# set up a dir to toss the intermediate files into -- since we are using gdalwarp...
intermediate_path = os.path.join( output_path, 'intermediates' )
if not os.path.exists( intermediate_path ):
os.makedirs( intermediate_path )
out_paths = []
for i in range( arr.shape[0] ):
output_filename = os.path.join( intermediate_path, '{}_cru_cl20_akcan_{}_1961-1990_PCLL.tif'.format( variable, months_lookup[ i+1 ] ) )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( arr[ i, ... ], 1 )
out_paths = out_paths + [ output_filename ]
# # template dataset
template_raster = rasterio.open( template_raster_fn )
resolution = template_raster.res
template_meta = template_raster.meta
template_meta.update( compress='lzw', nodata=-9999 )
a,b,c,d = template_raster.bounds
# FLIP IT BACK TO GREENWICH-CENTERED using gdalwarp... then to AKCAN 2km...
for fn in out_paths:
os.system( 'gdalwarp -q -co COMPRESS=LZW -overwrite -dstnodata -9999 -multi -t_srs EPSG:4326 -te -180 0 180 90 {} {}'.format( fn, fn.replace( 'PCLL', 'LL' ) ) )
# build an output data set based on the template raster extent and reproject _into_ it
final_fn = fn.replace( '_PCLL', '' )
final_fn = os.path.join( output_path, os.path.basename(final_fn) )
if os.path.exists( final_fn ):
os.remove( final_fn )
mask = template_raster.read_masks( 1 ).astype( np.float32 )
with rasterio.open( final_fn, 'w', **template_meta ) as out:
out.write( np.empty_like( mask ), 1 )
os.system( 'gdalwarp -q -wo SOURCE_EXTRA=100 -multi -srcnodata -9999 -dstnodata -9999 {} {}'.format( fn.replace( 'PCLL', 'LL' ), final_fn ) )
# mask newly updated warped output dset
with rasterio.open( final_fn, 'r+' ) as rst:
arr = rst.read( 1 )
arr[ mask == 0 ] = -9999
# round the precip and temperature outputs to the desired precisions
if variable in ['pre','pr','ppt']:
arr[ arr != -9999 ] = np.around( arr[ arr != -9999 ], 0 )
elif variable in ['tmp','tas']:
arr[ arr != -9999 ] = np.around( arr[ arr != -9999 ], 0 )
rst.write( arr, 1 )
print( 'completed run of {}'.format( variable ) )
| mit |
rueberger/brio | brio/misc/plotting.py | 1 | 11003 | """
This module contains various plotting utilities
"""
import time
import itertools
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import string
from .sta import auto_sta, factor
from brio.blocks.layer import LIFLayer
plt.ion()
SEAMAP = mpl.colors.ListedColormap(sns.cubehelix_palette(256, start=.5, rot=-.75))
def img_slideshow(imgs):
""" plots a slideshow of the imgs
:param imgs: list or array of images
:returns: None
:rtype: None
"""
fig = plt.figure()
for img in imgs:
plt.clf()
plt.imshow(img, cmap=SEAMAP, interpolation='none')
fig.canvas.draw()
time.sleep(.1)
def hist_slideshow(arr):
""" plots a histogram or density plot of the scalar distribution data
:param arr: list or array of draws from a scalar distribution
:returns: None
:rtype: None
"""
fig = plt.figure()
for distr in arr:
if len(distr) != 0:
plt.clf()
plt.hist(distr, bins=250, normed=True)
fig.canvas.draw()
time.sleep(.1)
class ParamPlot(object):
"""
This class provides a plot to visualize network parameters that can be updated on the fly
"""
#pylint: disable=too-few-public-methods
def __init__(self, net, layers=None, show_all=False, shape=None):
""" Initialize this class
:param net: a network that is currently being trained
:param layers: layers to display. list of indices. by default all
:param show_all: Show all parameters. Default of False only shows weight distributions
:returns: the initialized ParamPlot object
:rtype: ParamPlot
"""
if layers is not None:
self.layers = [net.layers[idx] for idx in layers]
else:
self.layers = net.layers[1:]
self.net = net
self.cons = net.connections.values()
self.show_all = show_all
if shape is None:
if show_all:
nrows = max(len(self.cons), len(self.layers) * 2)
ncols=3
else:
# need something else: this works terribly for primes
nrows, ncols = factor(len(self.cons))
else:
nrows, ncols = shape
self.fig, self.ax_arr = plt.subplots(nrows=nrows,
ncols=ncols, figsize=(16, 10))
self.t = np.arange(self.net.params.presentations)
def update_plot(self):
""" updates the plot without creating a new figure
:returns: None
:rtype: None
"""
sns.set_style("whitegrid")
self.fig.suptitle("Parameter distributions at timestep {}".format(self.net.t_counter))
for axis in np.ravel(self.ax_arr):
axis.clear()
if self.show_all:
for con, axis in zip(self.cons, self.ax_arr[:, 0]):
axis.hist(np.ravel(con.weights), bins=250, normed=True)
axis.set_title("Weight distribution for {}".format(str(con)))
for layer, axis in zip(self.layers, self.ax_arr[:, 1]):
axis.hist(np.ravel(layer.bias), bins=20, normed=True)
axis.set_title("Bias distribution for {}".format(str(layer)))
for layer, axis in zip(self.layers, self.ax_arr[len(self.layers):, 1]):
axis.hist(np.ravel(layer.lfr_mean), bins=20, normed=True)
axis.set_title("Firing rate distribution for {}".format(str(layer)))
for layer, axis in zip(self.layers, self.ax_arr[len(self.layers):, 2]):
if isinstance(layer, LIFLayer):
potentials = np.array(layer.pot_history)[:, :, -1].T
for u_t in potentials:
axis.plot(self.t, u_t)
axis.set_title("Potential history for one stimulus {}".format(str(layer)))
else:
for con, axis in zip(self.cons, self.ax_arr.ravel()):
axis.hist(np.ravel(con.weights), bins=250, normed=True)
axis.set_title("Weight distribution for {}".format(str(con)), fontsize=6)
self.fig.subplots_adjust(hspace=0.4, wspace=0.3)
plt.draw()
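# Illustrative usage sketch (assumed calling pattern, not part of this module):
# the plot is meant to be created once and refreshed from the training loop, e.g.
#   plot = ParamPlot(net, show_all=True)
#   for _ in range(n_updates):
#       net.train_step()  # hypothetical training call; substitute the real one
#       plot.update_plot()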
def plot_param_distr(net):
""" plots histograms of the weight distributions in the different layers
:param net: a network that is currently being trained
:returns: None
:rtype: None
"""
cons = list(net.connections)
nrows = max(len(cons), len(net.layers[1:]) * 2)
fig, ax_arr = plt.subplots(nrows=nrows, ncols=2, figsize=(16, 10))
for con, axis in zip(cons, ax_arr[:, 0]):
axis.hist(np.ravel(con.weights), bins=250, normed=True)
axis.set_title("Weight distribution for {}".format(str(con)))
for layer, axis in zip(net.layers[1:], ax_arr[:, 1]):
axis.hist(np.ravel(layer.bias), bins=250, normed=True)
axis.set_title("Bias distribution for {}".format(str(layer)))
for layer, axis in zip(net.layers[1:], ax_arr[len(net.layers[1:]):, 1]):
axis.hist(np.ravel(layer.firing_rates), bins=250, normed=True)
axis.set_title("Firing rate distibution for {}".format(str(layer)))
fig.subplots_adjust(hspace=0.4)
plt.draw()
def plot_receptive_fields(net, layer_idx, slideshow=True,
n_samples=1E5, stimulus_generator=None, stereo=False):
""" Make a plot of the receptive field of network
:param net: trained network to plot the receptive fields of
:param layer_idx: idx of the layer you would like to plot. keys into network.layers
:param slideshow: if True show receptive fields one at a time.
Otherwise show them all at the same time
:param n_samples: number of samples to compute STAs with
:param stimulus_generator: a generator object. calling next on this generator must return
an array that can be flattened to the shape of the input layer.
By default uniform random stimuli are generated for the relevant domain
:param stereo: if True split stereo images into two parts
:returns: None
:rtype: None
"""
assert isinstance(layer_idx, int)
response_dict, stimuli = auto_sta(net, n_samples, stimulus_generator, layer_idx=[layer_idx])
are_imgs = (stimuli.ndim == 3)
if are_imgs:
imgs = np.zeros((net.layers[layer_idx].n_dims, stimuli.shape[1], stimuli.shape[2]))
for unit_idx in xrange(net.layers[layer_idx].n_dims):
response_idx = response_dict[(layer_idx, unit_idx)]
imgs[unit_idx] = np.mean(stimuli[response_idx], axis=0)
if slideshow:
img_slideshow(imgs)
else:
if stereo:
assert imgs.shape[1] == 2 * imgs.shape[2]
side_len = imgs.shape[2]
l_imgs = imgs[:, :side_len, :]
r_imgs = imgs[:, side_len:, :]
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(18, 14))
plot_concat_imgs(l_imgs, axis=axes[0])
plot_concat_imgs(r_imgs, axis=axes[1])
else:
plot_concat_imgs(imgs)
else:
distrs = [response_dict[layer_idx, unit_idx] for unit_idx in xrange(net.layers[layer_idx].n_dims)]
if slideshow:
hist_slideshow(distrs)
else:
raise NotImplementedError("Have yet to implement multiple scalar distr")
def plot_concat_imgs(imgs, border_thickness=2, axis=None, normalize=False):
""" concatenate the imgs together into one big image separated by borders
:param imgs: list or array of images. total number of images must be a perfect square and
images must be square
:param border_thickness: how many pixels of border between adjacent images
:param axis: optional matplotlib axis object to plot on
:returns: None; the concatenated receptive-field image is drawn with imshow
:rtype: None
"""
sns.set_style('dark')
assert isinstance(border_thickness, int)
assert int(np.sqrt(len(imgs))) == np.sqrt(len(imgs))
assert imgs[0].shape[0] == imgs[0].shape[1]
if normalize:
imgs = np.array(imgs)
imgs /= np.sum(imgs ** 2, axis=(1,2)).reshape(-1, 1, 1)
img_length = imgs[0].shape[0]
layer_length = int(np.sqrt(len(imgs)))
concat_length = layer_length * img_length + (layer_length - 1) * border_thickness
border_color = np.nan
concat_rf = np.ones((concat_length, concat_length)) * border_color
for x_idx, y_idx in itertools.product(xrange(layer_length),
xrange(layer_length)):
# this keys into imgs
flat_idx = x_idx * layer_length + y_idx
x_offset = border_thickness * x_idx
y_offset = border_thickness * y_idx
# not sure how to do a continuation line cleanly here
concat_rf[x_idx * img_length + x_offset: (x_idx + 1) * img_length + x_offset,
y_idx * img_length + y_offset: (y_idx + 1) * img_length + y_offset] = imgs[flat_idx]
if axis is not None:
axis.imshow(concat_rf, interpolation='none', aspect='auto')
else:
plt.imshow(concat_rf, interpolation='none', aspect='auto')
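# Sizing example (illustrative only): for 16 images of shape (8, 8) and the default
# border_thickness of 2, layer_length is 4 and the mosaic measures
# 4 * 8 + 3 * 2 = 38 pixels on a side, with NaN-valued borders between the tiles.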
def write_hist_to_stdout(data, n_bins=25, lines=10):
""" Write a text based histogram to stdout
:param data: the data to visualize
:param n_bins: number of bins
:param lines: how many vertical lines for the histogram to span
:returns: None
:rtype: None
"""
pdf, bins = np.histogram(data, bins=n_bins, density=True)
scaled_pdf = pdf * lines
col_width = 4
x_axis_width = col_width * n_bins
y_label_width = 5
# might want to use a string writer?
for height in range(lines)[::-1]:
line = string.join(['*** ' if d > height else ' ' for d in scaled_pdf], '')
print "{:3.1f} | {}".format(height / float(lines), line)
axis = '-' * n_bins * col_width
mid_sep = (x_axis_width - 5) / 2
x_labels = ['{:5.2f}'.format(bins[0]),
' ' * mid_sep,
'{:5.2f}'.format(bins[n_bins / 2]),
' ' * mid_sep,
'{:5.2f}'.format(bins[-1])
]
print " {}".format(axis)
print string.join(x_labels, '')
def visualize_inhibition(einet, unit_idx=0, n_show=9):
"""Shows the receptive field of the excitatory cells that the inhibitory cell
at unit_idx inhibits the most
:param einet: must be an einet
:param unit_idx: unit_idx of inhibitory layer
:returns: None
:rtype: None
"""
from misc.sta import factor
inhib_weights = einet.layers[1].outputs[0].weights.T
oja_weights = einet.layers[0].outputs[0].weights.T
e_idx = np.argsort(inhib_weights[unit_idx])[::-1][:n_show]
img_dims = factor(einet.layers[0].n_dims)
imgs = [w.reshape(*img_dims) for w in oja_weights[e_idx]]
print inhib_weights[unit_idx][e_idx]
plot_concat_imgs(imgs)
| gpl-2.0 |
vrv/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ssaeger/scikit-learn | sklearn/utils/multiclass.py | 40 | 12966 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else (because there are
no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
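# Illustrative usage sketch (not part of this module): an estimator implementing
# the partial_fit API would typically call this helper as follows:
#   def partial_fit(self, X, y, classes=None):
#       if _check_partial_fit_first_call(self, classes):
#           # first call: self.classes_ was just set, so allocate any
#           # per-class state here before the incremental update
#           ...
#       # ...perform the incremental update with X and y...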
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
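# Small worked example (added for illustration): for a dense single-output target
# y = np.array([[1], [2], [2]]), class_distribution(y) returns
# ([array([1, 2])], [2], [array([0.333..., 0.666...])]), i.e. the classes seen in
# each column, the number of classes per column and the empirical class priors.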
| bsd-3-clause |
jdanbrown/pydatalab | google/datalab/bigquery/_schema.py | 5 | 12449 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Table and View Schema APIs."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import range
from past.builtins import basestring
from builtins import object
import datetime
import pandas
import pprint
class SchemaField(object):
""" Represents a single field in a Table schema.
This has the properties:
- name: the flattened, fully-qualified name of the field.
- type: the type of the field as a string ('INTEGER', 'BOOLEAN', 'FLOAT', 'STRING'
or 'TIMESTAMP').
- mode: the mode of the field; 'NULLABLE' by default.
- description: a description of the field, if known; empty string by default.
"""
def __init__(self, name, type, mode='NULLABLE', description=''):
self.name = name
self.type = type
self.mode = mode
self.description = description
def _repr_sql_(self):
"""Returns a representation of the field for embedding into a SQL statement.
Returns:
A formatted field name for use within SQL statements.
"""
return self.name
def __eq__(self, other):
""" Compare two schema field objects for equality (ignoring description). """
return self.name == other.name and self.type == other.type\
and self.mode == other.mode
def __repr__(self):
""" Returns the schema field as a string form of a dictionary. """
return 'BigQuery Schema Field:\n%s' % pprint.pformat(vars(self), width=1)
def __getitem__(self, item):
# TODO(gram): Currently we need this for a Schema object to work with the Parser object.
# Eventually if we change Parser to only work with Schema (and not also with the
# schema dictionaries in query results) we can remove this.
if item == 'name':
return self.name
if item == 'type':
return self.type
if item == 'mode':
return self.mode
if item == 'description':
return self.description
class Schema(list):
"""Represents the schema of a BigQuery table as a flattened list of objects representing fields.
Each field object has name, type, mode and description properties. Nested fields
get flattened with their full-qualified names. So a Schema that has an object A with nested
field B will be represented as [(name: 'A', ...), (name: 'A.b', ...)].
"""
@staticmethod
def _from_dataframe(dataframe, default_type='STRING'):
"""
Infer a BigQuery table schema from a Pandas dataframe. Note that if you don't explicitly set
the types of the columns in the dataframe, they may be of a type that forces coercion to
STRING, so even though the fields in the dataframe themselves may be numeric, the type in the
derived schema may not be. Hence it is prudent to make sure the Pandas dataframe is typed
correctly.
Args:
dataframe: The DataFrame.
default_type : The default big query type in case the type of the column does not exist in
the schema. Defaults to 'STRING'.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
type_mapping = {
'i': 'INTEGER',
'b': 'BOOLEAN',
'f': 'FLOAT',
'O': 'STRING',
'S': 'STRING',
'U': 'STRING',
'M': 'TIMESTAMP'
}
fields = []
for column_name, dtype in dataframe.dtypes.iteritems():
fields.append({'name': column_name,
'type': type_mapping.get(dtype.kind, default_type)})
return fields
@staticmethod
def _get_field_entry(name, value):
entry = {'name': name}
if isinstance(value, datetime.datetime):
_type = 'TIMESTAMP'
elif isinstance(value, datetime.date):
_type = 'DATE'
elif isinstance(value, datetime.time):
_type = 'TIME'
elif isinstance(value, bool):
_type = 'BOOLEAN'
elif isinstance(value, float):
_type = 'FLOAT'
elif isinstance(value, int):
_type = 'INTEGER'
elif isinstance(value, dict) or isinstance(value, list):
_type = 'RECORD'
entry['fields'] = Schema._from_record(value)
else:
_type = 'STRING'
entry['type'] = _type
return entry
@staticmethod
def _from_dict_record(data):
"""
Infer a BigQuery table schema from a dictionary. If the dictionary has entries that
are in turn OrderedDicts these will be turned into RECORD types. Ideally this will
be an OrderedDict but it is not required.
Args:
data: The dict to infer a schema from.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry(name, value) for name, value in list(data.items())]
@staticmethod
def _from_list_record(data):
"""
Infer a BigQuery table schema from a list of values.
Args:
data: The list of values.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
return [Schema._get_field_entry('Column%d' % (i + 1), value) for i, value in enumerate(data)]
@staticmethod
def _from_record(data):
"""
Infer a BigQuery table schema from a list of fields or a dictionary. The type of the elements
is used. For a list, the field names are simply 'Column1', 'Column2', etc.
Args:
data: The list of fields or dictionary.
Returns:
A list of dictionaries containing field 'name' and 'type' entries, suitable for use in a
BigQuery Tables resource schema.
"""
if isinstance(data, dict):
return Schema._from_dict_record(data)
elif isinstance(data, list):
return Schema._from_list_record(data)
else:
raise Exception('Cannot create a schema from record %s' % str(data))
@staticmethod
def from_record(source):
"""
Infers a table/view schema from a single record that can contain a list of fields or a
dictionary of fields. The type of the elements is used for the types in the schema. For a
dict, key names are used for column names while for a list, the field names are simply named
'Column1', 'Column2', etc. Note that if using a dict you may want to use an OrderedDict
to ensure column ordering is deterministic.
Args:
source: The list of field values or dictionary of key/values.
Returns:
A Schema for the data.
"""
# TODO(gram): may want to allow an optional second argument which is a list of field
# names; could be useful for the record-containing-list case.
return Schema(Schema._from_record(source))
@staticmethod
def from_data(source):
"""Infers a table/view schema from its JSON representation, a list of records, or a Pandas
dataframe.
Args:
source: the Pandas Dataframe, a dictionary representing a record, a list of heterogeneous
data (record) or homogeneous data (list of records) from which to infer the schema, or
a definition of the schema as a list of dictionaries with 'name' and 'type' entries
and possibly 'mode' and 'description' entries. Only used if no data argument was provided.
'mode' can be 'NULLABLE', 'REQUIRED' or 'REPEATED'. For the allowed types, see:
https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes
Note that there is potential ambiguity when passing a list of lists or a list of
dicts between whether that should be treated as a list of records or a single record
that is a list. The heuristic used is to check the length of the entries in the
list; if they are equal then a list of records is assumed. To avoid this ambiguity
you can instead use the Schema.from_record method which assumes a single record,
in either list of values or dictionary of key-values form.
Returns:
A Schema for the data.
"""
if isinstance(source, pandas.DataFrame):
bq_schema = Schema._from_dataframe(source)
elif isinstance(source, list):
if len(source) == 0:
bq_schema = source
elif all(isinstance(d, dict) for d in source):
if all('name' in d and 'type' in d for d in source):
# It looks like a bq_schema; use it as-is.
bq_schema = source
elif all(len(d) == len(source[0]) for d in source):
bq_schema = Schema._from_dict_record(source[0])
else:
raise Exception(('Cannot create a schema from heterogeneous list %s; perhaps you meant ' +
'to use Schema.from_record?') % str(source))
elif isinstance(source[0], list) and \
all([isinstance(l, list) and len(l) == len(source[0]) for l in source]):
# A list of lists all of the same length; treat first entry as a list record.
bq_schema = Schema._from_record(source[0])
else:
# A heterogeneous list; treat as a record.
raise Exception(('Cannot create a schema from heterogeneous list %s; perhaps you meant ' +
'to use Schema.from_record?') % str(source))
elif isinstance(source, dict):
bq_schema = Schema._from_record(source)
else:
raise Exception('Cannot create a schema from %s' % str(source))
return Schema(bq_schema)
def __init__(self, definition=None):
"""Initializes a Schema from its raw JSON representation, a Pandas Dataframe, or a list.
Args:
definition: a definition of the schema as a list of dictionaries with 'name' and 'type'
entries and possibly 'mode' and 'description' entries. Only used if no data argument was
provided. 'mode' can be 'NULLABLE', 'REQUIRED' or 'REPEATED'. For the allowed types, see:
https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes
"""
super(Schema, self).__init__()
self._map = {}
self._bq_schema = definition
self._populate_fields(definition)
def __getitem__(self, key):
"""Provides ability to lookup a schema field by position or by name.
"""
if isinstance(key, basestring):
return self._map.get(key, None)
# noinspection PyCallByClass
return list.__getitem__(self, key)
def _add_field(self, name, type, mode='NULLABLE', description=''):
field = SchemaField(name, type, mode, description)
self.append(field)
self._map[name] = field
def find(self, name):
""" Get the index of a field in the flattened list given its (fully-qualified) name.
Args:
name: the fully-qualified name of the field.
Returns:
The index of the field, if found; else -1.
"""
for i in range(0, len(self)):
if self[i].name == name:
return i
return -1
def _populate_fields(self, data, prefix=''):
for field_data in data:
name = prefix + field_data['name']
type = field_data['type']
self._add_field(name, type, field_data.get('mode', None),
field_data.get('description', None))
if type == 'RECORD':
# Recurse into the nested fields, using this field's name as a prefix.
self._populate_fields(field_data.get('fields'), name + '.')
def __repr__(self):
""" Returns a string representation of the schema for notebooks."""
return 'BigQuery Schema - Fields:\n%s' % pprint.pformat(self._bq_schema, width=1)
def __eq__(self, other):
""" Compares two schema for equality. """
other_map = other._map
if len(self._map) != len(other_map):
return False
for name in self._map.keys():
if name not in other_map:
return False
if not self._map[name] == other_map[name]:
return False
return True
def __ne__(self, other):
""" Compares two schema for inequality. """
return not(self.__eq__(other))
| apache-2.0 |
schreiberx/sweet | benchmarks_sphere/paper_jrn_sl_exp/test_compare_wt_dt_vs_accuracy_galewsky_M512_6hours_l_n_uv/postprocessing_consolidate_prog_vrt.py | 8 | 6177 | #! /usr/bin/env python3
import sys
import math
import copy
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
sys.path.append('../')
import pretty_plotting as pp
sys.path.pop()
mule_plotting_usetex(False)
groups = ['runtime.timestepping_method']
tagnames_y = [
'sphere_data_diff_prog_vrt.res_norm_l1',
'sphere_data_diff_prog_vrt.res_norm_l2',
'sphere_data_diff_prog_vrt.res_norm_linf',
]
j = JobsData('./job_bench_*', verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(key)
for tagname_y in tagnames_y:
params = []
params += [
{
'tagname_x': 'runtime.timestep_size',
'xlabel': "Timestep size (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Timestep size vs. error',
'xscale': 'log',
'yscale': 'log',
'convergence': True,
},
]
params += [
{
'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',
'xlabel': "Wallclock time (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Wallclock time vs. error',
'xscale': 'log',
'yscale': 'log',
'convergence': False,
},
]
for param in params:
tagname_x = param['tagname_x']
xlabel = param['xlabel']
ylabel = param['ylabel']
title = param['title']
xscale = param['xscale']
yscale = param['yscale']
convergence = param['convergence']
print("*"*80)
print("Processing tag "+tagname_x)
print("*"*80)
if True:
"""
Plotting format
"""
# Filter out errors beyond this value!
def data_filter(x, y, jobdata):
if y is None:
return True
x = float(x)
y = float(y)
if math.isnan(y):
return True
if 'l1' in tagname_y:
if y > 1e1:
print("Sorting out L1 data "+str(y))
return True
elif 'l2' in tagname_y:
if y > 1e1:
print("Sorting out L2 data "+str(y))
return True
elif 'linf' in tagname_y:
if y > 1e2:
print("Sorting out Linf data "+str(y))
return True
else:
raise Exception("Unknown y tag "+tagname_y)
return False
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
data_filter = data_filter
)
fileid = "output_plotting_"+tagname_x.replace('.', '-').replace('_', '-')+"_vs_"+tagname_y.replace('.', '-').replace('_', '-')
if True:
#
# Proper naming and sorting of each label
#
# new data dictionary
data_new = {}
for key, data in d.data.items():
# generate nice tex label
#data['label'] = pp.get_pretty_name(key)
data['label'] = key #pp.get_pretty_name(key)
key_new = pp.get_pretty_name_order(key)+'_'+key
# copy data
data_new[key_new] = copy.copy(data)
# Copy back new data table
d.data = data_new
p = Plotting_ScatteredData()
def fun(p):
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
plt.tick_params(axis='x', which='minor')
p.ax.xaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.xaxis.get_minor_ticks():
tick.label.set_fontsize(8)
plt.tick_params(axis='y', which='minor')
p.ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_major_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.yaxis.get_minor_ticks():
tick.label.set_fontsize(6)
#
# Add convergence information
#
if convergence:
if 'l1' in tagname_y:
ps = [100, 1e-9]
elif 'l2' in tagname_y:
ps = [100, 5e-8]
elif 'linf' in tagname_y:
ps = [100, 1e-7]
else:
ps = [100, 1e-0]
p.add_convergence(2, ps)
annotate_text_template = "{:.1f} / {:.3f}"
p.plot(
data_plotting = d.get_data_float(),
xlabel = xlabel,
ylabel = ylabel,
title = title,
xscale = xscale,
yscale = yscale,
#annotate = True,
#annotate_each_nth_value = 3,
#annotate_fontsize = 6,
#annotate_text_template = annotate_text_template,
legend_fontsize = 8,
grid = True,
outfile = fileid+".pdf",
lambda_fun = fun,
)
print("Data plotting:")
d.print()
d.write(fileid+".csv")
print("Info:")
print(" NaN: Errors in simulations")
print(" None: No data available")
| mit |
QuLogic/vispy | vispy/testing/__init__.py | 21 | 2415 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Testing
=======
This module provides functions useful for running tests in vispy.
Tests can be run in a few ways:
* From Python, you can import ``vispy`` and do ``vispy.test()``.
* From the source root, you can do ``make test`` which wraps to
a call to ``python make test``.
There are various different testing "modes", including:
* "full": run all tests.
* any backend name (e.g., "glfw"): run application/GL tests using a
specific backend.
* "nobackend": run tests that do not require a backend.
* "examples": run repo examples to check for errors and warnings.
* "flake": check style errors.
Examples get automatically tested unless they have a special comment toward
the top ``# vispy: testskip``. Examples that should be tested should be
formatted so that 1) a ``Canvas`` class is defined, or a ``canvas`` class
is instantiated; and 2) the ``app.run()`` call is protected by a check
if ``__name__ == "__main__"``. This makes it so that the event loop is not
started when running examples in the test suite -- the test suite instead
manually updates the canvas (using ``app.process_events()``) for under one
second to ensure that things like timer events are processed.
For examples on how to test various bits of functionality (e.g., application
functionality, or drawing things with OpenGL), it's best to look at existing
examples in the test suite.
The code base gets automatically tested by Travis-CI (Linux) and AppVeyor
(Windows) on Python 2.6, 2.7, 3.4. There are multiple testing modes that
use e.g. full dependencies, minimal dependencies, etc. See ``.travis.yml``
to determine what automatic tests are run.
"""
from ._testing import (SkipTest, requires_application, requires_ipython, # noqa
requires_img_lib, # noqa
has_backend, requires_pyopengl, # noqa
requires_scipy, has_matplotlib, # noqa
save_testing_image, TestingCanvas, has_pyopengl, # noqa
run_tests_if_main,
assert_is, assert_in, assert_not_in, assert_equal,
assert_not_equal, assert_raises, assert_true, # noqa
raises) # noqa
from ._runners import test # noqa
| bsd-3-clause |
madjelan/Data-Science-45min-Intros | ml-basis-expansion-101/kernel.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
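# Hedged illustration (not part of the original example; the helper name and
# signature are ours): conceptually, a Gaussian KDE evaluated at points ``x``
# is just the average of Gaussian bumps centered on each sample, which is what
# KernelDensity computes far more efficiently below.
def naive_gaussian_kde(x, samples, bandwidth):
    # x: evaluation points, samples: 1D array of data, bandwidth: kernel width
    return np.mean([norm(s, bandwidth).pdf(x) for s in samples], axis=0)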
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| unlicense |
jkleve/Optimization-Algorithms | utils/plot_utils.py | 1 | 2558 | import matplotlib.pyplot as plt # plotting
import matplotlib.mlab as mlab
import numpy as np
np.seterr(divide='ignore', invalid='ignore') # TODO FIX!!!
import sys
sys.path.append("../functions")
import rosenbrock_function
class PlotUtils:
def __init__(self, num_dims, bounds, func):
# you can only plot up to 2 dimensions
if num_dims > 2:
print("Can not plot more than 2 dimensions")
raise ValueError
if num_dims != 2:
raise ValueError("Feel free to implement PlotUtils for 1 dimension")
# needed to update plot on the fly
plt.ion()
self.num_dims = num_dims
self.bounds = bounds
self.fig, self.ax = plt.subplots()
self.line, = self.ax.plot([], [], 'ro')
self.ax.grid()
delta1 = float(bounds[0][1] - bounds[0][0]) / 100
delta2 = float(bounds[1][1] - bounds[1][0]) / 100
x1 = np.arange(bounds[0][0], bounds[0][1], delta1)
x2 = np.arange(bounds[1][0], bounds[1][1], delta2)
X, Y = np.meshgrid(x1, x2)
Z = func([X, Y])
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are drawn
# over the line segments of the contour, removing the lines beneath
# the label
#plt.figure()
if func == rosenbrock_function.objective_function:
CS = plt.contour(X, Y, Z, range(0,11) + range(100, 1000, 100))
else:
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
#plt.title('Simplest default with labels')
xlim_l = bounds[0][0]
xlim_u = bounds[0][1]
ylim_l = bounds[1][0]
ylim_u = bounds[1][1]
self.ax.set_xlim(xlim_l, xlim_u)
self.ax.set_ylim(ylim_l, ylim_u)
def __del__(self):
plt.close(self.fig)
def plot(self, points):
x1 = [point[0] for point in points]
x2 = [point[1] for point in points]
self.line.set_xdata(x1)
self.line.set_ydata(x2)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
if __name__ == "__main__":
import sys # check which version of python is running
# check if running with python3 or python2
PY3 = sys.version_info[0] == 3
data = [(0,0),(1,1),(2,2),(-3,-3)]
pu = PlotUtils(2, [(-10,10),(-10,10)], rosenbrock_function.objective_function)
pu.plot(data)
if PY3:
input("Waiting for Enter to be pressed ...")
else:
raw_input("Waiting for Enter to be pressed ...")
| mit |
davidnmurray/iris | lib/iris/tests/unit/plot/test_pcolor.py | 1 | 2423 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.pcolor` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.unit.plot import TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.pcolor(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.pcolor(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.pcolor(self.cube, coords=('str_coord', 'bar'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.pcolor(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.pcolor,
self.lat_lon_cube, axes=ax)
plt.close(fig)
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
brucexiaok/HyperLPR | hyperlpr/segmentation.py | 1 | 9655 | #coding=utf-8
import cv2
import numpy as np
# from matplotlib import pyplot as plt
import scipy.ndimage.filters as f
import scipy
import time
import scipy.signal as l
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPool2D
from keras.optimizers import SGD
from keras import backend as K
K.set_image_dim_ordering('tf')
def Getmodel_tensorflow(nb_classes):
# nb_classes = len(charset)
img_rows, img_cols = 23, 23
# number of convolutional filters to use
nb_filters = 16
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# x = np.load('x.npy')
# y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes)
# weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3
# weight = dict(zip(range(3063), weight / weight.mean()))  # adjust weights: high-frequency characters take priority
model = Sequential()
model.add(Conv2D(nb_filters, (nb_conv, nb_conv),input_shape=(img_rows, img_cols,1)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
model.add(Conv2D(nb_filters, (nb_conv, nb_conv)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
model.add(Flatten())
model.add(Dense(256))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
return model
def Getmodel_tensorflow_light(nb_classes):
# nb_classes = len(charset)
img_rows, img_cols = 23, 23
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# x = np.load('x.npy')
# y = np_utils.to_categorical(range(3062)*45*5*2, nb_classes)
# weight = ((type_class - np.arange(type_class)) / type_class + 1) ** 3
# weight = dict(zip(range(3063), weight / weight.mean()))  # adjust weights: high-frequency characters take priority
model = Sequential()
model.add(Conv2D(nb_filters, (nb_conv, nb_conv),input_shape=(img_rows, img_cols, 1)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
model.add(Conv2D(nb_filters, (nb_conv * 2, nb_conv * 2)))
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(nb_pool, nb_pool)))
model.add(Flatten())
model.add(Dense(32))
# model.add(Dropout(0.25))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
model = Getmodel_tensorflow_light(3)
model2 = Getmodel_tensorflow(3)
import os
model.load_weights("./model/char_judgement1.h5")
# model.save("./model/char_judgement1.h5")
model2.load_weights("./model/char_judgement.h5")
# model2.save("./model/char_judgement.h5")
model = model2
def get_median(data):
data = sorted(data)
size = len(data)
# print size
if size % 2 == 0: # list length is even
median = (data[size//2]+data[size//2-1])/2
data[0] = median
if size % 2 == 1: # list length is odd
median = data[(size-1)//2]
data[0] = median
return data[0]
import time
def searchOptimalCuttingPoint(rgb,res_map,start,width_boundingbox,interval_range):
t0 = time.time()
#
# for x in xrange(10):
# res_map = np.vstack((res_map,res_map[-1]))
length = res_map.shape[0]
refine_s = -2;
if width_boundingbox>20:
refine_s = -9
score_list = []
interval_big = int(width_boundingbox * 0.3) #
p = 0
for zero_add in xrange(start,start+50,3):
# for interval_small in xrange(-0,width_boundingbox/2):
for i in xrange(-8,int(width_boundingbox/1)-8):
for refine in xrange(refine_s,width_boundingbox/2+3):
p1 = zero_add# this point is province
p2 = p1 + width_boundingbox +refine #
p3 = p2 + width_boundingbox + interval_big+i+1
p4 = p3 + width_boundingbox +refine
p5 = p4 + width_boundingbox +refine
p6 = p5 + width_boundingbox +refine
p7 = p6 + width_boundingbox +refine
if p7>=length:
continue
score = res_map[p1][2]*3 -(res_map[p3][1]+res_map[p4][1]+res_map[p5][1]+res_map[p6][1]+res_map[p7][1])+7
# print score
score_list.append([score,[p1,p2,p3,p4,p5,p6,p7]])
p+=1
print p
score_list = sorted(score_list , key=lambda x:x[0])
# for one in score_list[-1][1]:
# cv2.line(debug,(one,0),(one,36),(255,0,0),1)
# #
# cv2.imshow("one",debug)
# cv2.waitKey(0)
#
print "寻找最佳点",time.time()-t0
return score_list[-1]
import sys
sys.path.append('../')
import recognizer as cRP
from skimage.filters import (threshold_otsu, threshold_niblack,
threshold_sauvola)
import niblack_thresholding as nt
def refineCrop(sections,width=16):
new_sections = []
for section in sections:
# cv2.imshow("section¡",section)
# cv2.blur(section,(3,3),3)
sec_center = np.array([section.shape[1]/2,section.shape[0]/2])
binary_niblack = nt.niBlackThreshold(section,17,-0.255)
imagex, contours, hierarchy = cv2.findContours(binary_niblack,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
boxs = []
for contour in contours:
x,y,w,h = cv2.boundingRect(contour)
ratio = w/float(h)
if ratio < 1 and h > 36*0.4 and y < 16:
box = [x,y,w,h]
boxs.append([box,np.array([x+w/2,y+h/2])])
# cv2.rectangle(section,(x,y),(x+w,y+h),255,1)
# print boxs
dis_ = np.array([ ((one[1]-sec_center)**2).sum() for one in boxs])
if len(dis_)==0:
kernal = [0, 0, section.shape[1], section.shape[0]]
else:
kernal = boxs[dis_.argmin()][0]
center_c = (kernal[0]+kernal[2]/2,kernal[1]+kernal[3]/2)
w_2 = int(width/2)
h_2 = kernal[3]/2
if center_c[0] - w_2< 0:
w_2 = center_c[0]
new_box = [center_c[0] - w_2,kernal[1],width,kernal[3]]
# print new_box[2]/float(new_box[3])
if new_box[2]/float(new_box[3])>0.5:
# print "异常"
h = int((new_box[2]/0.35 )/2)
if h>35:
h = 35
new_box[1] = center_c[1]- h
if new_box[1]<0:
new_box[1] = 1
new_box[3] = h*2
section = section[new_box[1]:new_box[1]+new_box[3],new_box[0]:new_box[0]+new_box[2]]
# cv2.imshow("section",section)
# cv2.waitKey(0)
new_sections.append(section)
# print new_box
return new_sections
def slidingWindowsEval(image):
windows_size = 16;
stride = 1
height= image.shape[0]
t0 = time.time()
data_sets = []
for i in range(0,image.shape[1]-windows_size+1,stride):
data = image[0:height,i:i+windows_size]
data = cv2.resize(data,(23,23))
# cv2.imshow("image",data)
data = cv2.equalizeHist(data)
data = data.astype(np.float)/255
data= np.expand_dims(data,3)
data_sets.append(data)
res = model.predict(np.array(data_sets))
print "分割",time.time() - t0
pin = res
p = 1 - (res.T)[1]
p = f.gaussian_filter1d(np.array(p,dtype=np.float),3)
lmin = l.argrelmax(np.array(p),order = 3)[0]
interval = []
for i in xrange(len(lmin)-1):
interval.append(lmin[i+1]-lmin[i])
if(len(interval)>3):
mid = get_median(interval)
else:
return []
pin = np.array(pin)
res = searchOptimalCuttingPoint(image,pin,0,mid,3)
cutting_pts = res[1]
last = cutting_pts[-1] + mid
if last < image.shape[1]:
cutting_pts.append(last)
else:
cutting_pts.append(image.shape[1]-1)
name = ""
confidence =0.00
seg_block = []
for x in xrange(1,len(cutting_pts)):
if x != len(cutting_pts)-1 and x!=1:
section = image[0:36,cutting_pts[x-1]-2:cutting_pts[x]+2]
elif x==1:
c_head = cutting_pts[x - 1]- 2
if c_head<0:
c_head=0
c_tail = cutting_pts[x] + 2
section = image[0:36, c_head:c_tail]
elif x==len(cutting_pts)-1:
end = cutting_pts[x]
diff = image.shape[1]-end
c_head = cutting_pts[x - 1]
c_tail = cutting_pts[x]
if diff<7 :
section = image[0:36, c_head-5:c_tail+5]
else:
diff-=1
section = image[0:36, c_head - diff:c_tail + diff]
elif x==2:
section = image[0:36, cutting_pts[x - 1] - 3:cutting_pts[x-1]+ mid]
else:
section = image[0:36,cutting_pts[x-1]:cutting_pts[x]]
seg_block.append(section)
refined = refineCrop(seg_block,mid-1)
t0 = time.time()
for i,one in enumerate(refined):
res_pre = cRP.SimplePredict(one, i )
# cv2.imshow(str(i),one)
# cv2.waitKey(0)
confidence+=res_pre[0]
name+= res_pre[1]
print "字符识别",time.time() - t0
return refined,name,confidence
| apache-2.0 |
CforED/Machine-Learning | examples/model_selection/grid_search_digits.py | 44 | 2672 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/models/pca_test.py | 12 | 12264 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' test cases for Principal Components Analysis'''
import unittest
import numpy as np
from sparktkregtests.lib import sparktk_test
class PrincipalComponent(sparktk_test.SparkTKTestCase):
# expected singular values
expected_singular_val = [3373.70412657, 594.11385671,
588.713470217, 584.157023124,
579.433395835, 576.659495077,
572.267630461, 568.224352464,
567.328732759, 560.882281619]
# expected right-singular vectors V
expected_R_singular_vec = \
[[0.315533916, -0.3942771, 0.258362247, -0.0738539198,
-0.460673735, 0.0643077298, -0.0837131184, 0.0257963888,
0.00376728499, 0.669876972],
[0.316500921, -0.165508013, -0.131017612, 0.581988787,
-0.0863507191, 0.160473134, 0.53134635, 0.41199152,
0.0823770991, -0.156517367],
[0.316777341, 0.244415549, 0.332413311, -0.377379981,
0.149653873, 0.0606339992, -0.163748261, 0.699502817,
-0.171189721, -0.124509149],
[0.318988109, -0.171520719, -0.250278714, 0.335635209,
0.580901954, 0.160427725, -0.531610364, -0.0304943121,
-0.0785743304, 0.201591811],
[0.3160833, 0.000386702461, -0.108022985, 0.167086405,
-0.470855879, -0.256296677, -0.318727111, -0.155621638,
-0.521547782, -0.418681224],
[0.316721742, 0.288319245, 0.499514144, 0.267566455,
-0.0338341451, -0.134086469, -0.184724393, -0.246523528,
0.593753078, -0.169969303],
[0.315335647, -0.258529064, 0.374780341, -0.169762381,
0.416093803, -0.118232778, 0.445019707, -0.395962728,
-0.337229123, -0.0937071881],
[0.314899154, -0.0294147958, -0.447870311, -0.258339192,
0.0794841625, -0.71141762, 0.110951688, 0.102784186,
0.292018251, 0.109836478],
[0.315542865, -0.236497774, -0.289051199, -0.452795684,
-0.12175352, 0.5265342, -0.0312645934, -0.180142504,
0.318334436, -0.359303747],
[0.315875856, 0.72196434, -0.239088332, -0.0259999274,
-0.0579153559, 0.244335633, 0.232808362, -0.233600306,
-0.181191102, 0.3413174]]
def setUp(self):
super(PrincipalComponent, self).setUp()
schema = [("X1", int),
("X2", int),
("X3", int),
("X4", int),
("X5", int),
("X6", int),
("X7", int),
("X8", int),
("X9", int),
("X10", int)]
train_data = self.get_file("pcadata.csv")
self.frame = self.context.frame.import_csv(train_data, schema=schema)
def test_pca_train_mean(self):
"""Test the train functionality with mean centering"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
True, 10)
# expected right singular vec
expected_R_singular_vec = \
[[-0.549152471, 0.134453636, -0.180671889, 0.234894134,
-0.389490687, 0.046168368, 0.009878424, 0.019946751,
0.233954879, -0.61925063],
[-0.3107595, 0.128379, 0.2736294, -0.4989054,
-0.1581082, 0.2696197, -0.4982982, 0.3490321,
0.2020135, 0.2335396],
[-0.0006266413, 0.4132347439, -0.2091129256, 0.5057322262,
0.284534956, -0.0781849994, -0.296977603, 0.5521891815,
-0.2011568782, 0.0953815474],
[-0.41249677, 0.18236745, 0.42299696, -0.14383198,
0.66920159, -0.02091557, 0.30218101, -0.02264204,
-0.08469958, -0.22294887],
[-0.2861126, 0.3315825, 0.3176644, 0.1456762,
-0.3922746, -0.335028, -0.0245096, -0.3381778,
-0.4412639, 0.332052],
[0.04075956, 0.57043353, -0.27078586, -0.19663536,
-0.05769559, -0.13220315, 0.47465249, 0.0354921,
0.46979934, 0.30476426],
[-0.277264548, -0.007875559, -0.42557711, -0.001052,
0.369406414, -0.08977938, -0.458086887, -0.562792577,
0.197740323, 0.178167367],
[-0.00412614, -0.28572649, 0.32251042, 0.17745651,
0.02287231, -0.70143181, -0.11910508, 0.17190172,
0.49504182, 0.03882287],
[-0.250589207, -0.241806057, 0.184786981, 0.530948245,
0.006698866, 0.486627069, 0.222611629, -0.031699086,
0.265930864, 0.452274076],
[0.45661449, 0.43116941, 0.4199038, 0.21489633,
0.01645086, 0.22809883, -0.26946427, -0.33081525,
0.2941181, -0.2494827]]
# expected singular values
expected_singular_val= \
[596.5517, 590.2738, 588.3024, 582.7042, 579.1695,
576.3667, 569.8798, 568.0740, 563.0158, 560.6463]
# actual right-singular vectors
actual_R_singular_vec = pca_model.right_singular_vectors
# actual singular values
actual_singular_val = pca_model.singular_values
self.assertEqual(np.allclose(
np.array(actual_singular_val),
np.array(expected_singular_val)), True)
self.assertEqual(np.allclose(np.absolute(
np.array(actual_R_singular_vec)),
np.absolute(np.array(expected_R_singular_vec)),
atol=1e-04), True)
def test_pca_predict(self):
"""Test the train functionality"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
pca_model.predict(
self.frame, mean_centered=False)
pd_frame = self.frame.to_pandas(self.frame.count())
actual_R_singular_vec = map(
list, zip(*pca_model.right_singular_vectors))
for index, value in pd_frame.iterrows():
vec1 = value[0:10]
vec2 = value[10:]
dot_product = [sum([(r1)*(r2) for r1, r2 in zip(vec1, k)])
for k in actual_R_singular_vec]
for v,d in zip(vec2, dot_product):
self.assertAlmostEqual(v, d)
def test_pca_train(self):
"""Test the train functionality"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_model.right_singular_vectors
# actual singular values
actual_singular_val = pca_model.singular_values
self.assertEqual(np.allclose(
np.array(actual_singular_val),
np.array(self.expected_singular_val)), True)
self.assertEqual(np.allclose(np.absolute(
np.array(actual_R_singular_vec)),
np.absolute(np.array(self.expected_R_singular_vec)),
atol=1e-04), True)
def test_pca_save_load(self):
"""Test the save and load functionalities"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
path = self.get_name("pca_model_")
pca_model.save(path)
restored_model = self.context.load(path)
self.assertEqual(restored_model.k, 10)
def test_pca_default(self):
"""Test default no. of k"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False)
# actual right-singular vectors
actual_R_singular_vec = pca_model.right_singular_vectors
# actual singular values
actual_singular_val = pca_model.singular_values
self.assertEqual(np.allclose(
np.array(actual_singular_val),
np.array(self.expected_singular_val)), True)
self.assertEqual(np.allclose(np.absolute(
np.array(actual_R_singular_vec)),
np.absolute(np.array(self.expected_R_singular_vec)),
atol=1e-06), True)
def test_pca_bad_no_of_k(self):
"""Test invalid k value in train"""
with self.assertRaisesRegexp(
Exception, "k.*number of observation columns"):
self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 11)
def test_pca_invalid_k(self):
"""Test k < 1 in train"""
with self.assertRaisesRegexp(
Exception, ".* greater than equal to 1"):
self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 0)
def test_pca_bad_column_name(self):
"""Test bad feature column name"""
with self.assertRaisesRegexp(
Exception, "column ERR was not found"):
self.context.models.dimreduction.pca.train(
self.frame,
["ERR", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
def test_pca_bad_column_type(self):
"""Test bad feature column name type"""
with self.assertRaisesRegexp(
Exception, "\'int\' object is not iterable"):
self.context.models.dimreduction.pca.train(
self.frame, 10, False, 10)
def test_pca_orthogonality(self):
"""Test orthogonality of resulting vectors"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_model.right_singular_vectors
res_mat = np.array(actual_R_singular_vec)
res_tran = res_mat.transpose()
derived_id = np.mat(res_mat)*np.mat(res_tran)
self.assertEqual(np.allclose(derived_id, np.identity(10)), True)
def test_pca_singular_values(self):
"""Test for positive singular values"""
pca_model = self.context.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
actual_singular_val = pca_model.singular_values
for val in actual_singular_val:
self.assertGreaterEqual(val, 0)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
CCI-Tools/ect-core | test/util/im/test_cmaps.py | 2 | 3391 | from unittest import TestCase
from cate.util.im.cmaps import get_cmaps
class CmapsTest(TestCase):
def test_get_cmaps_returns_singleton(self):
cmaps = get_cmaps()
self.assertIs(cmaps, get_cmaps())
self.assertIs(cmaps, get_cmaps())
def test_get_cmaps_returns_equal_size_recs(self):
cmaps = get_cmaps()
rec_len = len(cmaps[0])
self.assertEqual(rec_len, 3)
for cmap in cmaps:
self.assertEqual(len(cmap), rec_len)
def test_get_cmaps_categories(self):
cmaps = get_cmaps()
self.assertGreaterEqual(len(cmaps), 6)
self.assertEqual(cmaps[0][0], 'Perceptually Uniform Sequential')
self.assertEqual(cmaps[1][0], 'Sequential 1')
self.assertEqual(cmaps[2][0], 'Sequential 2')
self.assertEqual(cmaps[3][0], 'Diverging')
self.assertEqual(cmaps[4][0], 'Qualitative')
self.assertEqual(cmaps[5][0], 'Miscellaneous')
def test_get_cmaps_category_descr(self):
cmaps = get_cmaps()
self.assertEqual(cmaps[0][1], 'For many applications, a perceptually uniform colormap is the best choice - '
'one in which equal steps in data are perceived as equal steps in the color '
'space')
def test_get_cmaps_category_tuples(self):
cmaps = get_cmaps()
category_tuple = cmaps[0][2]
self.assertEqual(len(category_tuple), 4)
self.assertEqual(category_tuple[0][0], 'viridis')
self.assertEqual(category_tuple[0][1],
'iVBORw0KGgoAAAANSUhEUgAAAQAAAAACCAYAAAC3zQLZAAAAzklEQVR4nO2TQZLFIAhEX7dXmyPM/Y8SZwEqMcnU3/9QZTU8GszC6Ee/HQlk5FAsJIENqVGv/piZ3uqf3nX6Vtd+l8D8UwNOLhZL3+BLh796OXvMdWaqtrrqnZ/tjvuZT/0XxnN/5f25z9X7tIMTKzV7/5yrME3NHoPlUzvplgOevOcz6ZO5eCqzOmark1nHDQveHuuYaazZkTcdmE110HJu6doR3tgfPHyL51zNc0fd2xjf0vPukUPL36YBTcpcWArFyY0RTca88cYbXxt/gUOJC8yRF1kAAAAASUVORK5CYII=')
self.assertEqual(category_tuple[1][0], 'inferno')
self.assertEqual(category_tuple[2][0], 'plasma')
self.assertEqual(category_tuple[3][0], 'magma')
def main():
cmaps = get_cmaps()
html_head = '<!DOCTYPE html>\n' + \
'<html lang="en">\n' + \
'<head>' + \
'<meta charset="UTF-8">' + \
'<title>matplotlib Color Maps</title>' + \
'</head>\n' + \
'<body style="padding: 0.2em">\n'
html_body = ''
html_foot = '</body>\n' + \
'</html>\n'
for cmap_cat, cmap_desc, cmap_bars in cmaps:
html_body += ' <h2>%s</h2>\n' % cmap_cat
html_body += ' <p><i>%s</i></p>\n' % cmap_desc
html_body += '  <table style="border: 0">\n'
for cmap_bar in cmap_bars:
cmap_name, cmap_data = cmap_bar
cmap_image = '<img src="data:image/png;base64,%s" width="100%%" height="20px"/>' % cmap_data
html_body += ' <tr><td style="width: 5em">%s:</td><td style="width: 40em">%s</td></tr>\n' % (
cmap_name, cmap_image)
html_body += ' </table>\n'
html_page = html_head + html_body + html_foot
html_filename = 'test_cmaps.html'
with open(html_filename, 'w') as fp:
fp.write(html_page)
import webbrowser
webbrowser.open_new_tab(html_filename)
if __name__ == '__main__':
main()
| mit |
phiedulxp/tweet_search | experiment/ea/make_tweet_tokenized_data_worker.py | 1 | 2519 | import os
import re
import codecs
import random
import multiprocessing
from multiprocessing import Pool
from tqdm import tqdm
import pandas as pd
import spacy
import json
from bson import json_util
import redis
r = redis.StrictRedis(host='localhost',port=6379, db=0)
import pymongo
client = pymongo.MongoClient('localhost:27017')
db = client.tweet
if not os.path.exists('./stories/'): os.mkdir('./stories/')
if not os.path.exists('./story_fnames/'): os.mkdir('./story_fnames/')
nlp = spacy.load('en_core_web_md')
events = [e for e in db.current_event.find({},{'_id':1,'abstracts':1,'type':1})]
def get_dataset(event):
abst_list = [i['abstract'] for i in event['abstracts']]
abst = ''.join(abst_list)
doc_reference = nlp(abst)
tweets = []
filter_dict = {'event_id':event['_id'],'tweet.lang':'en','tweet.media.card_url':None}
query_dict = {'tweet.standard_text':1}
records = [i for i in db.pos.find(filter_dict,query_dict)]+[i for i in db.paper.find(filter_dict,query_dict)]
for tweet in records:
tweet_id = tweet['_id']
tweet_text = re.sub('https?:\/\/\w+\.\w+\/\w+','',tweet['tweet']['standard_text'])
doc_tweet = nlp(tweet_text)
tweets.append((tweet_id,doc_reference.similarity(doc_tweet),tweet_text))
tweets = sorted(tweets,key=lambda x:x[1],reverse=True)
df_candidate = pd.DataFrame.from_records(tweets,index=range(len(tweets)),columns=['id','simi','text'])
df_candidate = df_candidate.drop_duplicates(['simi'])
return abst_list,df_candidate[df_candidate['simi'] > 0.75][:100].sort_values(by='id')['text'].tolist()
def wite_tokenized_story(queue):
event_id = json.loads(queue['event_id'],object_hook=json_util.object_hook)
event = db.current_event.find_one({'_id':event_id},{'_id':1,'abstracts':1,'type':1})
story_file_name = str(event['_id'])+'_'+str(event['type'])+'.story'
if not os.path.exists('./stories/'+story_file_name):
print(story_file_name)
abst,cadi = get_dataset(event)
with codecs.open('./stories/'+story_file_name,'a',encoding='utf-8') as f:
for i in cadi:
doc = nlp(i)
f.write(' '.join([token.text for token in doc])+'\n\n')
for i in abst:
doc = nlp(i)
f.write('@highlight\n\n')
f.write(' '.join([token.text for token in doc])+'\n\n')
if __name__ == '__main__':
print('data_worker start!')
while True:
queue = r.lpop('task:data')
if queue:
print('data_worker process!')
craw = wite_tokenized_story(json.loads(queue)) | mit |
rahuldhote/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
cauchycui/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/animation/old_animation/animation_blit_tk.py | 4 | 1202 | # For detailed comments on animation and the techniques used here, see
# the wiki entry http://www.scipy.org/Cookbook/Matplotlib/Animations
import matplotlib
matplotlib.use('TkAgg')
import sys
import pylab as p
import numpy as npy
import time
ax = p.subplot(111)
canvas = ax.figure.canvas
# create the initial line
x = npy.arange(0,2*npy.pi,0.01)
line, = p.plot(x, npy.sin(x), animated=True, lw=2)
def run(*args):
background = canvas.copy_from_bbox(ax.bbox)
# for profiling
tstart = time.time()
while 1:
# restore the clean slate background
canvas.restore_region(background)
# update the data
line.set_ydata(npy.sin(x+run.cnt/10.0))
# just draw the animated artist
ax.draw_artist(line)
# just redraw the axes rectangle
canvas.blit(ax.bbox)
if run.cnt==1000:
# print the timing info and quit
print 'FPS:' , 1000/(time.time()-tstart)
sys.exit()
run.cnt += 1
run.cnt = 0
p.subplots_adjust(left=0.3, bottom=0.3) # check for flipy bugs
p.grid() # to ensure proper background restore
manager = p.get_current_fig_manager()
manager.window.after(100, run)
p.show()
| gpl-2.0 |
giorgiop/scikit-learn | sklearn/calibration.py | 18 | 19402 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y``
is neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
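# Hedged usage sketch (illustrative only, not executed here): calibrate a
# classifier that was already fitted on separate training data, using held-out
# calibration data, then obtain calibrated probabilities.
#
#     base = LinearSVC(random_state=0).fit(X_train, y_train)
#     calibrated = CalibratedClassifierCV(base, method='sigmoid', cv='prefit')
#     calibrated.fit(X_calib, y_calib)          # disjoint from X_train
#     proba = calibrated.predict_proba(X_test)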
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
    tiny = np.finfo(np.float64).tiny  # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
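# A minimal illustration of the helper above, with made-up numbers: it returns
# the Platt parameters (a, b) such that the calibrated probability of the
# positive class is 1 / (1 + exp(a * f + b)) for a decision value f, which is
# how _SigmoidCalibration.predict applies them below.
def _demo_sigmoid_calibration():
    """Fit Platt scaling on a few synthetic decision values."""
    scores = np.array([-2.0, -1.0, -0.5, 0.5, 1.0, 2.0])
    labels = np.array([0, 0, 0, 1, 1, 1])
    a, b = _sigmoid_calibration(scores, labels)
    # map the raw scores through the fitted sigmoid
    return 1. / (1. + np.exp(a * scores + b))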
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
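# A minimal example of calibration_curve on hand-made arrays (values chosen only
# to illustrate the call): the scores are digitized into n_bins equal-width bins
# and, for each non-empty bin, the empirical positive rate and the mean predicted
# probability are returned.
def _demo_calibration_curve():
    """Two-bin reliability values for a toy set of predictions."""
    y_true = np.array([0, 0, 1, 1])
    y_prob = np.array([0.1, 0.4, 0.35, 0.8])
    prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
    return prob_true, prob_pred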
| bsd-3-clause |
paix120/DataScienceLearningClubActivities | Activity06/jenks.py | 1 | 3584 | #original code from: https://gist.github.com/drewda/1299198
#note: danieljlewis site referenced there doesn't appear to exist anymore
def getJenksBreaks( dataList, numClass ):
print("starting getJenksBreaks function...")
dataList.sort()
mat1 = []
for i in range(0,len(dataList)+1):
temp = []
for j in range(0,numClass+1):
temp.append(0)
mat1.append(temp)
mat2 = []
for i in range(0,len(dataList)+1):
temp = []
for j in range(0,numClass+1):
temp.append(0)
mat2.append(temp)
for i in range(1,numClass+1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2,len(dataList)+1):
mat2[j][i] = float('inf')
v = 0.0
for l in range(2,len(dataList)+1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1,l+1):
i3 = l - m + 1
val = float(dataList[i3-1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2,numClass+1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(dataList)
kclass = []
for i in range(0,numClass+1):
kclass.append(0)
kclass[numClass] = float(dataList[len(dataList) - 1])
countNum = numClass
while countNum >= 2:#print "rank = " + str(mat1[k][countNum])
id = int((mat1[k][countNum]) - 2)
#print "val = " + str(dataList[id])
kclass[countNum - 1] = dataList[id]
k = int((mat1[k][countNum] - 1))
countNum -= 1
print(kclass)
return kclass
def getGVF( dataList, numClass ):
print("starting getGVF function...")
"""
The Goodness of Variance Fit (GVF) is found by taking the
difference between the squared deviations
from the array mean (SDAM) and the squared deviations from the
class means (SDCM), and dividing by the SDAM
"""
breaks = getJenksBreaks(dataList, numClass)
dataList.sort()
listMean = sum(dataList)/len(dataList)
print(listMean)
SDAM = 0.0
for i in range(0,len(dataList)):
sqDev = (dataList[i] - listMean)**2
SDAM += sqDev
SDCM = 0.0
for i in range(0,numClass):
if breaks[i] == 0:
classStart = 0
else:
classStart = dataList.index(breaks[i])
classStart += 1
#***error here because numpy array has no attribute index (expecting regular python list, I guess)
classEnd = dataList.index(breaks[i+1])
classList = dataList[classStart:classEnd+1]
classMean = sum(classList)/len(classList)
print(classMean)
preSDCM = 0.0
for j in range(0,len(classList)):
sqDev2 = (classList[j] - classMean)**2
preSDCM += sqDev2
SDCM += preSDCM
return (SDAM - SDCM)/SDAM
# written by Drew
# used after running getJenksBreaks()
def classify(value, breaks):
for i in range(1, len(breaks)):
if value < breaks[i]:
return i
return len(breaks) - 1
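#minimal usage sketch of the functions above (the data values are made up):
#getJenksBreaks expects a plain python list and returns numClass+1 break values,
#and classify then maps each value to its class index using those breaks.
def _demo_jenks():
    data = [1, 2, 3, 10, 11, 12, 50, 51, 52]
    breaks = getJenksBreaks(data, 3)
    #classify each original value against the breaks just computed
    return [classify(v, breaks) for v in data]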
import pandas as pd
#import numpy as np
#read in data. it's comma-separated with no column names.
df = pd.read_csv('giftpledgesizes.csv', sep=',', warn_bad_lines=True, low_memory=False)
gifts = df.iloc[:, 0]
#sklearn error said to reshape the array this way if only one feature
gifts = gifts.values.reshape(-1, 1)
#getGVF/getJenksBreaks call list.index, so pass a plain flat python list
clusterGVF = getGVF(gifts.flatten().tolist(), 15)
| gpl-2.0 |
johnson1228/pymatgen | pymatgen/io/abinit/utils.py | 1 | 28136 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Tools and helper functions for abinit calculations"""
from __future__ import unicode_literals, division, print_function
import os
import six
import re
import collections
import shutil
import operator
import numpy as np
from fnmatch import fnmatch
from six.moves import filter
from monty.collections import dict2namedtuple
from monty.string import list_strings
from monty.fnmatch import WildCard
from monty.shutil import copy_r
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__name__)
def as_bool(s):
"""
Convert a string into a boolean.
>>> assert as_bool(True) is True and as_bool("Yes") is True and as_bool("false") is False
"""
if s in (False, True): return s
# Assume string
s = s.lower()
if s in ("yes", "true"):
return True
elif s in ("no", "false"):
return False
else:
raise ValueError("Don't know how to convert type %s: %s into a boolean" % (type(s), s))
class File(object):
"""
Very simple class used to store file basenames, absolute paths and directory names.
Provides wrappers for the most commonly used functions defined in os.path.
"""
def __init__(self, path):
self._path = os.path.abspath(path)
def __repr__(self):
return "<%s at %s, %s>" % (self.__class__.__name__, id(self), self.path)
def __str__(self):
return "<%s, %s>" % (self.__class__.__name__, self.path)
def __eq__(self, other):
return False if other is None else self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
@property
def path(self):
"""Absolute path of the file."""
return self._path
@property
def basename(self):
"""File basename."""
return os.path.basename(self.path)
@property
def relpath(self):
"""Relative path."""
try:
return os.path.relpath(self.path)
except OSError:
# current working directory may not be defined!
return self.path
@property
def dirname(self):
"""Absolute path of the directory where the file is located."""
return os.path.dirname(self.path)
@property
def exists(self):
"""True if file exists."""
return os.path.exists(self.path)
@property
def isncfile(self):
"""True if self is a NetCDF file"""
return self.basename.endswith(".nc")
def chmod(self, mode):
"""Change the access permissions of a file."""
os.chmod(self.path, mode)
def read(self):
"""Read data from file."""
with open(self.path, "r") as f:
return f.read()
def readlines(self):
"""Read lines from files."""
with open(self.path, "r") as f:
return f.readlines()
def write(self, string):
"""Write string to file."""
self.make_dir()
with open(self.path, "w") as f:
if not string.endswith("\n"):
return f.write(string + "\n")
else:
return f.write(string)
def writelines(self, lines):
"""Write a list of strings to file."""
self.make_dir()
with open(self.path, "w") as f:
return f.writelines(lines)
def make_dir(self):
"""Make the directory where the file is located."""
if not os.path.exists(self.dirname):
os.makedirs(self.dirname)
def remove(self):
"""Remove the file."""
try:
os.remove(self.path)
except:
pass
def move(self, dst):
"""
Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
"""
shutil.move(self.path, dst)
def get_stat(self):
"""Results from os.stat"""
return os.stat(self.path)
def getsize(self):
"""
Return the size, in bytes, of path.
Return 0 if the file is empty or it does not exist.
"""
if not self.exists: return 0
return os.path.getsize(self.path)
class Directory(object):
"""
Very simple class that provides helper functions
wrapping the most commonly used functions defined in os.path.
"""
def __init__(self, path):
self._path = os.path.abspath(path)
def __repr__(self):
return "<%s at %s, %s>" % (self.__class__.__name__, id(self), self.path)
def __str__(self):
return self.path
def __eq__(self, other):
return False if other is None else self.path == other.path
def __ne__(self, other):
return not self.__eq__(other)
@property
def path(self):
"""Absolute path of the directory."""
return self._path
@property
def relpath(self):
"""Relative path."""
return os.path.relpath(self.path)
@property
def basename(self):
"""Directory basename."""
return os.path.basename(self.path)
def path_join(self, *p):
"""
Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components will be discarded.
"""
return os.path.join(self.path, *p)
@property
def exists(self):
"""True if file exists."""
return os.path.exists(self.path)
def makedirs(self):
"""
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist.
"""
if not self.exists:
os.makedirs(self.path)
def rmtree(self):
"""Recursively delete the directory tree"""
shutil.rmtree(self.path, ignore_errors=True)
def copy_r(self, dst):
"""
Implements a recursive copy function similar to Unix's "cp -r" command.
"""
return copy_r(self.path, dst)
def clean(self):
"""Remove all files in the directory tree while preserving the directory"""
for path in self.list_filepaths():
try:
os.remove(path)
except:
pass
def path_in(self, file_basename):
"""Return the absolute path of filename in the directory."""
return os.path.join(self.path, file_basename)
def list_filepaths(self, wildcard=None):
"""
Return the list of absolute filepaths in the directory.
Args:
wildcard: String of tokens separated by "|". Each token represents a pattern.
If wildcard is not None, we return only those files that match the given shell pattern (uses fnmatch).
Example:
wildcard="*.nc|*.pdf" selects only those files that end with .nc or .pdf
"""
# Select the files in the directory.
fnames = [f for f in os.listdir(self.path)]
filepaths = filter(os.path.isfile, [os.path.join(self.path, f) for f in fnames])
# Filter using the shell patterns.
if wildcard is not None:
filepaths = WildCard(wildcard).filter(filepaths)
return filepaths
def has_abiext(self, ext):
"""
Returns the absolute path of the ABINIT file with extension ext.
        Support both Fortran files and netcdf files. In the latter case,
        we check whether a file with extension ext + ".nc" is present
        in the directory. Returns an empty string if the file is not present.
Raises:
`ValueError` if multiple files with the given ext are found.
This implies that this method is not compatible with multiple datasets.
"""
if ext != "abo":
ext = ext if ext.startswith('_') else '_' + ext
files = []
for f in self.list_filepaths():
# For the time being, we ignore DDB files in nc format.
if ext == "_DDB" and f.endswith(".nc"): continue
# Ignore BSE text files e.g. GW_NLF_MDF
if ext == "_MDF" and not f.endswith(".nc"): continue
if f.endswith(ext) or f.endswith(ext + ".nc"):
files.append(f)
# This should fix the problem with the 1WF files in which the file extension convention is broken
if not files:
files = [f for f in self.list_filepaths() if fnmatch(f, "*%s*" % ext)]
if not files:
return ""
if len(files) > 1:
# ABINIT users must learn that multiple datasets are bad!
raise ValueError("Found multiple files with the same extensions:\n %s\n" % files +
"Please avoid using mutiple datasets!")
return files[0]
def symlink_abiext(self, inext, outext):
"""
        Create a symbolic link (outext --> inext). The file names are implicitly
given by the ABINIT file extension.
Example:
outdir.symlink_abiext('1WF', 'DDK')
creates the link out_DDK that points to out_1WF
Return: 0 if success.
Raise: RuntimeError
"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
if os.path.exists(outfile):
if os.path.islink(outfile):
if os.path.realpath(outfile) == infile:
logger.debug("link %s already exists but it's ok because it points to the correct file" % outfile)
return 0
else:
raise RuntimeError("Expecting link at %s already exists but it does not point to %s" % (outfile, infile))
else:
raise RuntimeError('Expecting link at %s but found file.' % outfile)
os.symlink(infile, outfile)
return 0
def rename_abiext(self, inext, outext):
"""Rename the Abinit file with extension inext with the new extension outext"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
shutil.move(infile, outfile)
return 0
def copy_abiext(self, inext, outext):
"""Copy the Abinit file with extension inext to a new file withw extension outext"""
infile = self.has_abiext(inext)
if not infile:
raise RuntimeError('no file with extension %s in %s' % (inext, self))
for i in range(len(infile) - 1, -1, -1):
if infile[i] == '_':
break
else:
raise RuntimeError('Extension %s could not be detected in file %s' % (inext, infile))
outfile = infile[:i] + '_' + outext
shutil.copy(infile, outfile)
return 0
def remove_exts(self, exts):
"""
Remove the files with the given extensions. Unlike rmtree, this function preserves the directory path.
Return list with the absolute paths of the files that have been removed.
"""
paths = []
for ext in list_strings(exts):
path = self.has_abiext(ext)
if not path: continue
try:
os.remove(path)
paths.append(path)
except IOError:
logger.warning("Exception while trying to remove file %s" % path)
return paths
def find_last_timden_file(self):
"""
        ABINIT produces lots of out_TIM?_DEN files for each step and we need to find the last
        one in order to prepare the restart or to connect other tasks to the structural relaxation.
This function finds all the TIM?_DEN files in self and return a namedtuple (path, step)
where `path` is the path of the last TIM?_DEN file and step is the iteration number.
Returns None if the directory does not contain TIM?_DEN files.
"""
regex = re.compile(r"out_TIM(\d+)_DEN(.nc)?$")
timden_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not timden_paths: return None
# Build list of (step, path) tuples.
stepfile_list = []
for path in timden_paths:
name = os.path.basename(path)
match = regex.match(name)
step, ncext = match.groups()
stepfile_list.append((int(step), path))
# DSU sort.
last = sorted(stepfile_list, key=lambda t: t[0])[-1]
return dict2namedtuple(step=last[0], path=last[1])
def find_1wf_files(self):
"""
Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
        e.g. out_1WF4. This method scans the files in the directories and returns a list of namedtuples.
        Each named tuple gives the `path` of the 1WF file and the `pertcase` index.
"""
regex = re.compile(r"out_1WF(\d+)(\.nc)?$")
wf_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not wf_paths: return None
# Build list of (pertcase, path) tuples.
pertfile_list = []
for path in wf_paths:
name = os.path.basename(path)
match = regex.match(name)
pertcase, ncext = match.groups()
pertfile_list.append((int(pertcase), path))
# DSU sort.
pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list]
def find_1den_files(self):
"""
Abinit adds the idir-ipert index at the end of the 1DEN file and this breaks the extension
        e.g. out_DEN1. This method scans the files in the directories and returns a list of namedtuples.
Each named tuple gives the `path` of the 1DEN file and the `pertcase` index.
"""
regex = re.compile(r"out_DEN(\d+)(\.nc)?$")
den_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not den_paths: return None
# Build list of (pertcase, path) tuples.
pertfile_list = []
for path in den_paths:
name = os.path.basename(path)
match = regex.match(name)
pertcase, ncext = match.groups()
pertfile_list.append((int(pertcase), path))
# DSU sort.
pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list]
# This dictionary maps ABINIT file extensions to the variables that must be used to read the file in input.
#
# TODO: It would be nice to pass absolute paths to abinit with getden_path
# so that I can avoid creating symbolic links before running but
# the presence of the C-bindings complicates the implementation
# (gfortran SIGFAULTs if I add strings to dataset_type!
_EXT2VARS = {
"DEN": {"irdden": 1},
"WFK": {"irdwfk": 1},
"WFQ": {"irdwfq": 1},
"SCR": {"irdscr": 1},
"QPS": {"irdqps": 1},
"1WF": {"ird1wf": 1},
"1DEN": {"ird1den": 1},
"BSR": {"irdbsreso": 1},
"BSC": {"irdbscoup": 1},
"HAYDR_SAVE": {"irdhaydock": 1},
"DDK": {"irdddk": 1},
"DDB": {},
"DVDB": {},
"GKK": {},
"DKK": {},
}
def irdvars_for_ext(ext):
"""
Returns a dictionary with the ABINIT variables
that must be used to read the file with extension ext.
"""
return _EXT2VARS[ext].copy()
def abi_extensions():
"""List with all the ABINIT extensions that are registered."""
return list(_EXT2VARS.keys())[:]
def abi_splitext(filename):
"""
Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.
Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)
>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
"""
filename = os.path.basename(filename)
is_ncfile = False
if filename.endswith(".nc"):
is_ncfile = True
filename = filename[:-3]
known_extensions = abi_extensions()
    # This algorithm fails if we have two files
# e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE
for i in range(len(filename)-1, -1, -1):
ext = filename[i:]
if ext in known_extensions:
break
else:
raise ValueError("Cannot find a registered extension in %s" % filename)
root = filename[:i]
if is_ncfile:
ext += ".nc"
return root, ext
class FilepathFixer(object):
"""
This object modifies the names of particular output files
produced by ABINIT so that the file extension is preserved.
Having a one-to-one mapping between file extension and data format
is indeed fundamental for the correct behaviour of abinit since:
- We locate the output file by just inspecting the extension
- We select the variables that must be added to the input file
on the basis of the extension specified by the user during
the initialization of the `AbinitFlow`.
Unfortunately, ABINIT developers like to append extra stuff
to the initial extension and therefore we have to call
`FilepathFixer` to fix the output files produced by the run.
Example:
>>> fixer = FilepathFixer()
>>> assert fixer.fix_paths('/foo/out_1WF17') == {'/foo/out_1WF17': '/foo/out_1WF'}
>>> assert fixer.fix_paths('/foo/out_1WF5.nc') == {'/foo/out_1WF5.nc': '/foo/out_1WF.nc'}
"""
def __init__(self):
# dictionary mapping the *official* file extension to
# the regular expression used to tokenize the basename of the file
# To add a new file it's sufficient to add a new regexp and
# a static method _fix_EXTNAME
self.regs = regs = {}
import re
regs["1WF"] = re.compile(r"(\w+_)1WF(\d+)(\.nc)?$")
regs["1DEN"] = re.compile(r"(\w+_)1DEN(\d+)(\.nc)?$")
@staticmethod
def _fix_1WF(match):
root, pert, ncext = match.groups()
if ncext is None: ncext = ""
return root + "1WF" + ncext
@staticmethod
def _fix_1DEN(match):
root, pert, ncext = match.groups()
if ncext is None: ncext = ""
return root + "1DEN" + ncext
def _fix_path(self, path):
for ext, regex in self.regs.items():
head, tail = os.path.split(path)
match = regex.match(tail)
if match:
newtail = getattr(self, "_fix_" + ext)(match)
newpath = os.path.join(head, newtail)
return newpath, ext
return None, None
def fix_paths(self, paths):
"""
Fix the filenames in the iterable paths
Returns:
old2new: Mapping old_path --> new_path
"""
old2new, fixed_exts = {}, []
for path in list_strings(paths):
newpath, ext = self._fix_path(path)
if newpath is not None:
#if ext not in fixed_exts:
# if ext == "1WF": continue
# raise ValueError("Unknown extension %s" % ext)
#print(ext, path, fixed_exts)
#if ext != '1WF':
# assert ext not in fixed_exts
if ext not in fixed_exts:
if ext == "1WF": continue
raise ValueError("Unknown extension %s" % ext)
fixed_exts.append(ext)
old2new[path] = newpath
return old2new
def _bop_not(obj):
"""Boolean not."""
return not bool(obj)
def _bop_and(obj1, obj2):
"""Boolean and."""
return bool(obj1) and bool(obj2)
def _bop_or(obj1, obj2):
"""Boolean or."""
return bool(obj1) or bool(obj2)
def _bop_divisible(num1, num2):
"""Return True if num1 is divisible by num2."""
return (num1 % num2) == 0.0
# Mapping string --> operator.
_UNARY_OPS = {
"$not": _bop_not,
}
_BIN_OPS = {
"$eq": operator.eq,
"$ne": operator.ne,
"$gt": operator.gt,
"$ge": operator.ge,
"$gte": operator.ge,
"$lt": operator.lt,
"$le": operator.le,
"$lte": operator.le,
"$divisible": _bop_divisible,
"$and": _bop_and,
"$or": _bop_or,
}
_ALL_OPS = list(_UNARY_OPS.keys()) + list(_BIN_OPS.keys())
def map2rpn(map, obj):
"""
Convert a Mongodb-like dictionary to a RPN list of operands and operators.
Reverse Polish notation (RPN) is a mathematical notation in which every
operator follows all of its operands, e.g.
3 - 4 + 5 --> 3 4 - 5 +
>>> d = {2.0: {'$eq': 1.0}}
>>> assert map2rpn(d, None) == [2.0, 1.0, '$eq']
"""
rpn = []
for k, v in map.items():
if k in _ALL_OPS:
if isinstance(v, collections.Mapping):
# e.g "$not": {"$gt": "one"}
# print("in op_vmap",k, v)
values = map2rpn(v, obj)
rpn.extend(values)
rpn.append(k)
elif isinstance(v, (list, tuple)):
# e.g "$and": [{"$not": {"one": 1.0}}, {"two": {"$lt": 3}}]}
# print("in_op_list",k, v)
for d in v:
rpn.extend(map2rpn(d, obj))
rpn.append(k)
else:
# Examples
# 1) "$eq"": "attribute_name"
# 2) "$eq"": 1.0
try:
#print("in_otherv",k, v)
rpn.append(getattr(obj, v))
rpn.append(k)
except TypeError:
#print("in_otherv, raised",k, v)
rpn.extend([v, k])
else:
try:
k = getattr(obj, k)
except TypeError:
k = k
if isinstance(v, collections.Mapping):
# "one": {"$eq": 1.0}}
values = map2rpn(v, obj)
rpn.append(k)
rpn.extend(values)
else:
#"one": 1.0
rpn.extend([k, v, "$eq"])
return rpn
def evaluate_rpn(rpn):
"""
    Evaluates the RPN form produced by map2rpn.
Returns:
bool
"""
vals_stack = []
for item in rpn:
if item in _ALL_OPS:
# Apply the operator and push to the task.
v2 = vals_stack.pop()
if item in _UNARY_OPS:
res = _UNARY_OPS[item](v2)
elif item in _BIN_OPS:
v1 = vals_stack.pop()
res = _BIN_OPS[item](v1, v2)
else:
raise ValueError("%s not in unary_ops or bin_ops" % str(item))
vals_stack.append(res)
else:
# Push the operand
vals_stack.append(item)
#print(vals_stack)
assert len(vals_stack) == 1
assert isinstance(vals_stack[0], bool)
return vals_stack[0]
class Condition(object):
"""
This object receives a dictionary that defines a boolean condition whose syntax is similar
to the one used in mongodb (albeit not all the operators available in mongodb are supported here).
Example:
$gt: {field: {$gt: value} }
$gt selects those documents where the value of the field is greater than (i.e. >) the specified value.
$and performs a logical AND operation on an array of two or more expressions (e.g. <expression1>, <expression2>, etc.)
and selects the documents that satisfy all the expressions in the array.
{ $and: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
Consider the following example:
db.inventory.find( { qty: { $gt: 20 } } )
This query will select all documents in the inventory collection where the qty field value is greater than 20.
Consider the following example:
db.inventory.find( { qty: { $gt: 20 } } )
db.inventory.find({ $and: [ { price: 1.99 }, { qty: { $lt: 20 } }, { sale: true } ] } )
"""
@classmethod
def as_condition(cls, obj):
"""Convert obj into :class:`Condition`"""
if isinstance(obj, cls):
return obj
else:
return cls(cmap=obj)
def __init__(self, cmap=None):
self.cmap = {} if cmap is None else cmap
def __str__(self):
return str(self.cmap)
def __bool__(self):
return bool(self.cmap)
__nonzero__ = __bool__
def __call__(self, obj):
if not self: return True
try:
return evaluate_rpn(map2rpn(self.cmap, obj))
except Exception as exc:
logger.warning("Condition(%s) raised Exception:\n %s" % (type(obj), str(exc)))
return False
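# Minimal sketch of how a Condition is evaluated, assuming only the classes and
# helpers defined above; the namedtuple is an illustrative stand-in for any
# object exposing a `qty` attribute. The mapping is converted to RPN by map2rpn
# and evaluated by evaluate_rpn.
def _demo_condition():
    """Evaluate {'qty': {'$gt': 20}} against two toy records."""
    from collections import namedtuple
    Record = namedtuple("Record", ["qty"])
    cond = Condition.as_condition({"qty": {"$gt": 20}})
    # expected: True for qty=25, False for qty=5
    return cond(Record(qty=25)), cond(Record(qty=5))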
class Editor(object):
"""
Wrapper class that calls the editor specified by the user
or the one specified in the $EDITOR env variable.
"""
def __init__(self, editor=None):
"""If editor is None, $EDITOR is used."""
self.editor = os.getenv("EDITOR", "vi") if editor is None else str(editor)
def edit_files(self, fnames, ask_for_exit=True):
exit_status = 0
for idx, fname in enumerate(fnames):
exit_status = self.edit_file(fname)
if ask_for_exit and idx != len(fnames)-1 and self.user_wants_to_exit():
break
return exit_status
def edit_file(self, fname):
from subprocess import call
retcode = call([self.editor, fname])
if retcode != 0:
import warnings
warnings.warn("Error while trying to edit file: %s" % fname)
return retcode
@staticmethod
def user_wants_to_exit():
"""Show an interactive prompt asking if exit is wanted."""
# Fix python 2.x.
if six.PY2:
my_input = raw_input
else:
my_input = input
try:
answer = my_input("Do you want to continue [Y/n]")
except EOFError:
return True
return answer.lower().strip() in ["n", "no"]
class SparseHistogram(object):
def __init__(self, items, key=None, num=None, step=None):
if num is None and step is None:
raise ValueError("Either num or step must be specified")
from collections import defaultdict, OrderedDict
values = [key(item) for item in items] if key is not None else items
start, stop = min(values), max(values)
if num is None:
num = int((stop - start) / step)
if num == 0: num = 1
mesh = np.linspace(start, stop, num, endpoint=False)
from monty.bisect import find_le
hist = defaultdict(list)
for item, value in zip(items, values):
# Find rightmost value less than or equal to x.
# hence each bin contains all items whose value is >= value
pos = find_le(mesh, value)
hist[mesh[pos]].append(item)
#new = OrderedDict([(pos, hist[pos]) for pos in sorted(hist.keys(), reverse=reverse)])
self.binvals = sorted(hist.keys())
self.values = [hist[pos] for pos in self.binvals]
self.start, self.stop, self.num = start, stop, num
@add_fig_kwargs
def plot(self, ax=None, **kwargs):
"""
Plot the histogram with matplotlib, returns `matplotlib` figure.
"""
ax, fig, plt = get_ax_fig_plt(ax)
yy = [len(v) for v in self.values]
ax.plot(self.binvals, yy, **kwargs)
return fig
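# Minimal sketch of SparseHistogram with made-up numbers: items are grouped into
# equal-width bins of the requested step between the minimum and maximum values,
# and the items themselves (not just counts) are kept per non-empty bin.
def _demo_sparse_histogram():
    """Bin a handful of integers and report bin edges and occupancies."""
    hist = SparseHistogram([1, 2, 2, 3, 8, 9], step=2)
    return hist.binvals, [len(v) for v in hist.values]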
| mit |
mifumagalli/mypython | ifu/muse_redux_line.py | 1 | 49249 | """
These are sets of procedures optimised for almost empty fields
but with extended line emission
"""
from __future__ import print_function
def individual_resample(listob,refpath='./',nproc=24):
"""
Loop over each OB and re-run scipost using a final coadded cube as
a reference for WCS. This produces cubes that are all regridded to
a common 3D grid with a single interpolation.
listob -> OBs to process
refpath -> where reference path is for WCS resampling
    nproc -> number of processors in parallel runs
"""
import os
import glob
import subprocess
import shutil
from astropy.io import fits
import muse_utils as mut
import numpy as np
#grab top dir
topdir=os.getcwd()
#now loop over each folder and make the final sky-subtracted cubes
for ob in listob:
#change dir
os.chdir(ob+'/Proc/')
#make cubex folder
if not os.path.exists('Line'):
os.makedirs('Line')
#change dir
os.chdir('Line')
print('Processing {} for resampling on reference cube'.format(ob))
#Search how many exposures are there
scils=glob.glob("../Basic/OBJECT_RED_0*.fits*")
nsci=len(scils)
#loop on exposures and reduce frame with sky subtraction
for exp in range(nsci):
#define some output names for final cube
cname="DATACUBE_FINAL_LINEWCS_EXP{0:d}.fits".format(exp+1)
pname="PIXTABLE_REDUCED_LINEWCS_EXP{0:d}.fits".format(exp+1)
iname="IMAGE_FOV_LINEWCS_EXP{0:d}.fits".format(exp+1)
if not os.path.isfile(cname):
print("Processing exposure {0:d} to align to reference".format(exp+1))
#copy sof file written for basic reduction
sof_old=open("../../Script/scipost_{0:d}.sof".format(exp+1))
sof_name="../../Script/scipost_line_{0:d}.sof".format(exp+1)
sofedit=open(sof_name,'w')
#now apply offsets to pixel table
print ('Apply offsets...')
pixtablist=[]
for ll in sof_old:
if('STD_' in ll or 'PIXTABLE_OBJECT' in ll):
fil,tag=ll.split(' ')
sofedit.write("../Basic/"+fil+" "+tag)
else:
sofedit.write(ll)
#Check existence of ABSOLUTE offset list otherwise fall back onto the relative one
if os.path.isfile('../../../{}/OFFSET_LIST_ABS.fits'.format(refpath)):
sofedit.write('../../../{}/OFFSET_LIST_ABS.fits OFFSET_LIST\n'.format(refpath))
else:
sofedit.write('../../../{}/OFFSET_LIST.fits OFFSET_LIST\n'.format(refpath))
#append reference frame to sof file
sofedit.write('../../../{}/DATACUBE_FINAL.fits OUTPUT_WCS\n'.format(refpath))
sofedit.close()
sof_old.close()
#Write the command file
scr=open("../../Script/make_scipost_line_{0:d}.sh".format(exp+1),"w")
scr.write("OMP_NUM_THREADS={0:d}\n".format(nproc))
scr.write('esorex --log-file=scipost_line_{0:d}.log muse_scipost --filter=white --skymethod="none" --save=cube,individual ../../Script/scipost_line_{0:d}.sof'.format(exp+1))
scr.close()
#Run pipeline
subprocess.call(["sh", "../../Script/make_scipost_line_{0:d}.sh".format(exp+1)])
subprocess.call(["mv","DATACUBE_FINAL.fits",cname])
subprocess.call(["mv","IMAGE_FOV_0001.fits",iname])
subprocess.call(["mv","PIXTABLE_REDUCED_0001.fits",pname])
else:
print("Exposure {0:d} exists.. skip! ".format(exp+1))
#back to top
os.chdir(topdir)
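#Hedged usage sketch (placeholders only): judging from the intermediate file
#names each step reads and writes, the wrappers in this module appear intended
#to run in this order: resample each exposure on the reference WCS, build the
#IFU masks, apply the two-pass illumination correction, then perform the
#internal sky subtraction. OB names, reference path and nproc are illustrative.
def _example_line_reduction(listob=('OB1', 'OB2'), refpath='MPScombine', nproc=12):
    """Illustrative end-to-end call sequence for the line-emission reduction."""
    individual_resample(listob, refpath=refpath, nproc=nproc)
    make_ifumasks(listob, refpath=refpath, nproc=nproc)
    make_illcorr(listob)
    internalskysub(listob, None)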
def old_individual_resample(listob,refpath='./',nproc=24):
"""
Loop over each OB and re-run scipost using a final coadded cube as
a reference for WCS. This produces cubes that are all regridded to
a common 3D grid with a single interpolation.
listob -> OBs to process
refpath -> where reference path is for WCS resampling
    nproc -> number of processors in parallel runs
"""
import os
import glob
import subprocess
import shutil
from astropy.io import fits
import muse_utils as mut
import numpy as np
#grab top dir
topdir=os.getcwd()
#now loop over each folder and make the final sky-subtracted cubes
for ob in listob:
#change dir
os.chdir(ob+'/Proc/')
#make cubex folder
if not os.path.exists('Line'):
os.makedirs('Line')
#change dir
os.chdir('Line')
print('Processing {} for resampling on reference cube'.format(ob))
#Search how many exposures are there
scils=glob.glob("../Basic/OBJECT_RED_0*.fits*")
nsci=len(scils)
#loop on exposures and reduce frame with sky subtraction
for exp in range(nsci):
if not os.path.isfile('OFFSET_LIST_EXP{0:d}.fits'.format(exp+1)):
print("Compute offsets...")
#create align file
alignsof=open('../../Script/align_toref_{0:d}.sof'.format(exp+1),'w')
alignsof.write("../../../{}/IMAGE_FOV_0001.fits IMAGE_FOV\n".format(refpath))
alignsof.write("../Basic/IMAGE_FOV_EXP{0:d}.fits IMAGE_FOV\n".format(exp+1))
alignsof.close()
#run script align with respect to registered reference cube
alignscr=open('../../Script/make_align_toref_{0:d}.sh'.format(exp+1),'w')
alignscr.write("esorex --log-file=align_toref_{0:d}.log muse_exp_align --threshold=4. ../../Script/align_toref_{0:d}.sof".format(exp+1))
alignscr.close()
subprocess.call(["sh","../../Script/make_align_toref_{0:d}.sh".format(exp+1)])
#copy the offsets
alig=fits.open('OFFSET_LIST.fits')
alig.writeto('OFFSET_LIST_EXP{0:d}.fits'.format(exp+1),clobber=True)
else:
print('Offsets exist.. skip')
#define some output names for final cube
cname="DATACUBE_FINAL_LINEWCS_EXP{0:d}.fits".format(exp+1)
pname="PIXTABLE_REDUCED_LINEWCS_EXP{0:d}.fits".format(exp+1)
iname="IMAGE_FOV_LINEWCS_EXP{0:d}.fits".format(exp+1)
if not os.path.isfile(cname):
print("Processing exposure {0:d} to align to reference".format(exp+1))
#copy sof file written for basic reduction
sof_old=open("../../Script/scipost_{0:d}.sof".format(exp+1))
sof_name="../../Script/scipost_line_{0:d}.sof".format(exp+1)
sofedit=open(sof_name,'w')
#read the offsets
alig=fits.open('OFFSET_LIST_EXP{0:d}.fits'.format(exp+1))
offsets=alig[1].data[1]
#now apply offsets to pixel table
print ('Apply offsets...')
pixtablist=[]
for ll in sof_old:
if('PIXTABLE_OBJECT' in ll):
pixtab=ll.split(' ')[0]
pxt=fits.open('../Basic/'+pixtab)
pxt[0].header['RA']=pxt[0].header['RA']-offsets[2]
pxt[0].header['DEC']=pxt[0].header['DEC']-offsets[3]
pxt.writeto("WCS_"+pixtab,clobber=True)
pixtablist.append("WCS_"+pixtab)
sofedit.write("WCS_"+pixtab+" PIXTABLE_OBJECT\n")
elif('STD_' in ll):
fil,tag=ll.split(' ')
sofedit.write("../Basic/"+fil+" "+tag)
else:
sofedit.write(ll)
#append reference frame to sof file
sofedit.write('../../../{}/DATACUBE_FINAL.fits OUTPUT_WCS\n'.format(refpath))
sofedit.close()
sof_old.close()
#Write the command file
scr=open("../../Script/make_scipost_line_{0:d}.sh".format(exp+1),"w")
scr.write("OMP_NUM_THREADS={0:d}\n".format(nproc))
scr.write('esorex --log-file=scipost_line_{0:d}.log muse_scipost --filter=white --skymethod="none" --save=cube,individual ../../Script/scipost_line_{0:d}.sof'.format(exp+1))
scr.close()
#Run pipeline
subprocess.call(["sh", "../../Script/make_scipost_line_{0:d}.sh".format(exp+1)])
subprocess.call(["mv","DATACUBE_FINAL.fits",cname])
subprocess.call(["mv","IMAGE_FOV_0001.fits",iname])
subprocess.call(["mv","PIXTABLE_REDUCED_0001.fits",pname])
else:
print("Exposure {0:d} exists.. skip! ".format(exp+1))
#clean dir for unwanted stuff...
print ('Clean directory!')
garbage=glob.glob("WCS_PIXTABLE_OBJECT*")
for gg in garbage:
os.remove(gg)
#back to top
os.chdir(topdir)
def make_ifumasks(listob,refpath='./',nproc=24):
"""
Loop over each OB and run scipost_make_cube on the reduced pixel tables
    to produce the final IFU masks for the resampled cube
listob -> OBs to process
refpath -> where reference path is for WCS resampling
    nproc -> number of processors in parallel runs
"""
import os
import glob
import subprocess
import shutil
from astropy.io import fits
import muse_utils as mut
import numpy as np
#grab top dir
topdir=os.getcwd()
#now loop over each folder and make the final sky-subtracted cubes
for ob in listob:
#change dir
os.chdir(ob+'/Proc/Line/')
print('Processing {} for IFU mask'.format(ob))
#Search how many exposures are there
scils=glob.glob("../Basic/OBJECT_RED_0*.fits*")
nsci=len(scils)
#loop on exposures and reduce frame with sky subtraction
for exp in range(nsci):
#define some output names for final cube
cname="DATACUBE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
iname="IMAGE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
if not os.path.isfile(cname):
print("Processing exposure {0:d} to produce IFU mask".format(exp+1))
#make a sof file
sof_name="../../Script/scipost_ifumask_{0:d}.sof".format(exp+1)
sofedit=open(sof_name,'w')
#append reduced pixel table and reference frame to sof file
origpix='PIXTABLE_REDUCED_LINEWCS_EXP{0:d}.fits'.format(exp+1)
newpix='IFUMASK_PIXTABLE_LINEWCS_EXP{0:d}.fits'.format(exp+1)
sofedit.write(newpix+' PIXTABLE_OBJECT\n')
sofedit.write('../../../{}/DATACUBE_FINAL.fits OUTPUT_WCS\n'.format(refpath))
sofedit.close()
#Write the command file
scr=open("../../Script/make_scipost_ifumask_{0:d}.sh".format(exp+1),"w")
scr.write("OMP_NUM_THREADS={0:d}\n".format(nproc))
scr.write('esorex --log-file=scipost_ifumask_{0:d}.log muse_scipost_make_cube --filter=white ../../Script/scipost_ifumask_{0:d}.sof'.format(exp+1))
scr.close()
#create the IFU mask
#unpack ifu origin
pxt=fits.open(origpix)
ifu,islice=mut.unpack_pixtab(pxt[7].data)
#loop over ifu
for iff in range(24):
#group slices in 4 stacks
for i in range(4):
imin=i*12+1
imax=(i+1)*12
#find pixels and set value to flag
pixinside=np.where((islice >= imin) & (islice <= imax) & (ifu == (iff+1)))
pxt[4].data[pixinside] = (iff+1)*100.+i+1
print('Done with IFU:', iff+1)
#save updated
pxt.writeto(newpix,clobber=True)
pxt.close()
#Run pipeline
subprocess.call(["sh", "../../Script/make_scipost_ifumask_{0:d}.sh".format(exp+1)])
subprocess.call(["mv","DATACUBE_FINAL.fits",cname])
subprocess.call(["mv","IMAGE_FOV_0001.fits",iname])
else:
print("IFU mask {0:d} exists.. skip! ".format(exp+1))
#back to top
os.chdir(topdir)
def make_illcorr(listob):
"""
Wrapper function for illumination correction
"""
import os
import glob
#grab top dir
topdir=os.getcwd()
#define the bandwidth for illumination correction
    #binwidths of ~100 are found to be optimal
binwidth=100
#now loop over each folder and make the final illcorrected cubes
for ob in listob:
#change dir
os.chdir(ob+'/Proc/Line/')
print('Processing {} for illumination correction'.format(ob))
#Search how many exposures are there
scils=glob.glob("../Basic/OBJECT_RED_0*.fits*")
nsci=len(scils)
#loop on exposures and reduce frame with sky subtraction
for exp in range(nsci):
#do pass on IFUs
print('First pass for exposure {}'.format(exp+1))
#these are the ifu masks
ifumask_cname="DATACUBE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
ifumask_iname="IMAGE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
#these are the data cubes
data_cname="DATACUBE_FINAL_LINEWCS_EXP{0:d}.fits".format(exp+1)
data_iname="IMAGE_FOV_LINEWCS_EXP{0:d}.fits".format(exp+1)
            #go for IFU corrections in coarse wavelength bins
outcorr="ILLCORR_EXP{0:d}_ifu.fits".format(exp+1)
outcorrnorm="ILLCORRNORM_EXP{0:d}_ifu.fits".format(exp+1)
newcube="DATACUBE_FINAL_LINEWCS_EXP{0:d}_ILLCORR_ifu.fits".format(exp+1)
newimage="IMAGE_FOV_LINEWCS_EXP{0:d}_ILLCORR_ifu.fits".format(exp+1)
make_illcorr_ifu(ifumask_cname,ifumask_iname,data_cname,\
data_iname,outcorr,outcorrnorm,newcube,newimage,binwidth)
#do second pass on stack - just constant on white image
print('Second pass for exposure {}'.format(exp+1))
#these are the ifu masks
ifumask_cname="DATACUBE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
ifumask_iname="IMAGE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
#these are the data cubes
data_cname=newcube
data_iname=newimage
            #go for stack corrections in coarse wavelength bins
outcorrnorm="ILLCORRNORM_EXP{0:d}_stack.fits".format(exp+1)
newcube="DATACUBE_FINAL_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(exp+1)
newimage="IMAGE_FOV_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(exp+1)
masknative="MASK_EXP{0:d}_ILLCORR_native.fits".format(exp+1)
maskedges="MASK_EXP{0:d}_ILLCORR_edges.fits".format(exp+1)
outcorr="ILLCORR_EXP{0:d}_stack.fits".format(exp+1)
make_illcorr_stack(ifumask_cname,ifumask_iname,data_cname,\
data_iname,outcorr,newcube,newimage,masknative,maskedges)
#back to top for next OB
os.chdir(topdir)
def make_illcorr_ifu(ifumask_cname,ifumask_iname,data_cname,data_iname,outcorr,outcorrnorm,newcube,
newimage,binwidth,debug=False):
"""
Perform illumination correction on IFUs in wavelength bins
ifumask_cname,ifumask_iname --> IFU mask cube and image names
data_cname,data_iname --> data cube and image names
outcorr,outcorrnorm --> correction save name
newcube,newimage --> data cube and image names for wave dep IFU corrections
    binwidth --> size of the chunks in the z-direction used for computing the illumination correction
debug --> enable interactive displays
"""
import os
import glob
import subprocess
import shutil
from astropy.io import fits
import muse_utils as mut
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.signal as sgn
from scipy.stats import sigmaclip
from scipy import interpolate
import sep
#open the ifu mask to create a good mask
data=fits.open(data_cname)
ifimask=fits.open(ifumask_iname)
fovdata=fits.open(data_iname)
#define geometry
nwave=data[1].header["NAXIS3"]
nx=data[1].header["NAXIS1"]
ny=data[1].header["NAXIS2"]
#now flag the sources
ifumsk=ifimask[1].data
image=fovdata[1].data.byteswap().newbyteorder()
bkg=sep.Background(image)
bkg.subfrom(image)
obj,segmap=sep.extract(image,3.*bkg.globalrms,minarea=10,segmentation_map=True)
#manual reset segmap
#reset=np.where(segmap==20)
#segmap[reset]=0
    #make a coarse illumination correction in wavelength
    nbins=nwave//binwidth
illcorse=np.zeros((nbins,24))
illnorm=np.zeros((nbins,24))
illsmoo=np.zeros((nbins,24))
cbins=np.array(range(nbins))*binwidth+binwidth/2.
#skip if already processed
if not os.path.isfile(outcorr):
if(debug):
plt.imshow(image,origin='low')
plt.title('Field')
plt.show()
plt.imshow(segmap,origin='low')
plt.title('Source mask')
plt.show()
plt.imshow(ifumsk,origin='low')
plt.title('IFU mask')
plt.show()
#pixels used
usedpix=np.zeros((ny,nx))
#loop over ifus
for iff in range(24):
print ('Computing correction for IFU {}'.format(iff+1))
#reconstruct id of pixels in this IFU
flagvalue = (iff+1)*100.
#pick pixels in this group and without sources
#these are x,y in 2D image
goodpx=np.nonzero(((ifimask[1].data == flagvalue+1) |
(ifimask[1].data == flagvalue+2) |
(ifimask[1].data == flagvalue+3) |
(ifimask[1].data == flagvalue+4)) & (segmap < 1))
usedpix[goodpx]=1
#loop over bins
for bb in range(nbins):
#get the start end index
wstart=bb*binwidth
wend=(bb+1)*binwidth
#sum all in wave
img=np.nansum(data[1].data[wstart:wend,:,:],axis=0)/binwidth
#take median across spatial pixels
illcorse[bb,iff]=np.nanmedian(img[goodpx])
#compute robust mean - nans already excluded [does not perform very well]
#c,l,u=sigmaclip(img[goodpx],3.,3.)
#illcorse[bb,iff]=c.mean()
#save
hdu = fits.PrimaryHDU(illcorse)
hdulist = fits.HDUList([hdu])
hdulist.writeto(outcorr,clobber=True)
if(debug):
plt.imshow(usedpix,origin='low')
plt.title('Pixels used for IFU correction')
plt.show()
else:
print('Loading pre-computed corrections')
illcorse=(fits.open(outcorr))[0].data
#skip if already exists
if not os.path.isfile(newcube):
#next go for ifus normalisation given median
for iff in range(24):
#normalise
illnorm[:,iff]=illcorse[:,iff]/np.nanmedian(illcorse,axis=1)
            #remove small-scale bumps - [does not work well for discontinuities]
#illsmoo[:,iff]=sgn.savgol_filter(illnorm[:,iff],5,1)
#best to linear interpolate
illsmoo[:,iff]=illnorm[:,iff]
if(debug):
plt.scatter(cbins,illnorm[:,iff])
plt.plot(cbins,illsmoo[:,iff])
plt.title("Corrections for IFU {}".format(iff+1))
plt.show()
#save corrections
hdu1 = fits.PrimaryHDU(illnorm)
hdu2 = fits.ImageHDU(illsmoo)
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(outcorrnorm,clobber=True)
#store old cube to check final normalisation
oldcube=np.copy(data[1].data)
#now apply
for iff in range(24):
print ('Correct IFUs {}'.format(iff+1))
if(iff < 23):
#first, interpolation along ifus
x_current_ifu=(iff+1)*100.
x_next_ifu=(iff+2)*100.
#grab relevant pixels
goodpx=np.where((ifimask[1].data >=x_current_ifu) & (ifimask[1].data < x_next_ifu))
fcurrent=interpolate.interp1d(cbins,illsmoo[:,iff],fill_value="extrapolate")
fnext=interpolate.interp1d(cbins,illsmoo[:,iff+1],fill_value="extrapolate")
#loop over wave and apply correction
for ww in range(nwave):
y_current=fcurrent(ww)
y_next=fnext(ww)
slope=((y_next-y_current)/(x_next_ifu-x_current_ifu))
correction=y_current+slope*(ifimask[1].data[goodpx]-x_current_ifu)
#apply correction to data
img=data[1].data[ww,:,:]
img[goodpx]=img[goodpx]/correction
data[1].data[ww,:,:]=img
#preserve SN
var=data[2].data[ww,:,:]
var[goodpx]=var[goodpx]/correction/correction
data[2].data[ww,:,:]=var
else:
#deal with last - simple correction with no interpolation
x_current_ifu=(iff+1)*100.
goodpx=np.where((ifimask[1].data >=x_current_ifu))
fcurrent=interpolate.interp1d(cbins,illsmoo[:,iff],fill_value="extrapolate")
for ww in range(nwave):
#apply to data
img=data[1].data[ww,:,:]
img[goodpx]=img[goodpx]/fcurrent(ww)
data[1].data[ww,:,:]=img
#preserve SN
var=data[2].data[ww,:,:]
var[goodpx]=var[goodpx]/fcurrent(ww)/fcurrent(ww)
data[2].data[ww,:,:]=var
#finally, check for normalisation
print ('Checking flux normalisation...')
white_old=np.zeros((ny,nx))
white_new=np.zeros((ny,nx))
for xx in range(nx):
for yy in range(ny):
white_old[yy,xx]=np.nansum(oldcube[:,yy,xx])/nwave
white_new[yy,xx]=np.nansum(data[1].data[:,yy,xx])/nwave
#renormalise on sky only
goodpx=np.where((segmap==0)&(np.isfinite(ifimask[1].data)))
#oldcoeff=np.nanmedian(white_old[goodpx])
#newcoeff=np.nanmedian(white_new[goodpx])
#print ('Renormalise by {}'.format(oldcoeff/newcoeff))
#data[1].data=data[1].data*oldcoeff/newcoeff
#data[2].data=data[2].data*(oldcoeff/newcoeff)*(oldcoeff/newcoeff)
renormcoeff=np.nanmedian(white_old[goodpx]/white_new[goodpx])
print ('Renormalise by {}'.format(renormcoeff))
data[1].data=data[1].data*renormcoeff
data[2].data=data[2].data*renormcoeff*renormcoeff
#save new cubes
data.writeto(newcube,clobber=True)
#create white image
print ('Creating final white image')
white_new=np.zeros((ny,nx))
for xx in range(nx):
for yy in range(ny):
white_new[yy,xx]=np.nansum(data[1].data[:,yy,xx])/nwave
#save image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(white_new)
hdu2.header=data[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(newimage,clobber=True)
else:
print ("Exposure already corrected for IFU illumination... move to next")
def make_illcorr_stack(ifumask_cname,ifumask_iname,data_cname,data_iname,outcorr,
newcube,newimage,masknative,maskedges,debug=False):
"""
Perform illumination correction on stacks on white image only
ifumask_cname,ifumask_iname --> IFU mask cube and image names
data_cname,data_iname --> data cube and image names
outcorr --> correction save name
newcube,newimage --> data cube and image names for white image stack corrections
    masknative --> in output, mask of native pixels which have not been interpolated
maskedges --> in output, mask of stack edges
debug --> enable interactive displays
"""
import os
import glob
import subprocess
import shutil
from astropy.io import fits
import muse_utils as mut
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.signal as sgn
from scipy.stats import sigmaclip
from scipy import interpolate
import sep
if not os.path.isfile(newcube):
#open the ifu mask to create a good mask
data=fits.open(data_cname)
ifimask=fits.open(ifumask_iname)
fovdata=fits.open(data_iname)
#define geometry
nwave=data[1].header["NAXIS3"]
nx=data[1].header["NAXIS1"]
ny=data[1].header["NAXIS2"]
#now flag the sources
ifumsk=ifimask[1].data
image=fovdata[1].data.byteswap().newbyteorder()
bkg=sep.Background(image)
bkg.subfrom(image)
obj,segmap=sep.extract(image,5.*bkg.globalrms,minarea=10,segmentation_map=True)
#remove illumination patterns that can be selected as sources
#by allowing very extended regions
for ii,pp in enumerate(obj):
if(pp['npix'] > 900):
#print ii, pp['npix']
pix=np.where(segmap == ii+1)
segmap[pix]=0
if(debug):
plt.imshow(image,origin='low')
plt.title('Field')
plt.show()
plt.imshow(segmap,origin='low')
plt.title('Source mask')
plt.show()
plt.imshow(ifumsk,origin='low')
plt.title('IFU mask')
plt.show()
        #now compute individual corrections and also prepare the mask of native pixels
#the step above removes any wave dependency
#now apply a stack by stack correction computed on white image
print('Computing correction for stacks on white image')
masknoninterp=np.zeros((ny,nx))
usedpix=np.zeros((ny,nx))
#renormalise on sky only
goodpx=np.where((segmap==0) & (np.isfinite(ifimask[1].data)))
medcoeff=np.nanmedian(fovdata[1].data[goodpx])
#now compute individual on stacks corrections
white_corrections=np.zeros((24,4))
for iff in range(24):
for i in range(4):
#reconstruct id of pixels in this IFU
flagvalue = (iff+1)*100.+i+1
#pick pixels in this group and without sources
#these are indexes in 2D image
goodpx=np.where((ifimask[1].data == flagvalue)&(segmap==0))
nonintpx=np.where((ifimask[1].data == flagvalue))
usedpix[goodpx]=1
masknoninterp[nonintpx]=1
white_corrections[iff,i]=medcoeff/np.nanmedian(fovdata[1].data[goodpx])
            #some display
if(debug):
plt.imshow(usedpix,origin='low')
plt.title('Pixels used for stack white correction')
plt.show()
plt.imshow(masknoninterp,origin='low')
plt.title('Pixels not interpolated')
plt.show()
#save products
hdu = fits.PrimaryHDU(white_corrections)
hdulist = fits.HDUList([hdu])
hdulist.writeto(outcorr,clobber=True)
#save image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(masknoninterp)
hdu2.header=data[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(masknative,clobber=True)
#next apply correction
maskpixedge=np.zeros((ny,nx))
#grab muse rotator for this exposure
rotation=data[0].header["HIERARCH ESO INS DROT POSANG"]
for iff in range(24):
#this/next ifu pixel
thisifu=(iff+1)*100.
nextifu=(iff+2)*100.
for i in range(4):
#reconstruct id of pixels in this/next stack
thisstack=(iff+1)*100.+i+1
nextstack=(iff+1)*100.+i+2
#pixels in this exact stack
instack=np.where(ifimask[1].data==thisstack)
#pixels in this IFUs (also interpolated)
inifu=np.where((ifimask[1].data>=thisifu) & (ifimask[1].data<nextifu))
#first find left-right edges of the stacks - this is dependent on rotation
if((rotation == 0.) | (rotation == 180.) | (rotation == 360.)):
#find edges with buffer
left=np.min(instack[1])
right=np.max(instack[1])
bottom=np.min(inifu[0])
top=np.max(inifu[0])
maskpixedge[bottom:top,left+2:right-2]=1
#apply without interpolation
#apply to data
data[1].data[:,bottom:top,left:right]=data[1].data[:,bottom:top,left:right]*white_corrections[iff,i]
#preserve SN
data[2].data[:,bottom:top,left:right]=data[2].data[:,bottom:top,left:right]*\
white_corrections[iff,i]*white_corrections[iff,i]
elif((rotation == 90.) | (rotation == 270.)):
left=np.min(instack[0])
right=np.max(instack[0])
bottom=np.min(inifu[1])
top=np.max(inifu[1])
maskpixedge[left+2:right-2,bottom:top]=1
#apply without interpolation
#apply to data
data[1].data[:,left:right,bottom:top]=data[1].data[:,left:right,bottom:top]*white_corrections[iff,i]
#preserve SN
data[2].data[:,left:right,bottom:top]=data[2].data[:,left:right,bottom:top]*\
white_corrections[iff,i]*white_corrections[iff,i]
else:
print("Cannot handle rotation {}... quit!".format(rotation))
exit()
if(debug):
plt.imshow(maskpixedge,origin='low')
plt.title("Edge mask")
plt.show()
#save edge mask
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(maskpixedge)
hdu2.header=data[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(maskedges,clobber=True)
#save new cubes
data.writeto(newcube,clobber=True)
#create white image
print ('Creating final white image')
white_new=np.zeros((ny,nx))
for xx in range(nx):
for yy in range(ny):
white_new[yy,xx]=np.nansum(data[1].data[:,yy,xx])/nwave
#save image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(white_new)
hdu2.header=data[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(newimage,clobber=True)
else:
print ("Exposure already corrected... go to next")
def internalskysub(listob,skymask,deepwhite=None):
"""
Perform sky-subtraction using pixels within the cube
listob -> OBs to loop on
    skymask -> if set to a ds9 region file (image coordinates),
compute sky in these regions (excluding sources)
Otherwise mask sources and use all the pixels in the field.
"""
import os
import glob
from astropy.io import fits
import numpy as np
import zap
import matplotlib.pyplot as plt
import sep
#grab top dir
topdir=os.getcwd()
#now loop over each folder and make the final illcorrected cubes
for ob in listob:
#change dir
os.chdir(ob+'/Proc/Line/')
print('Processing {} for sky subtraction correction'.format(ob))
#Search how many exposures are there
scils=glob.glob("../Basic/OBJECT_RED_0*.fits*")
nsci=len(scils)
#loop on exposures and reduce frame with zeroth order sky subtraction + ZAP
for exp in range(nsci):
#do pass on IFUs
print('Internal sky subtraction of exposure {}'.format(exp+1))
#define names
oldcube="DATACUBE_FINAL_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(exp+1)
oldimage="IMAGE_FOV_LINEWCS_EXP{0:d}_ILLCORR_stack.fits".format(exp+1)
newcube="DATACUBE_FINAL_LINEWCS_EXP{0:d}_lineskysub.fits".format(exp+1)
newimage="IMAGE_FOV_LINEWCS_EXP{0:d}_lineskysub.fits".format(exp+1)
ifumask_iname="IMAGE_IFUMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
source_mask="IMAGE_SOURCEMASK_LINEWCS_EXP{0:d}.fits".format(exp+1)
zapcube="DATACUBE_FINAL_LINEWCS_EXP{0:d}_zapsky.fits".format(exp+1)
zapimage="IMAGE_FOV_LINEWCS_EXP{0:d}_zapsky.fits".format(exp+1)
zapsvdout="ZAPSVDOUT_EXP{0:d}.fits".format(exp+1)
if not os.path.isfile(zapcube):
#open the cube
cube=fits.open(oldcube)
#open mask ifu
ifumask=fits.open(ifumask_iname)
#if white image provided load it
if(deepwhite):
print("Use source mask image {}".format(deepwhite))
whsrc=fits.open(topdir+'/'+deepwhite)
whitesource=whsrc[0].data.byteswap().newbyteorder()
else:
#create from cube
print("Create source mask image from cube")
whitesource=np.nanmedian(cube[1].data,axis=0)
#now create a source mask
print ('Create a source mask')
header=cube[1].header
bkg = sep.Background(whitesource)
bkg_subtraced_data = whitesource - bkg.back()
thresh = 3. * bkg.globalrms
minarea=20.
clean=True
segmap = np.zeros((header["NAXIS2"],header["NAXIS1"]))
#extract objects
objects,segmap=sep.extract(bkg_subtraced_data,thresh,segmentation_map=True,
minarea=minarea,clean=clean)
#plt.imshow(segmap,origin='low')
#plt.show()
#plt.imshow(whitesource,origin='low')
#plt.show()
#define geometry
nwave=cube[1].header["NAXIS3"]
nx=cube[1].header["NAXIS1"]
ny=cube[1].header["NAXIS2"]
#make sure pixels are sky sub once and only once
countsub=np.copy(ifumask[1].data)*0.
#if mask is set do a coarse median sky subtraction
if(skymask):
print('Constructing sky mask')
#for zap, sky region should be 0, and sources >1
skybox=np.zeros((ny,nx))+1
#construct the sky region mask
from mypython.fits import pyregmask as pmk
mysky=pmk.PyMask(nx,ny,"../../../"+skymask,header=cube[1].header)
for ii in range(mysky.nreg):
mysky.fillmask(ii)
usepix=np.where(mysky.mask > 0)
skybox[usepix]=0
#plt.imshow(skybox,origin='low')
#plt.show()
#plt.imshow(segmap,origin='low')
#plt.show()
#plt.imshow(ifumask[1].data,origin='low')
#plt.show()
#exit()
#now do median sky subtraction
#loop over wavelength
for ww in range(nwave):
#extract sky slice
skyimg=cube[1].data[ww,:,:]
#grab pixels with no source and in mask region
#avoid edges not flagged by IFU mask
pixels=np.where((skybox<1)&(segmap < 1)&(ifumask[1].data>0))
#compute sky in good regions
medsky=np.nanmedian(skyimg[pixels])
#subtract from all pixels
cube[1].data[ww,:,:]=skyimg-medsky
else:
#otherwise do coarse sky IFU by IFU
#loop over ifu
for iff in range(24):
thisifu=(iff+1)*100.
nextifu=(iff+2)*100.+1
#grab pixels in ifu without sources
pixels=np.where((ifumask[1].data >= thisifu) & \
(ifumask[1].data < nextifu)\
& (segmap < 1) )
pixels_ifu=np.where((ifumask[1].data >= thisifu) \
& (ifumask[1].data < nextifu)\
& (countsub < 1))
#update used pixels
countsub[pixels_ifu]=1
#loop over wavelength
for ww in range(nwave):
skyimg=cube[1].data[ww,:,:]
#compute sky in good regions
medsky=np.nanmedian(skyimg[pixels])
#subtract from all IFU pixels
skyimg[pixels_ifu]=skyimg[pixels_ifu]-medsky
cube[1].data[ww,:,:]=skyimg
#write final cube
cube.writeto(newcube,clobber=True)
#create white image
print ('Creating final white image')
white_new=np.zeros((ny,nx))
for xx in range(nx):
for yy in range(ny):
white_new[yy,xx]=np.nansum(cube[1].data[:,yy,xx])/nwave
#save projected image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(white_new)
hdu2.header=cube[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(newimage,clobber=True)
#save segmap
#make it redundant to be sure ZAP reads the right extension
hdu1 = fits.PrimaryHDU(segmap)
#hdu1.header=header
hdu2 = fits.ImageHDU(segmap)
#hdu2.header=header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(source_mask,clobber=True)
print('Running ZAP on exposure {}'.format(exp+1))
#deal with masks
if(skymask):
#combine sky mask with source mask
#make it redundant to be sure ZAP reads the right extension
tmpzapmask=segmap+skybox
hdu1 = fits.PrimaryHDU(tmpzapmask)
#hdu1.header=header
hdu2 = fits.ImageHDU(tmpzapmask)
#hdu2.header=header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto("ZAP_"+source_mask,clobber=True)
zapmask="ZAP_"+source_mask
else:
zapmask=source_mask
#clean old if exists
try:
os.remove(zapsvdout)
except:
pass
#run new - handle change in keywords from v1 to v2
try:
zap.process(newcube,outcubefits=zapcube,clean=True,svdoutputfits=zapsvdout,mask=zapmask)
except:
zap.process(newcube,outcubefits=zapcube,clean=True,mask=zapmask)
#create white image from zap cube
cube=fits.open(zapcube)
print ('Creating final white image from ZAP')
white_new=np.zeros((ny,nx))
for xx in range(nx):
for yy in range(ny):
white_new[yy,xx]=np.nansum(cube[1].data[:,yy,xx])/nwave
#save projected image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(white_new)
hdu2.header=cube[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto(zapimage,clobber=True)
else:
print("ZAP cube exist alread for exposure {}... skip!".format(exp+1))
#back to top for next OB
os.chdir(topdir)
def combine_cubes(listcubes,listmasks,regions=True):
"""
Combine cubes with mean or with median statistics.
Apply masks as desired.
listcubes -> a file listing the cubes to use in the combine
listmasks -> a file listing the goodpix masks from the pipeline
regions -> if True, code searches for ds9 region files inside path with same
name as pipeline mask (.reg), to mask additional area that one wants
to clip
"""
from astropy.io import fits
import numpy as np
import scipy
import os
import matplotlib.pyplot as plt
from mypython.fits import pyregmask as msk
if(os.path.isfile("COMBINED_CUBE_MED.fits") & os.path.isfile("COMBINED_CUBE.fits") ):
print ("Coadded cubes already exists!")
return
#continue with implicit else if passed the checks
if(regions):
print ("Updating the masks following ds9 regions")
#loads list
clistmask=np.loadtxt(listmasks,dtype=np.dtype('a'))
#redefine new mask
mask_new="new_"+listmasks
llms=open(mask_new,"w")
#loop over and update with regions
#if scalar, make it 1 element list
if(clistmask.shape == ()):
clistmask=[clistmask]
for i,cmask in enumerate(clistmask):
#create region name
regname_line=(cmask.split(".fits")[0])+".reg"
#reconstruct cubex region name
rnpath=(cmask.split("MASK")[0])
rnexp=(cmask.split("_")[1])
regname_cubex=rnpath+"DATACUBE_FINAL_LINEWCS_"+rnexp+"_fix2_SliceEdgeMask.reg"
#search if file exist
if(os.path.isfile(regname_line)):
regname=regname_line
elif(os.path.isfile(regname_cubex)):
regname=regname_cubex
else:
regname=None
if(regname):
#update the mask
print ("Updating mask using {}".format(regname))
#open fits
cfits=fits.open(cmask)
#init reg mask
Mask = msk.PyMask(cfits[1].header["NAXIS1"],cfits[1].header["NAXIS2"],regname)
for ii in range(Mask.nreg):
Mask.fillmask(ii)
if(ii == 0):
totmask=Mask.mask
else:
totmask+=Mask.mask
#update the mask
cfits[1].data=cfits[1].data*1*np.logical_not(totmask)
savename=cmask.split(".fits")[0]+'_wreg.fits'
cfits.writeto(savename,clobber=True)
llms.write(savename+'\n')
else:
#keep current mask
llms.write(cmask+'\n')
#done with new masks
llms.close()
else:
print ('Using original masks...')
mask_new=listmasks
print ("Combining cubes with mean and median")
#load the relevant lists
cblis=open(listcubes)
mklis=open(mask_new)
allcubes=[]
allmasks=[]
for cc in cblis:
allcubes.append(fits.open(cc.strip()))
for mm in mklis:
allmasks.append(fits.open(mm.strip()))
cblis.close()
mklis.close()
#generate list of cubes
nexp=len(allcubes)
print ('Coadding {} exposures...'.format(nexp))
#make space for final grid
finalcube_mean=np.copy((allcubes[1])[1].data)
finalvar=np.copy((allcubes[1])[2].data)
finalcube_median=np.copy((allcubes[1])[1].data)
#grab info on pixels
nx=(allcubes[1])[1].header["NAXIS1"]
ny=(allcubes[1])[1].header["NAXIS2"]
nw=(allcubes[1])[1].header["NAXIS3"]
#giant for loop over wave,pix
print ('Working on {} slices...'.format(nw))
piximage=np.zeros((nexp,ny,nx))
varimage=np.zeros((nexp,ny,nx))
mskimage=np.zeros((nexp,ny,nx))
masknans=np.zeros((ny,nx))
for ww in range(nw):
#print (' {} '.format(ww+1),end='')
#now loop over exposure
for ee in range(nexp):
piximage[ee,:]=(allcubes[ee])[1].data[ww,:]
varimage[ee,:]=(allcubes[ee])[2].data[ww,:]
#clean nan
masknans=masknans*0
notnans=np.where(np.isfinite(piximage[ee,:]))
masknans[notnans]=1
#goodpix masks flag good pixels with 1; after logical_not, 1 marks bad pixels for the masked array
mskimage[ee,:]=np.logical_not(((allmasks[ee])[1].data)*masknans)
#construct masked arrays
pixmasked=np.ma.array(piximage,mask=mskimage)
varmasked=np.ma.array(varimage,mask=mskimage)
#make coadds with masking
finalcube_median[ww,:]=np.ma.median(pixmasked,axis=0)
finalcube_mean[ww,:]=np.ma.mean(pixmasked,axis=0)
countmap=np.ma.count(varmasked,axis=0)
finalvar[ww,:]=np.ma.sum(varmasked,axis=0)/countmap/countmap
#write
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(finalcube_mean)
hdu3 = fits.ImageHDU(finalvar)
hdu2.header=(allcubes[0])[1].header
hdu3.header=(allcubes[0])[2].header
hdulist = fits.HDUList([hdu1,hdu2,hdu3])
hdulist.writeto("COMBINED_CUBE.fits",clobber=True)
#write
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(finalcube_median)
hdu3 = fits.ImageHDU(finalvar)
hdu2.header=(allcubes[0])[1].header
hdu3.header=(allcubes[0])[2].header
hdulist = fits.HDUList([hdu1,hdu2,hdu3])
hdulist.writeto("COMBINED_CUBE_MED.fits",clobber=True)
#make white images
print ('Creating final white images')
white_mean=np.zeros((ny,nx))
white_med=np.zeros((ny,nx))
for xx in range(nx):
for yy in range(ny):
white_mean[yy,xx]=np.sum(finalcube_mean[:,yy,xx])/nw
white_med[yy,xx]=np.sum(finalcube_median[:,yy,xx])/nw
#save projected image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(white_mean)
hdu2.header=(allcubes[0])[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto("COMBINED_IMAGE.fits",clobber=True)
#save projected image
hdu1 = fits.PrimaryHDU([])
hdu2 = fits.ImageHDU(white_med)
hdu2.header=(allcubes[0])[1].header
hdulist = fits.HDUList([hdu1,hdu2])
hdulist.writeto("COMBINED_IMAGE_MED.fits",clobber=True)
| gpl-2.0 |
jkoelker/python-eoddata | eoddata/datareader.py | 1 | 9079 | # -*- coding: utf-8 -*-
import os
import logging
import pandas as pd
import pytz
from tzlocal import windows_tz
import appdirs
import ws
LOG = logging.getLogger(__name__)
_TYPE_MAP = {'integer': int,
'unicode': str,
'string': str,
'boolean': bool,
'datetime': 'M8[ns]'}
def file_name(name, format):
return '.'.join((name, format))
def get_file(name, expiration=None):
if not os.path.exists(name):
return
expiration = pd.core.datetools.to_offset(expiration)
if expiration:
mtime = pd.datetime.utcfromtimestamp(os.path.getmtime(name))
if (pd.datetime.now() - mtime) >= expiration:
return
return name
def cleanup(data):
types = data.apply(lambda x: pd.lib.infer_dtype(x.values))
for type_name, type_type in _TYPE_MAP.iteritems():
for col in types[types == type_name].index:
data[col] = data[col].astype(type_type)
return data
def timetastic(ts, tz=None):
if ts is None:
return ts
ts = pd.to_datetime(ts)
if tz is not None and (not hasattr(ts, 'tzinfo') or ts.tzinfo is None):
ts = ts.tz_localize(tz)
return ts
class Manager(object):
def __init__(self, client):
self.client = client
def _last_trade_date(self, exchange, expiration='1d'):
exchanges = self.exchanges(expiration=expiration)
return exchanges[exchange]['last_trade_date_time']
def exchange_tz(self, exchange, exchanges=None):
# NOTE(jkoelker) EODData's service is windows based, convert times here
if exchanges is None:
exchanges = self.exchanges()
exchange_tz = exchanges[exchange]['time_zone']
return pytz.timezone(windows_tz.tz_names[exchange_tz])
def exchanges(self, expiration='1d'):
LOG.info("Getting Exchanges")
exchanges = self.client.exchanges()
for exchange in exchanges:
exchange_tz = self.exchange_tz(exchange, exchanges=exchanges)
for col in ('intraday_start_date', 'last_trade_date_time'):
exchanges[exchange][col] = timetastic(exchanges[exchange][col],
tz=exchange_tz)
return pd.DataFrame(exchanges)
def symbols(self, exchange, expiration='1d'):
LOG.info("Getting Symbols for exchange %s" % exchange)
return pd.DataFrame(self.client.symbols(exchange))
def history(self, exchange, symbol, start, end=None, period='d'):
symbols = self.symbols(exchange)
if symbol not in symbols:
return pd.DataFrame()
tz = self.exchange_tz(exchange)
start = timetastic(start, tz)
end = timetastic(end, tz)
exchange_end = self._last_trade_date(exchange)
if end > exchange_end:
end = exchange_end
LOG.info("Getting History for %s:%s from %s to %s" % (exchange, symbol,
start, end))
history = self.client.history(exchange, symbol, start, end, period)
if not history:
return pd.DataFrame()
history = pd.DataFrame.from_records(history, index='date_time')
# NOTE(jkoelker) Sometimes we'll get an extra period back
if end is not None:
history = history[history.index <= end]
history.index = history.index.tz_localize(tz)
return history
def open(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
class CacheManager(Manager):
def __init__(self, client, directory=None, name='eoddata',
*args, **kwargs):
Manager.__init__(self, client)
if directory is None:
directory = appdirs.user_cache_dir(name)
self.directory = directory
if not os.path.exists(self.directory):
os.makedirs(self.directory)
class PickleCache(CacheManager):
@staticmethod
def _get_key(*parts):
return '/'.join(parts)
def _get_file(self, key, create=True):
filename = '.'.join(('/'.join((self.directory, key)), 'pkl'))
if create:
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
return filename
def _can_haz_cache(self, key, expiration=None):
filename = self._get_file(key)
if not os.path.exists(filename):
return False
if expiration is None:
return True
mtime = pd.to_datetime(os.path.getmtime(filename), unit='s')
expiration = pd.core.datetools.to_offset(expiration)
if (pd.datetime.now() - mtime) < expiration.delta:
return True
return False
def exchanges(self, expiration='1d'):
key = 'exchanges'
filename = self._get_file(key)
if self._can_haz_cache(key, expiration):
return pd.read_pickle(filename)
exchanges = CacheManager.exchanges(self, expiration)
exchanges.to_pickle(filename)
return exchanges
# TODO(jkoelker) handle rename/delisting and the like
def symbols(self, exchange, expiration='1d'):
key = self._get_key('symbols', exchange)
filename = self._get_file(key)
if self._can_haz_cache(key, expiration):
return pd.read_pickle(filename)
symbols = CacheManager.symbols(self, exchange, expiration)
symbols.to_pickle(filename)
return symbols
def _history(self, exchange, symbol, start, end=None, period='d'):
return CacheManager.history(self, exchange, symbol, start, end, period)
def history(self, exchange, symbol, start, end=None, period='d'):
tz = self.exchange_tz(exchange)
start = timetastic(start, tz)
end = timetastic(end, tz)
period_key = 'period_%s' % period
key = self._get_key('history', exchange, symbol, period_key)
filename = self._get_file(key)
exchange_end = self._last_trade_date(exchange)
if end is not None and end > exchange_end:
end = exchange_end
if not self._can_haz_cache(key):
history = self._history(exchange, symbol, start, end, period)
if history.empty:
return history
if os.path.exists(filename):
cached_history = pd.read_pickle(filename)
cached_history.combine_first(history).to_pickle(filename)
else:
history.to_pickle(filename)
return history
cached_history = pd.read_pickle(filename)
if end is None:
now = timetastic(pd.datetime.now(), tz)
history = cached_history.ix[start:now]
else:
# NOTE(jkoelker) String date indexing allows any time on the date
history = cached_history.ix[str(start):str(end.date())]
if not history.empty:
first_record = history.index[0]
last_record = history.index[-1]
# TODO(jkoelker) handle missing intraday data
if start.date() < first_record.date():
new_history = self._history(exchange, symbol, start,
first_record, period)
if not new_history.empty:
cached_history = cached_history.combine_first(new_history)
cached_history.to_pickle(filename)
history = history.combine_first(new_history)
if end is None:
search_end = timetastic(pd.datetime.now(), tz)
else:
search_end = end
if last_record.date() < search_end.date():
new_history = self._history(exchange, symbol, last_record,
search_end, period)
if not new_history.empty:
cached_history = cached_history.combine_first(new_history)
cached_history.to_pickle(filename)
history = history.combine_first(new_history)
return history
class DataReader(object):
def __init__(self, username, password, cache=None):
client = None
# a web-service client is needed unless a ready-made Manager is passed in
if not isinstance(cache, Manager):
client = ws.Client(username, password)
self.datasource = None
if not cache:
self.datasource = Manager(client)
elif cache is True:
self.datasource = PickleCache(client)
else:
self.datasource = cache
def __call__(self, exchange, symbol, start, end=None, period='d',
full_history=True):
tz = self.datasource.exchange_tz(exchange)
start = timetastic(start, tz)
end = timetastic(end, tz)
history = self.datasource.history(exchange, symbol, start, end, period)
return history
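# Hedged usage sketch (the credentials, exchange code and symbol below are
# hypothetical placeholders; cache=True selects the on-disk PickleCache):
#
#   reader = DataReader('myuser', 'mypassword', cache=True)
#   history = reader('NYSE', 'IBM', '2014-01-01', end='2014-06-30')
#   history.tail()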
def data(datareader, exchange, symbol, start, end, period):
for history in datareader(exchange, symbol, start, end, period).iterrows():
yield history
| mit |
shaunstanislaus/pandashells | pandashells/test/lomb_scargle_lib_tests.py | 10 | 3222 | #! /usr/bin/env python
from unittest import TestCase
import pandas as pd
import numpy as np
from pandashells.lib.lomb_scargle_lib import (
_next_power_two,
_compute_pad,
_compute_params,
lomb_scargle,
)
class NextPowerTwoTest(TestCase):
def test_proper_return(self):
past_100 = _next_power_two(100)
past_1000 = _next_power_two(1000)
self.assertEqual(past_100, 128)
self.assertEqual(past_1000, 1024)
class ComputePadTest(TestCase):
def test_exp0(self):
t = np.linspace(0, 10, 101)
t_pad, y_pad = _compute_pad(t)
dt = np.diff(t_pad)[-1]
self.assertAlmostEqual(dt, 0.1)
self.assertEqual(len(t_pad) + len(t), 128)
self.assertEqual(set(y_pad), {0.})
def test_exp2(self):
t = np.linspace(0, 10, 101)
t_pad, y_pad = _compute_pad(t, interp_exponent=2)
dt = np.diff(t_pad)[-1]
self.assertAlmostEqual(dt, 0.1)
self.assertEqual(len(t_pad) + len(t), 512)
self.assertEqual(set(y_pad), {0.})
class ComputeParamsTest(TestCase):
def test_proper_return(self):
t = np.linspace(0, 10, 101)
min_freq, d_freq, N = _compute_params(t)
self.assertAlmostEqual(min_freq, .1)
self.assertAlmostEqual(d_freq, 0.049504950495)
self.assertAlmostEqual(N, 101)
class LombScargleTest(TestCase):
def test_no_pad(self):
t = np.linspace(0, 10, 256)
y = 7 * np.sin(2 * np.pi * t)
df_in = pd.DataFrame({'t': t, 'y': y})
df = lomb_scargle(df_in, 't', 'y')
max_rec = df[df.amp == df.amp.max()].iloc[0]
self.assertTrue(all([x > 0 for x in df.period.diff().dropna()]))
self.assertAlmostEqual(max_rec['amp'], 7, places=0)
self.assertAlmostEqual(max_rec['power'], 49, places=0)
self.assertAlmostEqual(max_rec['period'], 1, places=0)
self.assertAlmostEqual(max_rec['freq'], 1, places=0)
self.assertEqual(len(df), 256)
def test_with_pad(self):
t = np.linspace(0, 10, 256)
y = 7 * np.sin(2 * np.pi * t)
df_in = pd.DataFrame({'t': t, 'y': y})
df = lomb_scargle(df_in, 't', 'y', interp_exponent=1)
max_rec = df[df.amp == df.amp.max()].iloc[0]
self.assertTrue(all([x > 0 for x in df.period.diff().dropna()]))
self.assertAlmostEqual(max_rec['amp'], 7, places=0)
self.assertAlmostEqual(max_rec['power'], 49, places=0)
self.assertAlmostEqual(max_rec['period'], 1, places=0)
self.assertAlmostEqual(max_rec['freq'], 1, places=0)
self.assertEqual(len(df), 512)
def test_freq_order(self):
t = np.linspace(0, 10, 256)
y = 7 * np.sin(2 * np.pi * t)
df_in = pd.DataFrame({'t': t, 'y': y})
df = lomb_scargle(df_in, 't', 'y', freq_order=True)
max_rec = df[df.amp == df.amp.max()].iloc[0]
self.assertTrue(all([x > 0 for x in df.freq.diff().dropna()]))
self.assertAlmostEqual(max_rec['amp'], 7, places=0)
self.assertAlmostEqual(max_rec['power'], 49, places=0)
self.assertAlmostEqual(max_rec['period'], 1, places=0)
self.assertAlmostEqual(max_rec['freq'], 1, places=0)
self.assertEqual(len(df), 256)
| bsd-2-clause |
rknLA/sms-tools | lectures/08-Sound-transformations/plots-code/sineModelFreqScale-orchestra.py | 21 | 2666 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
import sineTransformations as SMT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
freqScaling = np.array([0, .8, 1, 1.2])
ytfreq = SMT.sineFreqScaling(tfreq, freqScaling)
y = SM.sineModelSynth(ytfreq, tmag, np.array([]), Ns, H, fs)
mY, pY = STFT.stftAnal(y, fs, w, N, H)
UF.wavwrite(y,fs, 'sineModelFreqScale-orchestra.wav')
maxplotfreq = 4000.0
plt.figure(1, figsize=(9.5, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (orchestra.wav)')
plt.subplot(4,1,2)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,3)
numFrames = int(ytfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = ytfreq*np.less(ytfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('freq-scaled sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('sineModelFreqScale-orchestra.png')
plt.show()
| agpl-3.0 |
ElOceanografo/PyCWT | pycwt/pycwt.py | 1 | 25290 | '''
Python Continuous Wavelet Transform
===================================
This module implements the Continuous Wavelet Transform (CWT).
Mathematics are taken from Torrence and Compo 1998, and this Python
code is a significantly refactored (though (hopefully!) mathematically
identical) version of their Matlab original.
The following copyright notice appears in the original:
--------------------------------------------------------------------------
Copyright (C) 1995-2004, Christopher Torrence and Gilbert P. Compo
This software may be used, copied, or redistributed as long as it is not
sold and this copyright notice is reproduced on each copy made. This
routine is provided as is without any express or implied warranties
whatsoever.
Notice: Please acknowledge the use of the above software in any publications:
``Wavelet software was provided by C. Torrence and G. Compo,
and is available at URL: http://paos.colorado.edu/research/wavelets/.
Reference: Torrence, C. and G. P. Compo, 1998: A Practical Guide to
Wavelet Analysis. <I>Bull. Amer. Meteor. Soc.</I>, 79, 61-78.
Please send a copy of such publications to either C. Torrence or G. Compo:
Dr. Christopher Torrence Dr. Gilbert P. Compo
Research Systems, Inc. Climate Diagnostics Center
4990 Pearl East Circle 325 Broadway R/CDC1
Boulder, CO 80301, USA Boulder, CO 80305-3328, USA
E-mail: chris[AT]rsinc[DOT]com E-mail: compo[AT]colorado[DOT]edu
--------------------------------------------------------------------------
A number of useful functions have been added, including cross-wavelet analysis,
a non-parametric significance testing procedure, functions for time- and
scale-averageing, and plotting methods.
'''
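# Hedged usage sketch (the import path, sampling step and toy signal below
# are assumptions for illustration, not part of the original module):
#
#   import numpy as np
#   from pycwt import cwt, Morlet    # adjust to however this module is installed
#   t = np.arange(1024) * 0.25
#   x = np.sin(2 * np.pi * t / 8.) + 0.3 * np.random.randn(1024)
#   wt = cwt(x, Morlet(), dscale=0.25, dt=0.25)
#   wt.plot(norm=True)               # wavelet power, normalized by the variance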
import scipy as sp
from scipy.stats.distributions import chi2
from scipy import special
from scipy.special import gamma
from scipy.ndimage import convolve
# submodules used by p_cross_dist/q_cross_dist below (sp.integrate.quad, sp.optimize.fmin)
import scipy.integrate
import scipy.optimize
from correlations import acvf
import matplotlib.pyplot as plt
from matplotlib.mlab import fftsurr
class Wavelet(object):
'''
Object representing a mother wavelet.
'''
def __init__(self, fourier_factor, dofmin, coi_factor, C_delta, gamma, dj0):
self.fourier_factor = fourier_factor
self.dofmin = dofmin
self.coi_factor = coi_factor
self.C_delta = C_delta
self.gamma = gamma
self.dj0 = dj0
def daughter(self, scale, N, dt=1):
pass
class Paul(Wavelet):
def __init__(self, order=4):
'''
Object representing the mother Paul wavelet.
Parameters
----------
(for the __init__ method)
order : int
Shape parameter for the wavelet, defaults to 4.
'''
self.order = order
if order == 4:
Wavelet.__init__(self,
fourier_factor=4 * sp.pi / (2 * order + 1),
dofmin=2, coi_factor=1 / sp.sqrt(2), C_delta=1.132,
gamma=1.17, dj0=1.5)
else:
Wavelet.__init__(self,
fourier_factor=4 * sp.pi / (2 * order + 1),
dofmin=2, coi_factor=1 / sp.sqrt(2), C_delta=None,
gamma=None, dj0=None)
def daughter(self, scale, N, dt=1):
k = sp.arange(int(N/2)) * 2 * sp.pi / (N * dt)
k = sp.hstack((k, -sp.flipud(k)))
if len(k) == N + 1:
k = k[1: ]
expnt = -(scale * k) * (k > 0.)
norm = sp.sqrt(scale * k[1]) * (2**self.order
/ sp.sqrt(self.order * sp.prod(sp.arange(2, (2 * self.order - 1))))) * sp.sqrt(N)
daughter = norm * ((scale * k)**self.order) * sp.exp(expnt);
daughter = daughter * (k > 0.) # Heaviside step function
return daughter
class DOG(Wavelet):
def __init__(self, order=2):
'''
Object representing the mother derivative-of-Gaussian (DOG or 'Mexican Hat')
wavelet.
Parameters
----------
(for the __init__ method)
order : int
Shape parameter for the wavelet, currently only accepts order 2.
'''
self.order = order
if order == 2:
Wavelet.__init__(self,
fourier_factor = 2 * sp.pi * sp.sqrt(2. / (2 * order + 1)),
dofmin=1, coi_factor=sp.sqrt(2), C_delta=3.541, gamma=1.43,
dj0=1.4)
elif order == 6:
Wavelet.__init__(self,
fourier_factor = 2 * sp.pi * sp.sqrt(2. / (2 * order + 1)),
dofmin=1, coi_factor=sp.sqrt(2), C_delta=1.966, gamma=1.37,
dj0=0.97)
else:
Wavelet.__init__(self,
fourier_factor = 2 * sp.pi * sp.sqrt(2. / (2 * order + 1)),
dofmin=1, coi_factor=sp.sqrt(2), C_delta=None, gamma=None,
dj0=None)
def daughter(self, scale, N, dt=1):
k = sp.arange(int(N/2)) * 2 * sp.pi / (N * dt)
k = sp.hstack((k, -sp.flipud(k)))
if len(k) == N + 1:
k = k[1: ]
expnt = -(scale * k)**2 / 2.0
norm = sp.sqrt(scale * k[1] / special.gamma(self.order + 0.5)) * sp.sqrt(N);
daughter = -norm * (1j**self.order) * ((scale * k)**self.order) * sp.exp(expnt)
return daughter
class Morlet(Wavelet):
def __init__(self, k0=6):
'''
Object representing the mother Morlet wavelet.
Parameters
----------
(for the __init__ method)
k0 : int
Frequency parameter, defaults to 6.
'''
self.k0 = k0
Wavelet.__init__(self,
fourier_factor=(4*sp.pi) / (self.k0+sp.sqrt(2+self.k0**2)),
dofmin=2, coi_factor=1/sp.sqrt(2), C_delta=0.776, gamma=2.32, dj0=0.6)
def daughter(self, scale, N, dt=1):
'''
Returns a daughter wavelet to be multiplied with the
Fourier-transformed time series.
Parameters
----------
scale : float
Scale of the wavelet.
N : int
Number of observations in the series being transformed.
dt : int
Number of observations per unit time.
Returns
-------
daughter : ndarray
"Daughter" wavelet (the Fourier transform of the
mother wavelet of the appropriate scale and length)
'''
k = sp.arange(int(N/2)) * 2 * sp.pi / (N * dt)
k = sp.hstack((k, -sp.flipud(k)))
if len(k) == N + 1:
k = k[1: ]
expnt = -(scale * k - self.k0)**2 / 2. * (k > 0)
norm = sp.sqrt(scale * k[1]) * sp.pi**(-0.25) * sp.sqrt(N) # total energy = N (Eqn. 7)
daughter = norm * sp.exp(expnt)
daughter = daughter * (k >0)
return daughter
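# Minimal sketch (values are arbitrary): a Morlet daughter at scale 16 for a
# 1024-point series sampled at dt=1 is obtained with
#   Morlet(k0=6).daughter(16., 1024, dt=1.0)
# and has length 1024, matching the FFT of the (padded) series it multiplies.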
class WaveletTransform(object):
'''
Object encapsulating the results of a continuous wavelet transform.
Parameters
----------
(For __init__ method)
wave : complex ndarray
Array of the wavelet coefficients.
scales : ndarray
The scales at which the transform was performed.
dscale : float
The resolution in scale (i.e., fractions of an octave)
wavelet : Wavelet
Mother Wavelet object to use in the transform.
period : ndarray
Array of physical periods the transform was performed at.
Equal to `scales` multiplied by `wavelet.fourier_factor`.
series : ndarray
The series the transform was performed on.
dt : float
Sampling interval of the time series.
coi : ndarray
Cone of influence. CWT coefficients outside this area
are likely decreased in magnitude due to zero-padding
at the ends of the series.
'''
def __init__(self, series, wave, scales, dscale, wavelet, dt=1.0):
self.series = series
self.time = sp.arange(len(series)) * dt
self.wave = wave
self.scales = scales
self.dscale = dscale
self.wavelet = wavelet
self.period = self.scales * self.wavelet.fourier_factor
self.dt = dt
self.coi = wavelet.coi_factor # * wavelet.fourier_factor
def power(self):
'''
Return the array of wavelet power (i.e., the squared modulus
of the complex wavelet coefficients).
'''
return abs(self.wave)**2
def phase(self, degrees=False):
'''
Return an array of the phase angles of the wavelet coefficients,
in radians (set degrees=True for degrees).
'''
phase = sp.angle(self.wave)
if degrees:
phase *= 180 / sp.pi
return phase
def _sig_surface(self, siglevel):
'''
Significance surface for plotting.
'''
sig = wave_signif(self, siglevel, lag1(self.series))
sig = sp.tile(sig, (len(self.series), 1)).T
return sig
def _add_coi(self, color, data_present=None, fill=False):
n = len(self.series)
coi_whole = self.coi * self.dt * sp.hstack((sp.arange((n + 1) / 2),
sp.flipud(sp.arange(n / 2))))
coi_list = [coi_whole]
baseline = sp.ones(n) * self.period[-1]
if data_present is not None:
for i in range(2, len(data_present) - 1):
if data_present[i - 1] and (not data_present[i]):
coi_list.append(circ_shift(coi_whole, i))
baseline[i] = 0
elif not data_present[i]:
baseline[i] = 0
elif (not data_present[i - 1]) and data_present[i]:
coi_list.append(circ_shift(coi_whole, i))
coi_list.append(baseline)
coi_line = sp.array(coi_list).min(axis=0)
coi_line[coi_line == 0] = 1e-4
x = sp.hstack((self.time, sp.flipud(self.time)))
y = sp.log2(sp.hstack((coi_line, sp.ones(n) * self.period[-1])))
if fill:
plt.fill(x, y, color='black', alpha=0.3)
else:
plt.plot(self.time, sp.log2(coi_line), color=color, linestyle=':')
def plot(self, norm=False, plot_type='power', degrees=False, kernel=None, coi=True,
data_present=None, siglevel=None, lag1=0.0, coi_color='white',
coi_fill=False, sig_color='white', *args, **kwargs):
'''
Display an image of the wavelet coefficients or phase.
Parameters
----------
norm : logical
If true, plot the wavelet power normalized by the time series
variance. Defaults to False.
plot_type : string
Specifies what to plot. Options are 'power', 'phase', and
'coherence'. Defaults to 'power'
degrees : logical
If plotting the phase, should it be in degrees instead of
radians? Defaults to False.
kernel : array-like
If plotting the coherence, a smoothing kernel must be supplied.
data_present : array-like
Boolean array the same length as the time series, with "True" values
where corresponding to valid measurements. Used to draw cone-of-
influence lines in the middle of the series (as in the case of
missing of invalid data).
siglevel : float
If given, plots the significance contours at this level.
lag1 : float
Lag-1 autocorrelation used for significance testing using an
AR(1) red-noise null hypothesis.
coi_color, sig_color : matplotlib.mpl.colors colors
colors for the cone-of-influence and significance
contour, if plotted. Default to white.
coi_fill : logical
If true, plot the coi as a transparently-shaded region. Defaults to
False.
*args, **kwargs : Additional arguments passed to contourf().
Returns
-------
A `matplotlib.image.AxesImage` instance.
'''
if plot_type == 'phase':
values = self.phase(degrees=degrees)
colormap = plt.cm.hsv
elif plot_type == 'coherence':
values = self.coherence(kernel)
colormap = plt.cm.jet
elif plot_type == 'power':
values = self.power()
colormap = plt.cm.jet
else:
raise ValueError("plot_type must be 'power', 'phase', or 'coherence'.")
if norm:
values /= self.series[sp.isfinite(self.series)].var()
ax = plt.contourf(self.time, sp.log2(self.scales), values,
*args, **kwargs)
plt.ylim(sp.log2(sp.array([self.scales[-1], self.scales[0]])))
yt = plt.yticks()
#plt.yticks(yt[0] + 1, 2**yt[0])
plt.yticks(yt[0], 2**yt[0])
if coi:
self._add_coi(color=coi_color, data_present=data_present)
if siglevel is not None:
if plot_type == 'phase':
print "Significance testing not available for wavelet phase."
sig = self._sig_surface(siglevel)
plt.contour(self.time, sp.log2(self.period), self.power() - sig,
levels=[0], colors=sig_color, antialiased=True)
return ax
class CrossWaveletTransform(WaveletTransform):
def __init__(self, series1, series2, wave1, wave2, *args, **kwargs):
series = sp.vstack((series1.ravel(), series2.ravel())).T
wave = sp.dstack((wave1, wave2))
WaveletTransform.__init__(self, series, wave, *args, **kwargs)
def power(self):
return abs(self.wave[:,:,0] * sp.conj(self.wave[:,:,1]))
def coherence(self, kernel):
numerator = convolve(self.power(), kernel)
denominator = (convolve(abs(self.wave[:,:,0])**2, kernel)
* convolve(abs(self.wave[:,:,1])**2, kernel))**0.5
#denominator = convolve((abs(self.wave[:,:,0])**2 * abs(self.wave[:,:,1])**2)**0.5,
# kernel)
return (numerator / denominator).reshape((self.scales.size, self.series.shape[0]))
def _sig_surface(self, siglevel):
a1 = lag1(self.series[:, 0])
a2 = lag1(self.series[:, 1])
sig = cross_wave_signif(self, siglevel, a1, a2)
return sp.tile(sig, (len(self.series), 1)).T
def cwt(series, wavelet, octaves=None, dscale=0.25, minscale=None, dt=1.0):
'''
Perform a continuous wavelet transform on a series.
Parameters
----------
series : ndarray
octaves : int
Number of powers-of-two over which to perform the transform.
dscale : float
Fraction of power of two separating the scales. Defaults to 0.25.
minscale : float
Minimum scale. If none supplied, defaults to 2.0 * dt.
dt : float
Time step between observations in the series.
Returns
-------
WaveletTransform
WaveletTransform object with the results of the CWT.
See Also
--------
ccwt : Cross continuous wavelet transform, for the wavelet
coherence between two series
Notes
-----
This function uses a fast Fourier Transform (FFT) to convolve
the wavelet with the series at each scale. For details, see:
Torrence, C. and G. P. Compo, 1998: A Practical Guide to
Wavelet Analysis. <I>Bull. Amer. Meteor. Soc.</I>, 79, 61-78.
'''
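# Hedged example (assumed numbers): with dt=0.25, octaves=6 and dscale=0.25
# the call below yields 25 scales from 2*dt up to 2*dt * 2**6:
#   wt = cwt(x, Morlet(), octaves=6, dscale=0.25, dt=0.25)
#   wt.scales.shape   # -> (25,)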
# Generate the array of scales
if not minscale: minscale = 2.0 * dt
if not octaves:
octaves = int(sp.log2(len(series) * dt / minscale) / dscale) * dscale
scales = minscale * 2**sp.arange(octaves + dscale, step=dscale)
# Demean and pad time series with zeroes to next highest power of 2
N = len(series)
series = pad(series - series.mean())
N_padded = len(series)
wave = sp.zeros((len(scales), N_padded)) + complex(0, 0)
series_ft = sp.fft(series)
for i, s in enumerate(scales):
wave[i, :] = sp.ifft(series_ft * wavelet.daughter(s, N_padded, dt))
wave = wave[:, :N]
series = series[:N]
return WaveletTransform(series, wave, scales, dscale, wavelet, dt)
def ccwt(series1, series2, *args, **kwargs):
t1 = cwt(series1, *args, **kwargs)
t2 = cwt(series2, *args, **kwargs)
return CrossWaveletTransform(t1.series, t2.series, t1.wave, t2.wave,
t2.scales, t2.dscale, t2.wavelet, t2.dt)
def pad(series):
'''
Returns a time series padded with zeros to the
next-highest power of two.
'''
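# For instance, a 1000-sample series comes back with 24 zeros appended:
#   pad(sp.ones(1000)).size   # -> 1024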
N = len(series)
next_N = 2 ** sp.ceil(sp.log2(N))
return sp.hstack((series, sp.zeros(next_N - N)))
def normalize(series):
'''
Returns the series demeaned and divided by its standard deviation.
'''
mean = series[sp.isfinite(series)].mean()
sdev = series[sp.isfinite(series)].std()
return (series - mean) / sdev
def add_half_coi(cwt, t, coi, edge):
'''
edge : int
If edge = 1, add the COI to the right of the current tumn.
If edge = -1, add the coi to the left of the current tumn.
'''
for s in sp.arange(cwt.shape[0]):
bounds = [t, t + edge * coi[s]]
cwt[s, min(bounds):max(bounds) + 1] = True
def mask_coi(cwt, coi, data_present=None, axis=0):
'''
mask_coi(cwt, coi, series=None, axis=0)
Return a copy of cwt, masked with cone of influence coi.
Parameters
----------
cwt : ndarray
An array of wavelet transform coefficients.
coi : array_like
Widths of the cone of influence, as a function of scale.
Length must match the scale dimension of the cwt array.
data_present : array_like, optional
Optional series of boolean values, representing the locations
of missing data in the original time series. Values of 0 or
False indicate missing values. If supplied, this series will
be used to draw cones of influence at the edges of the missing data
regions.
axis : integer, optional
Axis along which the COI is added (i.e., the scale axis). Defaults
to 0.
Returns
-------
masked_array(cwt, mask)
The input array, masked by the appropriate cone(s) of influence
'''
if axis == 1:
cwt = cwt.T
mask = sp.zeros_like(abs(cwt))
add_half_coi(mask, 0, coi, 1)
if data_present != None:
for i in range(2, len(data_present) - 1):
if data_present[i - 1] and (not data_present[i]):
add_half_coi(mask, i, coi, -1)
elif (not data_present[i - 1]) and data_present[i]:
add_half_coi(mask, i, coi, 1)
elif not data_present[i]:
mask[: , i] = True
add_half_coi(mask, mask.shape[1], coi, -1)
return sp.ma.masked_array(cwt, mask)
def phase(wave):
return sp.arctan(sp.imag(wave) / sp.real(wave))
def circ_shift(x, shift):
return sp.hstack((x[-shift: ], x[:-shift]))
def lag1(x):
'''
Find the lag-1 autocorrelation of a time series (i.e. fit an AR(1) model).
Uses the Levinson-Durbin algorithm.
'''
a = acvf(x)
return a[1] / a[0]
def red_spectrum(lag1, freq):
return (1. - lag1**2) / (1.0 - 2.0 * lag1 * sp.cos(freq * 2.0 * sp.pi) + lag1**2)
def d_cross_dist_nonvector(z, dof):
'''
Probability density function for cross-wavelet spectrum, assuming
both wavelet spectra are chi-squared distributed. From equation (30)
in Torrence and Compo, 1998.
Parameters
----------
z : float
The random variable.
dof : int
Degrees of freedom of the wavelet (1 for real wavelets, 2 for complex ones).
Returns
-------
d : float
Probability density at z.
'''
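# The density implemented below is Eqn. (30) of Torrence & Compo (1998):
#   f_nu(z) = 2**(2 - nu) / Gamma(nu / 2)**2 * z**(nu - 1) * K0(z)
# with nu = dof and K0 the modified Bessel function of the second kind,
# order zero (scipy.special.k0).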
if z == 0:
return 0.
else:
return (2.**(2 - dof) / special.gamma(dof / 2)**2
* z**(dof - 1) * special.k0(z))
d_cross_dist = sp.vectorize(d_cross_dist_nonvector)
@sp.vectorize
def p_cross_dist(q, dof):
return sp.integrate.quad(d_cross_dist_nonvector, 0, q, (dof,))[0]
@sp.vectorize
def q_cross_dist(p, dof):
objective = lambda q: abs(p - p_cross_dist(q, dof))
return sp.optimize.fmin(objective, 1, disp=0)
def cross_wave_signif(t, siglevel=0.95, lag1=0.0, lag2=0.0):
dof = t.wavelet.dofmin
std1, std2 = sp.std(t.series, axis=0)
fft_theor1 = red_spectrum(lag1, t.dt / t.period)
fft_theor2 = red_spectrum(lag2, t.dt / t.period)
q = q_cross_dist(siglevel, dof)
return std1 * std2 * sp.sqrt(fft_theor1 * fft_theor2) * q / dof
def wave_signif(t, siglevel=0.95, lag1=0.0, test='local', dof=None, scale_range=None):
fft_theor = red_spectrum(lag1, t.dt / t.period)
fft_theor *= t.series.var() # Include time-series variance
# No smoothing, DOF = dofmin (Sec. 4)
if test == 'local':
return fft_theor * chi2.ppf(siglevel, t.wavelet.dofmin) / t.wavelet.dofmin
# Time-averaged significance
elif test == 'global':
# Eqn. 23
dof = t.wavelet.dofmin * sp.sqrt(1 + ((len(t.series) * t.dt) /
(t.wavelet.gamma * t.scales))**2)
dof[dof < t.wavelet.dofmin] = t.wavelet.dofmin # minimum DOF is dofmin
return fft_theor * chi2.ppf(siglevel, dof) / dof
elif test == 'scale':
if not scale_range:
raise ValueError("Must supply a scale_range for scale-averaged "
"significance testing.")
# scale_range is interpreted in units of period, as in scale_avg
scale_indices = (t.period >= min(scale_range)) \
& (t.period <= max(scale_range))
scale_indices = sp.arange(len(scale_indices))[scale_indices]
na = len(t.series)
savg = scale_avg(t, min(scale_range), max(scale_range))
smid = t.scales[0] * 2 ** (0.5 * (min(scale_range) + max(scale_range)) * t.dscale)
dof = 2 * na * savg[1] / smid \
* sp.sqrt(1 + (na * t.dscale / t.wavelet.dj0)**2)
P = savg[1] * (fft_theor[scale_indices] / t.scales[scale_indices]).sum()
def time_avg(transform, start=None, end=None):
'''
Doesn't work as intended for cross-wavelet transforms'
'''
return (abs(transform.wave[:, start:end])**2).mean(axis=1) / sp.var(transform.series)
def scale_avg(transform, min, max, period=True, norm=False):
if period:
scale_indices = (transform.period >= min) & (transform.period <= max)
else:
scale_indices = (transform.scales >= min) & (transform.scales <= max)
scale_indices = sp.arange(len(scale_indices))[scale_indices]
band = transform.wave[scale_indices, :]
scales = transform.scales[scale_indices].reshape((len(scale_indices), 1))
w = transform.wavelet
W_avg = (transform.dscale * transform.dt / w.C_delta) \
* (abs(band)**2 / scales).sum(axis=0) # Eqn. 24
scale_avg = float(1 / sum(1 / scales))
if norm:
W_avg = W_avg * transform.wavelet.C_delta * scale_avg / (transform.dscale * transform.dt * transform.series.var())
return W_avg, scale_avg
def plot_cwt(t):
s1 = plt.subplot(221)
t.plot()
s2 = plt.subplot(222)
spec = time_avg(t)
plt.plot(spec, sp.log2(t.period))
plt.ylim(sp.log2(t.period).max(), sp.log2(t.period).min())
nscales = len(t.scales)
yt = sp.arange(nscales, step=int(1 / t.dscale))
plt.yticks(yt, t.scales[yt])
plt.ylim(nscales - 1, 0)
s1.set_position((0.1, 0.1, 0.65, 0.8))
s2.set_position((0.8, 0.1, 0.15, 0.8))
def bootstrap_signif(t, n):
'''
Estimates the significance level of a wavelet transform using a
nonparametric bootstrapping procedure.
Parameters
----------
t : WaveletTransform or CrossWaveletTransform
n : int
Number of realizations of the random series to test.
Returns
-------
signif : array
Float array the same shape as t.wave, containing estimated p-values
for each time and scale.
Details
-------
Generates n simulated time series with the same power spectrum as
the original series, via phase randomization, performing the wavelet
transform on each one. For each time/scale point in the original
transform's wavelet spectrum, counts the number of times the
corresponding point in a simulated spectrum is greater, then divides
by the number of simulations to get the p-value.
'''
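# Hedged usage sketch (the number of surrogates and the 5% threshold are
# arbitrary choices, and `wt` is assumed to be an existing transform):
#   pvals = bootstrap_signif(wt, 500)
#   significant = pvals < 0.05   # power exceeding 95% of the surrogate spectra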
n_greater = sp.zeros_like(t.power())
if type(t) == CrossWaveletTransform:
for i in range(n):
x1 = fftsurr(t.series[:, 0])
x2 = fftsurr(t.series[:, 1])
t_sim = ccwt(x1, x2, t.wavelet, octaves=(t.wave.shape[0]-1) * t.dscale,
dscale=t.dscale, minscale=t.scales.min(), dt=t.dt)
n_greater += t_sim.power() > t.power()
else:
for i in range(n):
x = fftsurr(t.series)
t_sim = cwt(x, t.wavelet, octaves=(t.wave.shape[0]-1) * t.dscale,
dscale=t.dscale, minscale=t.scales.min(), dt=t.dt)
n_greater += t_sim.power() > t.power()
return n_greater / n
| gpl-2.0 |
hksonngan/pynopticon | src/em/gauss_mix.py | 4 | 26027 | # /usr/bin/python
# Last Change: Tue Jul 17 11:00 PM 2007 J
"""Module implementing GM, a class which represents Gaussian mixtures.
GM instances can be used to create, sample mixtures. They also provide
different plotting facilities, such as isodensity contour for multi dimensional
models, ellipses of confidence."""
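# Hedged quick-start sketch (parameter values below are arbitrary):
#   w = N.array([0.3, 0.7])
#   mu = N.array([[0.], [3.]])
#   va = N.array([[1.], [0.25]])
#   gm = GM.fromvalues(w, mu, va)
#   x = gm.sample(1000)          # (1000, 1) array of draws
#   gm.plot1d(gpdf=True)         # per-component and global pdf (needs matplotlib)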
__docformat__ = 'restructuredtext'
import numpy as N
from numpy.random import randn, rand
import numpy.linalg as lin
import densities as D
import misc
# Right now, two main usages of a Gaussian Model are possible
# - init a Gaussian Model with meta-parameters, and train it
# - set up a Gaussian Model to sample it, draw ellipsoids
# of confidence. In this case, we would like to init it with
# known values of parameters. This can be done with the class method
# fromvalues
# TODO:
# - change bounds methods of GM class instantiations so that it cannot
# be used as long as w, mu and va are not set
# - We have to use scipy now for the chi-square pdf, so there may be other
# methods to be used, ie for implementing random index.
# - there is no check on the internal state of the GM, that is whether w, mu and va
# values make sense (eg singular values)
# - plot1d is still very rough. There
# should be a sensible way to modify the result plot (maybe return a dict
# with global pdf, component pdf and fill matplotlib handles). Should be
# coherent with plot
class GmParamError(Exception):
"""Exception raised for errors in gmm params
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, message):
Exception.__init__(self)
self.message = message
def __str__(self):
return self.message
class GM:
"""Gaussian Mixture class. This is a simple container class
to hold Gaussian Mixture parameters (weights, mean, etc...).
It can also draw itself (confidence ellipses) and samples itself.
"""
# I am not sure it is useful to have a spherical mode...
_cov_mod = ['diag', 'full']
#===============================
# Methods to construct a mixture
#===============================
def __init__(self, d, k, mode = 'diag'):
"""Init a Gaussian Mixture.
:Parameters:
d : int
dimension of the mixture.
k : int
number of component in the mixture.
mode : string
mode of covariance
:Returns:
an instance of GM.
Note
----
Only full and diag mode are supported for now.
:SeeAlso:
If you want to build a Gaussian Mixture with knowns weights, means
and variances, you can use GM.fromvalues method directly"""
if mode not in self._cov_mod:
raise GmParamError("mode %s not recognized" + str(mode))
self.d = d
self.k = k
self.mode = mode
# Init to 0 all parameters, with the right dimensions.
# Not sure this is useful in python from an efficiency POV ?
self.w = N.zeros(k)
self.mu = N.zeros((k, d))
if mode == 'diag':
self.va = N.zeros((k, d))
elif mode == 'full':
self.va = N.zeros((k * d, d))
self.__is_valid = False
if d > 1:
self.__is1d = False
else:
self.__is1d = True
def set_param(self, weights, mu, sigma):
"""Set parameters of the model.
Args should be conformant with metparameters d and k given during
initialisation.
:Parameters:
weights : ndarray
weights of the mixture (k elements)
mu : ndarray
means of the mixture. One component's mean per row, k row for k
components.
sigma : ndarray
variances of the mixture. For diagonal models, one row contains
the diagonal elements of the covariance matrix. For full
covariance, d rows for one variance.
Examples
--------
Create a 3 component, 2 dimension mixture with full covariance matrices
>>> w = numpy.array([0.2, 0.5, 0.3])
>>> mu = numpy.array([[0., 0.], [1., 1.]])
>>> va = numpy.array([[1., 0.], [0., 1.], [2., 0.5], [0.5, 1]])
>>> gm = GM(2, 3, 'full')
>>> gm.set_param(w, mu, va)
:SeeAlso:
If you know already the parameters when creating the model, you can
simply use the method class GM.fromvalues."""
#XXX: when fromvalues is called, parameters are called twice...
k, d, mode = check_gmm_param(weights, mu, sigma)
if not k == self.k:
raise GmParamError("Number of given components is %d, expected %d"
% (k, self.k))
if not d == self.d:
raise GmParamError("Dimension of the given model is %d, "\
"expected %d" % (d, self.d))
if not mode == self.mode and not d == 1:
raise GmParamError("Given covariance mode is %s, expected %s"
% (mode, self.mode))
self.w = weights
self.mu = mu
self.va = sigma
self.__is_valid = True
@classmethod
def fromvalues(cls, weights, mu, sigma):
"""This class method can be used to create a GM model
directly from its parameters weights, mean and variance
:Parameters:
weights : ndarray
weights of the mixture (k elements)
mu : ndarray
means of the mixture. One component's mean per row, k row for k
components.
sigma : ndarray
variances of the mixture. For diagonal models, one row contains
the diagonal elements of the covariance matrix. For full
covariance, d rows for one variance.
:Returns:
gm : GM
an instance of GM.
Examples
--------
>>> w, mu, va = GM.gen_param(d, k)
>>> gm = GM(d, k)
>>> gm.set_param(w, mu, va)
and
>>> w, mu, va = GM.gen_param(d, k)
>>> gm = GM.fromvalue(w, mu, va)
are strictly equivalent."""
k, d, mode = check_gmm_param(weights, mu, sigma)
res = cls(d, k, mode)
res.set_param(weights, mu, sigma)
return res
#=====================================================
# Fundamental facilities (sampling, confidence, etc..)
#=====================================================
def sample(self, nframes):
""" Sample nframes frames from the model.
:Parameters:
nframes : int
number of samples to draw.
:Returns:
samples : ndarray
samples in the format one sample per row (nframes, d)."""
if not self.__is_valid:
raise GmParamError("""Parameters of the model has not been
set yet, please set them using self.set_param()""")
# State index (ie hidden var)
sti = gen_rand_index(self.w, nframes)
# standard gaussian samples
x = randn(nframes, self.d)
if self.mode == 'diag':
x = self.mu[sti, :] + x * N.sqrt(self.va[sti, :])
elif self.mode == 'full':
# Faster:
cho = N.zeros((self.k, self.va.shape[1], self.va.shape[1]))
for i in range(self.k):
# Using cholesky looks more stable than sqrtm; sqrtm is not
# available in numpy anyway, only in scipy...
cho[i] = lin.cholesky(self.va[i*self.d:i*self.d+self.d, :])
for s in range(self.k):
tmpind = N.where(sti == s)[0]
x[tmpind] = N.dot(x[tmpind], cho[s].T) + self.mu[s]
else:
raise GmParamError("cov matrix mode not recognized, "\
"this is a bug !")
return x
def conf_ellipses(self, dim = misc.DEF_VIS_DIM, npoints = misc.DEF_ELL_NP,
level = misc.DEF_LEVEL):
"""Returns a list of confidence ellipsoids describing the Gmm
defined by mu and va. Check densities.gauss_ell for details
:Parameters:
dim : sequence
sequences of two integers which represent the dimensions where to
project the ellipsoid.
npoints : int
number of points to generate for the ellipse.
level : float
level of confidence (between 0 and 1).
:Returns:
xe : sequence
a list of x coordinates for the ellipses (Xe[i] is the array
containing x coordinates of the ith Gaussian)
ye : sequence
a list of y coordinates for the ellipses.
Examples
--------
Suppose we have w, mu and va as parameters for a mixture, then:
>>> gm = GM(d, k)
>>> gm.set_param(w, mu, va)
>>> X = gm.sample(1000)
>>> Xe, Ye = gm.conf_ellipsoids()
>>> pylab.plot(X[:,0], X[:, 1], '.')
>>> for k in len(w):
... pylab.plot(Xe[k], Ye[k], 'r')
Will plot samples X draw from the mixture model, and
plot the ellipses of equi-probability from the mean with
default level of confidence."""
if self.__is1d:
raise ValueError("This function does not make sense for 1d "
"mixtures.")
if not self.__is_valid:
raise GmParamError("""Parameters of the model has not been
set yet, please set them using self.set_param()""")
xe = []
ye = []
if self.mode == 'diag':
for i in range(self.k):
x, y = D.gauss_ell(self.mu[i, :], self.va[i, :],
dim, npoints, level)
xe.append(x)
ye.append(y)
elif self.mode == 'full':
for i in range(self.k):
x, y = D.gauss_ell(self.mu[i, :],
self.va[i*self.d:i*self.d+self.d, :],
dim, npoints, level)
xe.append(x)
ye.append(y)
return xe, ye
def check_state(self):
"""Returns true if the parameters of the model are valid.
For Gaussian mixtures, this means weights summing to 1, and variances
to be positive definite.
"""
if not self.__is_valid:
raise GmParamError("Parameters of the model has not been"\
"set yet, please set them using self.set_param()")
# Check condition number for cov matrix
if self.mode == 'diag':
tinfo = N.finfo(self.va.dtype)
if N.any(self.va < tinfo.eps):
raise GmParamError("variances are singular")
elif self.mode == 'full':
try:
d = self.d
for i in range(self.k):
N.linalg.cholesky(self.va[i*d:i*d+d, :])
except N.linalg.LinAlgError:
raise GmParamError("matrix %d is singular " % i)
else:
raise GmParamError("Unknown mode")
return True
@classmethod
def gen_param(cls, d, nc, mode = 'diag', spread = 1):
"""Generate random, valid parameters for a gaussian mixture model.
:Parameters:
d : int
the dimension
nc : int
the number of components
mode : string
covariance matrix mode ('full' or 'diag').
:Returns:
w : ndarray
weights of the mixture
mu : ndarray
means of the mixture
w : ndarray
variances of the mixture
Notes
-----
This is a class method.
"""
w = N.abs(randn(nc))
w = w / sum(w, 0)
mu = spread * N.sqrt(d) * randn(nc, d)
if mode == 'diag':
va = N.abs(randn(nc, d))
elif mode == 'full':
# If A is invertible, A'A is positive definite
va = randn(nc * d, d)
for k in range(nc):
va[k*d:k*d+d] = N.dot( va[k*d:k*d+d],
va[k*d:k*d+d].transpose())
else:
raise GmParamError('cov matrix mode not recognized')
return w, mu, va
#gen_param = classmethod(gen_param)
def pdf(self, x, log = False):
"""Computes the pdf of the model at given points.
:Parameters:
x : ndarray
points where to estimate the pdf. One row for one
multi-dimensional sample (eg to estimate the pdf at 100
different points in 10 dimension, data's shape should be (100,
20)).
log : bool
If true, returns the log pdf instead of the pdf.
:Returns:
y : ndarray
the pdf at points x."""
if log:
return D.logsumexp(
D.multiple_gauss_den(x, self.mu, self.va, log = True)
+ N.log(self.w))
else:
return N.sum(D.multiple_gauss_den(x, self.mu, self.va) * self.w, 1)
def pdf_comp(self, x, cid, log = False):
"""Computes the pdf of the model at given points, at given component.
:Parameters:
x : ndarray
points where to estimate the pdf. One row for one
multi-dimensional sample (eg to estimate the pdf at 100
different points in 10 dimension, data's shape should be (100,
20)).
cid: int
the component index.
log : bool
If true, returns the log pdf instead of the pdf.
:Returns:
y : ndarray
the pdf at points x."""
if self.mode == 'diag':
va = self.va[cid]
elif self.mode == 'full':
va = self.va[cid*self.d:(cid+1)*self.d]
else:
raise GmParamError("""var mode %s not supported""" % self.mode)
if log:
return D.gauss_den(x, self.mu[cid], va, log = True) \
+ N.log(self.w[cid])
else:
return D.multiple_gauss_den(x, self.mu[cid], va) * self.w[cid]
#=================
# Plotting methods
#=================
def plot(self, dim = misc.DEF_VIS_DIM, npoints = misc.DEF_ELL_NP,
level = misc.DEF_LEVEL):
"""Plot the ellipsoides directly for the model
Returns a list of lines handle, so that their style can be modified. By
default, the style is red color, and nolegend for all of them.
:Parameters:
dim : sequence
sequence of two integers, the dimensions of interest.
npoints : int
Number of points to use for the ellipsoids.
level : int
level of confidence (to use with fill argument)
:Returns:
h : sequence
Returns a list of lines handle so that their properties
can be modified (eg color, label, etc...):
Note
----
Does not work for 1d. Requires matplotlib
:SeeAlso:
conf_ellipses is used to compute the ellipses. Use this if you want
to plot with something else than matplotlib."""
if self.__is1d:
raise ValueError("This function does not make sense for 1d "
"mixtures.")
if not self.__is_valid:
raise GmParamError("""Parameters of the model has not been
set yet, please set them using self.set_param()""")
k = self.k
xe, ye = self.conf_ellipses(dim, npoints, level)
try:
import pylab as P
return [P.plot(xe[i], ye[i], 'r', label='_nolegend_')[0] for i in
range(k)]
except ImportError:
raise GmParamError("matplotlib not found, cannot plot...")
def plot1d(self, level = misc.DEF_LEVEL, fill = False, gpdf = False):
"""Plots the pdf of each component of the 1d mixture.
:Parameters:
level : int
level of confidence (to use with fill argument)
fill : bool
if True, the area of the pdf corresponding to the given
confidence intervales is filled.
gpdf : bool
if True, the global pdf is plot.
:Returns:
h : dict
Returns a dictionary h of plot handles so that their properties
can be modified (eg color, label, etc...):
- h['pdf'] is a list of lines, one line per component pdf
- h['gpdf'] is the line for the global pdf
- h['conf'] is a list of filling area
"""
if not self.__is1d:
raise ValueError("This function does not make sense for "\
"mixtures which are not unidimensional")
from scipy.stats import norm
pval = N.sqrt(self.va[:, 0]) * norm(0, 1).ppf((1+level)/2)
# Compute reasonable min/max for the normal pdf: [-mc * std, mc * std]
# gives the range we are taking in account for each gaussian
mc = 3
std = N.sqrt(self.va[:, 0])
mi = N.amin(self.mu[:, 0] - mc * std)
ma = N.amax(self.mu[:, 0] + mc * std)
np = 500
x = N.linspace(mi, ma, np)
# Prepare the dic of plot handles to return
ks = ['pdf', 'conf', 'gpdf']
hp = dict((i, []) for i in ks)
# Compute the densities
y = D.multiple_gauss_den(x[:, N.newaxis], self.mu, self.va, \
log = True) \
+ N.log(self.w)
yt = self.pdf(x[:, N.newaxis])
try:
import pylab as P
for c in range(self.k):
h = P.plot(x, N.exp(y[:, c]), 'r', label ='_nolegend_')
hp['pdf'].extend(h)
if fill:
# Compute x coordinates of filled area
id1 = -pval[c] + self.mu[c]
id2 = pval[c] + self.mu[c]
                    xc = x[N.where(x > id1)[0]]
                    xc = xc[N.where(xc < id2)[0]]
# Compute the graph for filling
yf = self.pdf_comp(xc, c)
xc = N.concatenate(([xc[0]], xc, [xc[-1]]))
yf = N.concatenate(([0], yf, [0]))
h = P.fill(xc, yf, facecolor = 'b', alpha = 0.1,
label='_nolegend_')
hp['conf'].extend(h)
if gpdf:
h = P.plot(x, yt, 'r:', label='_nolegend_')
hp['gpdf'] = h
return hp
except ImportError:
raise GmParamError("matplotlib not found, cannot plot...")
def density_on_grid(self, dim = misc.DEF_VIS_DIM, nx = 50, ny = 50,
nl = 20, maxlevel = 0.95, v = None):
"""Do all the necessary computation for contour plot of mixture's
density.
:Parameters:
dim : sequence
sequence of two integers, the dimensions of interest.
nx : int
Number of points to use for the x axis of the grid
ny : int
Number of points to use for the y axis of the grid
nl : int
Number of contour to plot.
:Returns:
X : ndarray
points of the x axis of the grid
Y : ndarray
points of the y axis of the grid
Z : ndarray
values of the density on X and Y
V : ndarray
Contour values to display.
Note
----
X, Y, Z and V are as expected by matplotlib contour function."""
if self.__is1d:
raise ValueError("This function does not make sense for 1d "
"mixtures.")
# Ok, it is a bit gory. Basically, we want to compute the size of the
# grid. We use conf_ellipse, which will return a couple of points for
# each component, and we can find a grid size which then is just big
# enough to contain all ellipses. This won't work well if two
# ellipsoids are crossing each other a lot (because this assumes that
# at a given point, one component is largely dominant for its
# contribution to the pdf).
xe, ye = self.conf_ellipses(level = maxlevel, dim = dim)
ax = [N.min(xe), N.max(xe), N.min(ye), N.max(ye)]
w = ax[1] - ax[0]
h = ax[3] - ax[2]
x, y, lden = self._densityctr(N.linspace(ax[0]-0.2*w,
ax[1]+0.2*w, nx),
N.linspace(ax[2]-0.2*h,
ax[3]+0.2*h, ny),
dim = dim)
# XXX: how to find "good" values for level ?
if v is None:
v = N.linspace(-5, N.max(lden), nl)
return x, y, lden, N.array(v)
def _densityctr(self, rangex, rangey, dim = misc.DEF_VIS_DIM):
"""Helper function to compute density contours on a grid."""
gr = N.meshgrid(rangex, rangey)
x = gr[0].flatten()
y = gr[1].flatten()
xdata = N.concatenate((x[:, N.newaxis], y[:, N.newaxis]), axis = 1)
dmu = self.mu[:, dim]
dva = self._get_va(dim)
den = GM.fromvalues(self.w, dmu, dva).pdf(xdata, log = True)
den = den.reshape(len(rangey), len(rangex))
return gr[0], gr[1], den
    def _get_va(self, dim):
        """Returns the variance restricted to the 2 dimensions given in tuple dim."""
assert len(dim) == 2
dim = N.array(dim)
        if (dim < 0).any() or (dim >= self.d).any():
raise ValueError("dim elements should be between 0 and dimension"
" of the mixture.")
if self.mode == 'diag':
return self.va[:, dim]
elif self.mode == 'full':
ld = dim.size
vaselid = N.empty((ld * self.k, ld), N.int)
for i in range(self.k):
vaselid[ld*i] = dim[0] + i * self.d
vaselid[ld*i+1] = dim[1] + i * self.d
vadid = N.empty((ld * self.k, ld), N.int)
for i in range(self.k):
vadid[ld*i] = dim
vadid[ld*i+1] = dim
return self.va[vaselid, vadid]
else:
            raise ValueError("Unknown mode")
# Syntactic sugar
def __repr__(self):
msg = ""
msg += "Gaussian Mixture:\n"
msg += " -> %d dimensions\n" % self.d
msg += " -> %d components\n" % self.k
msg += " -> %s covariance \n" % self.mode
if self.__is_valid:
            msg += "Has initial values"
        else:
            msg += "Has no initial values yet"
return msg
def __str__(self):
return self.__repr__()
# Function to generate a random index: this is kept outside any class,
# as the function can be useful for other
def gen_rand_index(p, n):
"""Generate a N samples vector containing random index between 1
and length(p), each index i with probability p(i)"""
# TODO Check args here
# TODO: check each value of inverse distribution is
# different
invcdf = N.cumsum(p)
uni = rand(n)
index = N.zeros(n, dtype=int)
# This one should be a bit faster
for k in range(len(p)-1, 0, -1):
blop = N.where(N.logical_and(invcdf[k-1] <= uni,
uni < invcdf[k]))
index[blop] = k
return index
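# Illustrative sketch (added comment): draw 5 component indices according to
# the weights in p; `rand` comes from numpy.random, as imported at the top of
# the full module.
#   >>> gen_rand_index(N.array([0.2, 0.8]), 5)
#   array([1, 0, 1, 1, 1])    # e.g.; index i is drawn with probability p[i]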
def check_gmm_param(w, mu, va):
"""Check that w, mu and va are valid parameters for
    a mixture of gaussians.
    w should sum to 1, there should be the same number of components in each
param, the variances should be positive definite, etc...
:Parameters:
w : ndarray
vector or list of weigths of the mixture (K elements)
mu : ndarray
matrix: K * d
va : ndarray
list of variances (vector K * d or square matrices Kd * d)
:Returns:
k : int
number of components
d : int
dimension
mode : string
            'diag' if diagonal covariance, 'full' if full matrices
"""
# Check that w is valid
if not len(w.shape) == 1:
raise GmParamError('weight should be a rank 1 array')
if N.fabs(N.sum(w) - 1) > misc.MAX_DBL_DEV:
raise GmParamError('weight does not sum to 1')
# Check that mean and va have the same number of components
k = len(w)
if N.ndim(mu) < 2:
msg = "mu should be a K,d matrix, and a row vector if only 1 comp"
raise GmParamError(msg)
if N.ndim(va) < 2:
        msg = """va should be a (K, d) or (K*d, d) matrix, and a row vector if
              only 1 diag comp"""
raise GmParamError(msg)
(km, d) = mu.shape
(ka, da) = va.shape
if not k == km:
msg = "not same number of component in mean and weights"
raise GmParamError(msg)
if not d == da:
msg = "not same number of dimensions in mean and variances"
raise GmParamError(msg)
if km == ka:
mode = 'diag'
else:
mode = 'full'
if not ka == km*d:
            msg = "va should have K*d rows for full covariance matrices"
raise GmParamError(msg)
return k, d, mode
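# Illustrative sketch (added comment): validate hand-built parameters before
# building a model; the values below are examples only.
#   >>> w = N.array([0.3, 0.7])
#   >>> mu = N.array([[0., 0.], [3., 3.]])
#   >>> va = N.array([[1., 1.], [2., 2.]])    # one diagonal variance per component
#   >>> check_gmm_param(w, mu, va)
#   (2, 2, 'diag')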
if __name__ == '__main__':
pass
| gpl-3.0 |
manashmndl/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 44 | 17033 | import tempfile
import shutil
import os.path as op
import warnings
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
temp_folder = tempfile.mkdtemp()
try:
fpath = op.join(temp_folder, 'data.pkl')
joblib.dump(splitted_data, fpath)
X_train, X_test, y_train, y_test = joblib.load(fpath, mmap_mode='r')
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
finally:
# try to release the mmap file handle in time to be able to delete
# the temporary folder under windows
del X_train, X_test, y_train, y_test
try:
shutil.rmtree(temp_folder)
except shutil.WindowsError:
warnings.warn("Could not delete temporary folder %s" % temp_folder)
| bsd-3-clause |
renewables-ninja/gsee | setup.py | 1 | 1793 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages, Extension
# Sets the __version__ variable
with open("gsee/_version.py", "r") as f:
exec(f.read())
with open("README.md", "r") as f:
long_description = f.read()
# Numpy headers are always required for compilation, but to allow
# `python setup.py egg_info` to work on install in a clean environment,
# the numpy import must be wrapped in a try-except block.
try:
import numpy as np
numpy_include = [np.get_include()]
except ImportError:
numpy_include = []
setup(
name="gsee",
version=__version__,
author="Stefan Pfenninger",
author_email="stefan@pfenninger.org",
description="GSEE: Global Solar Energy Estimator",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/renewables-ninja/gsee",
packages=find_packages(),
include_package_data=True,
ext_modules=[
Extension(
"gsee.climatedata_interface.kt_h_sinusfunc",
["gsee/climatedata_interface/kt_h_sinusfunc.pyx"],
include_dirs=numpy_include,
)
],
zip_safe=False,
install_requires=[
"dask >= 2.8",
"distributed >= 2.8",
"joblib >= 0.12",
"numpy >= 1.15.0",
"pandas >= 1.0, < 1.1",
"pvlib >= 0.6.3",
"pyephem >= 3.7.6",
"scipy >= 1.1.0",
"xarray >= 0.16, < 0.17",
],
setup_requires=["cython", "numpy >= 1.15.0"],
extras_require={"generate_pdfs": ["basemap >= 1.1.0", "seaborn >= 0.9.0"],},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
)
| bsd-3-clause |
kellyschrock/apm_planner | libs/mavlink/share/pyshared/pymavlink/examples/mavgraph.py | 29 | 5951 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import pylab, pytz, matplotlib
from math import *
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from mavextra import *
locator = None
formatter = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global locator, formatter
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if x[i][-1] - x[i][0] > xrange:
xrange = x[i][-1] - x[i][0]
xrange *= 24 * 60 * 60
if formatter is None:
if xrange < 1000:
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
else:
formatter = matplotlib.dates.DateFormatter('%H:%M')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 == None:
ax2 = ax1.twinx()
ax = ax2
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle='-', marker='None', tz=None)
pylab.draw()
empty = False
if ax1_labels != []:
ax1.legend(ax1_labels,loc=opts.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=opts.legend2)
if empty:
print("No data to graph")
return
from optparse import OptionParser
parser = OptionParser("mavgraph.py [options] <filename> <fields>")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by a condition")
parser.add_option("--labels",dest="labels", default=None, help="comma separated field labels")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--legend", default='upper left', help="default legend position")
parser.add_option("--legend2", default='upper right', help="default legend2 position")
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavutil
if len(args) < 2:
    print("Usage: mavgraph.py [options] <LOGFILES...> <fields...>")
sys.exit(1)
filenames = []
fields = []
for f in args:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey' ]
# work out msg types we are interested in
x = []
y = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars):
'''add some data'''
mtype = msg.get_type()
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
y[i].append(v)
x[i].append(t)
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
vars = {}
while True:
msg = mlog.recv_match(opts.condition)
if msg is None: break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, mlog.messages)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if opts.labels is not None:
labels = opts.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
plotit(x, y, lab, colors=colors[fi*len(fields):])
for i in range(0, len(x)):
x[i] = []
y[i] = []
pylab.show()
raw_input('press enter to exit....')
| agpl-3.0 |
vibhorag/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
BitTiger-MP/DS502-AI-Engineer | DS502-1702/Jason_course/Week4_Codelab2/class1_softmax_regression.py | 1 | 2305 | import numpy as np
from sklearn import linear_model, datasets
import matplotlib.pyplot as plt
def onehot(y):
n = len(np.unique(y))
m = y.shape[0]
b = np.zeros((m, n))
for i in xrange(m):
b[i, y[i]] = 1
return b
def softmax(X):
return (np.exp(X).T / np.sum(np.exp(X), axis=1)).T
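# Quick sanity check (added comment, not in the original file): softmax maps
# each row of the score matrix to a probability distribution, so rows sum to 1.
#   >>> s = softmax(np.array([[1., 2., 3.], [0., 0., 0.]]))
#   >>> np.allclose(s.sum(axis=1), 1.0)
#   True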
def h_func(theta, X):
h = np.dot(np.c_[np.ones(X.shape[0]), X], theta)
return softmax(h)
def h_gradient(theta, X, y, lam=0.1):
n = X.shape[0]
y_mat = onehot(y)
preds = h_func(theta, X)
return -1./n * np.dot(np.c_[np.ones(n), X].T, y_mat - preds) + lam * theta
def softmax_cost_func(theta, X, y, lam=0.1):
n = X.shape[0]
y_mat = onehot(y)
return -1./n * np.sum(y_mat * np.log(h_func(theta, X))) + lam/2. * np.sum(theta * theta)
# gradient descent
def softmax_grad_desc(theta, X, y, lr=.01, converge_change=.0001, max_iter=100, lam=0.1):
# normalize
# X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
cost_iter = []
cost = softmax_cost_func(theta, X, y, lam=lam)
cost_iter.append([0, cost])
change_cost = 1
i = 1
while change_cost > converge_change and i < max_iter:
pre_cost = cost
theta -= lr * h_gradient(theta, X, y)
cost = softmax_cost_func(theta, X, y)
cost_iter.append([i, cost])
change_cost = abs(pre_cost - cost)
i += 1
return theta, np.array(cost_iter)
def softmax_pred_val(theta, X):
probs = h_func(theta, X)
preds = np.argmax(probs, axis=1)
return probs, preds
def softmax_regression():
# Load the digits dataset
# 8x8 image of integer pixels in range 0..16
dataset = datasets.load_digits()
# Use all the features
X = dataset.data[:, :]
y = dataset.target[:, None]
print y
# Gradient Descent
theta = np.random.rand(X.shape[1]+1, len(np.unique(y)))
fitted_val, cost_iter = softmax_grad_desc(theta, X, y, lr=0.01, max_iter=1000, lam=0.1)
probs, preds = softmax_pred_val(fitted_val, X)
#print(fitted_val)
print(cost_iter[-1, :])
print('Accuracy: {}'.format(np.mean(preds[:, None] == y)))
plt.plot(cost_iter[:, 0], cost_iter[:, 1])
plt.ylabel("Cost")
plt.xlabel("Iteration")
plt.show()
def main():
softmax_regression()
if __name__ == "__main__":
main()
| apache-2.0 |
deepmind/deepmind-research | side_effects_penalties/results_summary.py | 1 | 7853 | # Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plot results for different side effects penalties.
Loads csv result files generated by `run_experiment' and outputs a summary data
frame in a csv file to be used for plotting by plot_results.ipynb.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from absl import app
from absl import flags
import pandas as pd
from side_effects_penalties.file_loading import load_files
FLAGS = flags.FLAGS
if __name__ == '__main__': # Avoid defining flags when used as a library.
flags.DEFINE_string('path', '', 'File path.')
flags.DEFINE_string('input_suffix', '',
'Filename suffix to use when loading data files.')
flags.DEFINE_string('output_suffix', '',
'Filename suffix to use when saving files.')
flags.DEFINE_bool('bar_plot', True,
'Make a data frame for a bar plot (True) ' +
'or learning curves (False)')
flags.DEFINE_string('env_name', 'box', 'Environment name.')
flags.DEFINE_bool('noops', True, 'Whether the environment includes noops.')
flags.DEFINE_list('beta_list', [0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0],
'List of beta values.')
flags.DEFINE_list('seed_list', [1], 'List of random seeds.')
flags.DEFINE_bool('compare_penalties', True,
'Compare different penalties using the best beta value ' +
'for each penalty (True), or compare different beta values '
+ 'for the same penalty (False).')
flags.DEFINE_enum('dev_measure', 'rel_reach',
['none', 'reach', 'rel_reach', 'att_util'],
'Deviation measure (used if compare_penalties=False).')
flags.DEFINE_enum('dev_fun', 'truncation', ['truncation', 'absolute'],
'Summary function for the deviation measure ' +
'(used if compare_penalties=False)')
flags.DEFINE_float('value_discount', 0.99,
'Discount factor for deviation measure value function ' +
'(used if compare_penalties=False)')
def beta_choice(baseline, dev_measure, dev_fun, value_discount, env_name,
beta_list, seed_list, noops=False, path='', suffix=''):
"""Choose beta value that gives the highest final performance."""
if dev_measure == 'none':
return 0.1
perf_max = float('-inf')
best_beta = 0.0
for beta in beta_list:
df = load_files(baseline=baseline, dev_measure=dev_measure,
dev_fun=dev_fun, value_discount=value_discount, beta=beta,
env_name=env_name, noops=noops, path=path, suffix=suffix,
seed_list=seed_list)
if df.empty:
perf = float('-inf')
else:
perf = df['performance_smooth'].mean()
if perf > perf_max:
perf_max = perf
best_beta = beta
return best_beta
def penalty_label(dev_measure, dev_fun, value_discount):
"""Penalty label specifying design choices."""
dev_measure_labels = {
'none': 'None', 'rel_reach': 'RR', 'att_util': 'AU', 'reach': 'UR'}
label = dev_measure_labels[dev_measure]
disc_lab = 'u' if value_discount == 1.0 else 'd'
dev_lab = ''
if dev_measure in ['rel_reach', 'att_util']:
dev_lab = 't' if dev_fun == 'truncation' else 'a'
if dev_measure != 'none':
label = label + '(' + disc_lab + dev_lab + ')'
return label
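# Worked examples of the label encoding above (added for clarity): the measure
# abbreviation is followed by (d)iscounted / (u)ndiscounted and, where a
# deviation function applies, (t)runcation / (a)bsolute, e.g.
#   penalty_label('rel_reach', 'truncation', 0.99)  -> 'RR(dt)'
#   penalty_label('reach', 'none', 1.0)             -> 'UR(u)'
#   penalty_label('none', 'none', 0.99)             -> 'None'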
def make_summary_data_frame(
env_name, beta_list, seed_list, final=True, baseline=None, dev_measure=None,
dev_fun=None, value_discount=None, noops=False, compare_penalties=True,
path='', input_suffix='', output_suffix=''):
"""Make summary dataframe from multiple csv result files and output to csv."""
# For each of the penalty parameters (baseline, dev_measure, dev_fun, and
# value_discount), compare a list of multiple values if the parameter is None,
# or use the provided parameter value if it is not None
baseline_list = ['start', 'inaction', 'stepwise', 'step_noroll']
if dev_measure is not None:
dev_measure_list = [dev_measure]
else:
dev_measure_list = ['none', 'reach', 'rel_reach', 'att_util']
dataframes = []
for dev_measure in dev_measure_list:
# These deviation measures don't have a deviation function:
if dev_measure in ['reach', 'none']:
dev_fun_list = ['none']
elif dev_fun is not None:
dev_fun_list = [dev_fun]
else:
dev_fun_list = ['truncation', 'absolute']
# These deviation measures must be discounted:
if dev_measure in ['none', 'att_util']:
value_discount_list = [0.99]
elif value_discount is not None:
value_discount_list = [value_discount]
else:
value_discount_list = [0.99, 1.0]
for baseline in baseline_list:
for vd in value_discount_list:
for devf in dev_fun_list:
# Choose the best beta for this set of penalty parameters if
# compare_penalties=True, or compare all betas otherwise
if compare_penalties:
beta = beta_choice(
baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
value_discount=vd, env_name=env_name, noops=noops,
beta_list=beta_list, seed_list=seed_list, path=path,
suffix=input_suffix)
betas = [beta]
else:
betas = beta_list
for beta in betas:
label = penalty_label(
dev_measure=dev_measure, dev_fun=devf, value_discount=vd)
df_part = load_files(
baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
value_discount=vd, beta=beta, env_name=env_name,
noops=noops, path=path, suffix=input_suffix, final=final,
seed_list=seed_list)
df_part = df_part.assign(
baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
value_discount=vd, beta=beta, env_name=env_name, label=label)
dataframes.append(df_part)
df = pd.concat(dataframes, sort=False)
# Output summary data frame
final_str = '_final' if final else ''
if compare_penalties:
filename = ('df_summary_penalties_' + env_name + final_str +
output_suffix + '.csv')
else:
filename = ('df_summary_betas_' + env_name + '_' + dev_measure + '_' +
dev_fun + '_' + str(value_discount) + final_str + output_suffix
+ '.csv')
f = os.path.join(path, filename)
df.to_csv(f)
return df
def main(unused_argv):
compare_penalties = FLAGS.compare_penalties
dev_measure = None if compare_penalties else FLAGS.dev_measure
dev_fun = None if compare_penalties else FLAGS.dev_fun
value_discount = None if compare_penalties else FLAGS.value_discount
make_summary_data_frame(
compare_penalties=compare_penalties, env_name=FLAGS.env_name,
noops=FLAGS.noops, final=FLAGS.bar_plot, dev_measure=dev_measure,
value_discount=value_discount, dev_fun=dev_fun, path=FLAGS.path,
input_suffix=FLAGS.input_suffix, output_suffix=FLAGS.output_suffix,
beta_list=FLAGS.beta_list, seed_list=FLAGS.seed_list)
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
BerryAI/Acai | OpenMRS/API/user.py | 1 | 2005 | """
user.py
~~~
This module provides all API for users
:author: Alexander Z Wang
"""
import numpy as np
import sklearn
major_version, minor_version = tuple(sklearn.__version__.split('.')[:2])
if int(minor_version) >= 18 or int(major_version) >= 1:
from sklearn.neural_network import MLPClassifier
user_model_type = 'mlp'
else:
from sklearn.linear_model import LinearRegression, LogisticRegression
user_model_type = 'linear regression'
# Logistic regression (or more generally, classification models) is
# problematic when training labels for a user
# lack diversity (e.g. only rated 3 songs, all are 'like').
def train_user_taste_model(track_hidden_features, user_ratings):
"""Get user taste model for prediction
:param track_hidden_feature: hidden feature matrix of listened tracks
:param user_rating: user rating vector of listened tracks
:return user_model: user taste model classifiers
:rtype: classifiers
"""
if user_model_type == 'logistic regression':
user_model = LogisticRegression()
elif user_model_type == 'linear regression':
user_model = LinearRegression()
    elif user_model_type == 'mlp':
user_model = MLPClassifier(
hidden_layer_sizes=(5,), max_iter=3000, alpha=1e-2,
            solver='sgd', verbose=False, tol=1e-4, random_state=1,
learning_rate_init=.01, activation='tanh')
user_model.fit(X=track_hidden_features, y=user_ratings.ravel())
return user_model
def predict_rating(user_model, track_hidden_features):
"""predict user rating score by user model
:param user_model: user taste model classifiers
    :param track_hidden_features: hidden feature matrix of tracks to be predicted
:return prediction: predicted user rating score
:rtype: list
"""
prediction = user_model.predict(track_hidden_features.reshape(1, -1))
prediction = np.clip(prediction.astype(np.int32), 1, 5)
prediction = prediction.tolist()
return prediction
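# Illustrative end-to-end sketch (added; array shapes and values are
# assumptions for demonstration, not part of the original API): train a taste
# model on a user's listening history and score one unseen track.
#   >>> listened = np.random.rand(20, 8)        # 20 tracks x 8 hidden features
#   >>> ratings = np.random.randint(1, 6, 20)   # ratings in 1..5
#   >>> model = train_user_taste_model(listened, ratings)
#   >>> predict_rating(model, np.random.rand(8))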
| mit |
gef756/statsmodels | statsmodels/sandbox/tools/mctools.py | 33 | 17175 | '''Helper class for Monte Carlo Studies for (currently) statistical tests
Most of it should also be usable for Bootstrap, and for MC for estimators.
Takes the sample generator, dgb, and the statistical results, statistic,
as functions in the argument.
Author: Josef Perktold (josef-pktd)
License: BSD-3
TODOs, Design
-------------
If we only care about univariate analysis, i.e. marginal if statistics returns
more than one value, then we only need to store the sorted mcres not the
original res. Do we want to extend to multivariate analysis?
Use distribution function to keep track of MC results, ECDF, non-parametric?
Large parts are similar to a 2d array of independent multivariate random
variables. Joint distribution is not used (yet).
I guess this is currently only for one sided test statistics, e.g. for
two-sided tests based on t or normal distribution use the absolute value.
'''
from __future__ import print_function
from statsmodels.compat.python import lrange
import numpy as np
from statsmodels.iolib.table import SimpleTable
#copied from stattools
class StatTestMC(object):
    """class to run a Monte Carlo study on a statistical test
TODO
    print summary for quantiles and for histogram
draft in trying out script log
Parameters
----------
dgp : callable
Function that generates the data to be used in Monte Carlo that should
return a new sample with each call
statistic : callable
Function that calculates the test statistic, which can return either
a single statistic or a 1d array_like (tuple, list, ndarray).
see also statindices in description of run
Attributes
----------
many methods store intermediate results
self.mcres : ndarray (nrepl, nreturns) or (nrepl, len(statindices))
Monte Carlo results stored by run
Notes
-----
.. Warning::
This is (currently) designed for a single call to run. If run is
called a second time with different arguments, then some attributes might
not be updated, and, therefore, not correspond to the same run.
.. Warning::
Under Construction, don't expect stability in Api or implementation
Examples
--------
Define a function that defines our test statistic:
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
Note lb returns eight values.
Define a random sample generator, for example 500 independently, normal
distributed observations in a sample:
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
Create instance and run Monte Carlo. Using statindices=list(range(4)) means that
    only the first four values of the return of the statistic (lb) are stored
in the Monte Carlo results.
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=list(range(4)))
Most of the other methods take an idx which indicates for which columns
the results should be presented, e.g.
    print(mc1.cdf(crit, [1,2,3])[1])
"""
def __init__(self, dgp, statistic):
self.dgp = dgp #staticmethod(dgp) #no self
self.statistic = statistic # staticmethod(statistic) #no self
def run(self, nrepl, statindices=None, dgpargs=[], statsargs=[]):
'''run the actual Monte Carlo and save results
Parameters
----------
nrepl : int
number of Monte Carlo repetitions
statindices : None or list of integers
determines which values of the return of the statistic
functions are stored in the Monte Carlo. Default None
means the entire return. If statindices is a list of
integers, then it will be used as index into the return.
dgpargs : tuple
optional parameters for the DGP
statsargs : tuple
optional parameters for the statistics function
Returns
-------
None, all results are attached
'''
self.nrepl = nrepl
self.statindices = statindices
self.dgpargs = dgpargs
self.statsargs = statsargs
dgp = self.dgp
statfun = self.statistic # name ?
#introspect len of return of statfun,
#possible problems with ndim>1, check ValueError
mcres0 = statfun(dgp(*dgpargs), *statsargs)
self.nreturn = nreturns = len(np.ravel(mcres0))
#single return statistic
if statindices is None:
#self.nreturn = nreturns = 1
mcres = np.zeros(nrepl)
mcres[0] = mcres0
            for ii in range(1, nrepl-1, nreturns):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
#should I ravel?
mcres[ii] = statfun(x, *statsargs) #unitroot_adf(x, 2,trendorder=0, autolag=None)
#more than one return statistic
else:
self.nreturn = nreturns = len(statindices)
self.mcres = mcres = np.zeros((nrepl, nreturns))
mcres[0] = [mcres0[i] for i in statindices]
for ii in range(1, nrepl-1):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
ret = statfun(x, *statsargs)
mcres[ii] = [ret[i] for i in statindices]
self.mcres = mcres
def histogram(self, idx=None, critval=None):
'''calculate histogram values
does not do any plotting
I don't remember what I wanted here, looks similar to the new cdf
method, but this also does a binned pdf (self.histo)
'''
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
if critval is None:
histo = np.histogram(mcres, bins=10)
else:
if not critval[0] == -np.inf:
bins=np.r_[-np.inf, critval, np.inf]
if not critval[0] == -np.inf:
bins=np.r_[bins, np.inf]
histo = np.histogram(mcres,
bins=np.r_[-np.inf, critval, np.inf])
self.histo = histo
self.cumhisto = np.cumsum(histo[0])*1./self.nrepl
self.cumhistoreversed = np.cumsum(histo[0][::-1])[::-1]*1./self.nrepl
return histo, self.cumhisto, self.cumhistoreversed
#use cache decorator instead
def get_mc_sorted(self):
if not hasattr(self, 'mcressort'):
self.mcressort = np.sort(self.mcres, axis=0)
return self.mcressort
def quantiles(self, idx=None, frac=[0.01, 0.025, 0.05, 0.1, 0.975]):
'''calculate quantiles of Monte Carlo results
similar to ppf
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
Defines which quantiles should be calculated. For example a frac
of 0.1 finds the 10% quantile, x such that cdf(x)=0.1
Returns
-------
frac : ndarray
same values as input, TODO: I should drop this again ?
quantiles : ndarray, (len(frac), len(idx))
the quantiles with frac in rows and idx variables in columns
Notes
-----
rename to ppf ? make frac required
change sequence idx, frac
'''
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
self.frac = frac = np.asarray(frac)
mc_sorted = self.get_mc_sorted()[:,idx]
return frac, mc_sorted[(self.nrepl*frac).astype(int)]
def cdf(self, x, idx=None):
'''calculate cumulative probabilities of Monte Carlo results
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
        x : array_like, float
            Values at which the empirical cdf of the Monte Carlo results is
            evaluated
Returns
-------
x : ndarray
same as input, TODO: I should drop this again ?
probs : ndarray, (len(x), len(idx))
            the cumulative probabilities at the x values, with idx variables in columns
'''
idx = np.atleast_1d(idx).tolist() #assure iterable, use list ?
# if self.mcres.ndim == 2:
# if not idx is None:
# mcres = self.mcres[:,idx]
# else:
# raise ValueError('currently only 1 statistic at a time')
# else:
# mcres = self.mcres
mc_sorted = self.get_mc_sorted()
x = np.asarray(x)
#TODO:autodetect or explicit option ?
if x.ndim > 1 and x.shape[1]==len(idx):
use_xi = True
else:
use_xi = False
x_ = x #alias
probs = []
for i,ix in enumerate(idx):
if use_xi:
x_ = x[:,i]
probs.append(np.searchsorted(mc_sorted[:,ix], x_)/float(self.nrepl))
probs = np.asarray(probs).T
return x, probs
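    # Illustrative note (added comment): cdf is the empirical counterpart of
    # quantiles, so for a finished Monte Carlo run mc1 the two roughly invert
    # each other:
    #   >>> frac, q = mc1.quantiles(1, frac=[0.05, 0.95])
    #   >>> mc1.cdf(q, 1)[1]    # probabilities close to [0.05, 0.95]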
def plot_hist(self, idx, distpdf=None, bins=50, ax=None, kwds=None):
'''plot the histogram against a reference distribution
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distpdf : callable
probability density function of reference distribution
bins : integer or array_like
used unchanged for matplotlibs hist call
ax : TODO: not implemented yet
kwds : None or tuple of dicts
extra keyword options to the calls to the matplotlib functions,
first dictionary is for his, second dictionary for plot of the
reference distribution
Returns
-------
None
'''
if kwds is None:
kwds = ({},{})
if self.mcres.ndim == 2:
if not idx is None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
lsp = np.linspace(mcres.min(), mcres.max(), 100)
import matplotlib.pyplot as plt
#I don't want to figure this out now
# if ax=None:
# fig = plt.figure()
# ax = fig.addaxis()
fig = plt.figure()
plt.hist(mcres, bins=bins, normed=True, **kwds[0])
plt.plot(lsp, distpdf(lsp), 'r', **kwds[1])
def summary_quantiles(self, idx, distppf, frac=[0.01, 0.025, 0.05, 0.1, 0.975],
varnames=None, title=None):
'''summary table for quantiles (critical values)
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
distppf : callable
probability density function of reference distribution
TODO: use `crit` values instead or additional, see summary_cdf
frac : array_like, float
probabilities for which
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
            use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
quant, mcq = self.quantiles(idx, frac=frac)
#not sure whether this will work with single quantile
#crit = stats.chi2([2,4]).ppf(np.atleast_2d(quant).T)
crit = distppf(np.atleast_2d(quant).T)
mml=[]
for i, ix in enumerate(idx): #TODO: hardcoded 2 ?
mml.extend([mcq[:,i], crit[:,i]])
#mmlar = np.column_stack(mml)
mmlar = np.column_stack([quant] + mml)
#print(mmlar.shape
if title:
title = title +' Quantiles (critical values)'
else:
title='Quantiles (critical values)'
#TODO use stub instead
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]//2)]
headers = ['\nprob'] + ['%s\n%s' % (i, t) for i in varnames for t in ['mc', 'dist']]
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(mmlar.shape[1]-1)},
title=title,
headers=headers)
def summary_cdf(self, idx, frac, crit, varnames=None, title=None):
'''summary table for cumulative density function
Parameters
----------
idx : None or list of integers
List of indices into the Monte Carlo results (columns) that should
be used in the calculation
frac : array_like, float
probabilities for which
crit : array_like
values for which cdf is calculated
varnames : None, or list of strings
optional list of variable names, same length as idx
Returns
-------
table : instance of SimpleTable
            use `print(table)` to see results
'''
idx = np.atleast_1d(idx) #assure iterable, use list ?
mml=[]
#TODO:need broadcasting in cdf
for i in range(len(idx)):
#print(i, mc1.cdf(crit[:,i], [idx[i]])[1].ravel()
mml.append(self.cdf(crit[:,i], [idx[i]])[1].ravel())
#mml = self.cdf(crit, idx)[1]
#mmlar = np.column_stack(mml)
#print(mml[0].shape, np.shape(frac)
mmlar = np.column_stack([frac] + mml)
#print(mmlar.shape
if title:
            title = title + ' Probabilities'
else:
title='Probabilities'
#TODO use stub instead
#headers = ['\nprob'] + ['var%d\n%s' % (i, t) for i in range(mmlar.shape[1]-1) for t in ['mc']]
if varnames is None:
varnames = ['var%d' % i for i in range(mmlar.shape[1]-1)]
headers = ['prob'] + varnames
return SimpleTable(mmlar,
txt_fmt={'data_fmts': ["%#6.3f"]+["%#10.4f"]*(np.array(mml).shape[1]-1)},
title=title,
headers=headers)
if __name__ == '__main__':
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from statsmodels.sandbox.stats.diagnostic import (
acorr_ljungbox, unitroot_adf)
def randwalksim(nobs=100, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def adf20(x):
return unitroot_adf(x, 2,trendorder=0, autolag=None)
# print('\nResults with MC class'
# mc1 = StatTestMC(randwalksim, adf20)
# mc1.run(1000)
# print(mc1.histogram(critval=[-3.5, -3.17, -2.9 , -2.58, 0.26])
# print(mc1.quantiles()
print('\nLjung Box')
from statsmodels.sandbox.stats.diagnostic import acorr_ljungbox
def lb4(x):
s,p = acorr_ljungbox(x, lags=4)
return s[-1], p[-1]
def lb1(x):
s,p = acorr_ljungbox(x, lags=1)
return s[0], p[0]
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
print('Results with MC class')
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(10000, statindices=lrange(8))
print(mc1.histogram(1, critval=[0.01, 0.025, 0.05, 0.1, 0.975]))
print(mc1.quantiles(1))
print(mc1.quantiles(0))
print(mc1.histogram(0))
#print(mc1.summary_quantiles([1], stats.chi2([2]).ppf, title='acorr_ljungbox')
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(0.1026, 1))
print(mc1.cdf(0.7278, 3))
print(mc1.cdf(0.7278, [1,2,3]))
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,3], frac, crit, title='acorr_ljungbox'))
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1].shape)
#fixed broadcasting in cdf Done 2d only
'''
>>> mc1.cdf(crit[:,0], [1])[1].shape
(5, 1)
>>> mc1.cdf(crit[:,0], [1,3])[1].shape
(5, 2)
>>> mc1.cdf(crit[:,:], [1,3])[1].shape
(2, 5, 2)
'''
doplot=0
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist(0,stats.chi2(2).pdf) #which pdf
plt.show()
| bsd-3-clause |
simon-pepin/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
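# Optional check (editor addition, a minimal sketch): quantify the claim above by
# scoring both classifiers on the training data they were fit on.
from sklearn.metrics import accuracy_score
print("BernoulliNB on transformed data: %.3f"
      % accuracy_score(y, nb.predict(X_transformed)))
print("ExtraTrees on original data:     %.3f"
      % accuracy_score(y, trees.predict(X)))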
| bsd-3-clause |
cojacoo/echoRD_model | echoRD/hydro_tools.py | 1 | 49640 | #hydro_tools - some python functions for hydrological visualisation
#(cc) jackisch@kit.edu
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib
#TOOLS
# a simple progress bar
import sys, time
try:
from IPython.display import clear_output
have_ipython = True
except ImportError:
have_ipython = False
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 40
self.__update_amount(0)
if have_ipython:
self.animate = self.animate_ipython
else:
self.animate = self.animate_noipython
def animate_ipython(self, iter):
sys.stdout.write('\r' + str(self))
sys.stdout.flush()
self.update_iteration(iter + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
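# Minimal usage sketch for the ProgressBar helper above (editor addition; the
# sleep call is a stand-in assumption for real per-iteration work):
#
#     pb = ProgressBar(200)
#     for it in range(200):
#         time.sleep(0.01)
#         pb.animate(it)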
#euler richards solver
def darcy(psi,z,k):
phi=psi+z
dz=np.diff(z)
Q=np.sqrt(k[:-1]*k[1:])*np.diff(phi)/dz
return Q
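# --- illustrative sketch (editor addition, not part of the original echoRD code) ---
# A minimal, self-contained call of darcy() on a synthetic three-node column;
# the depths, potentials and conductivities below are assumptions chosen only
# to exercise the function. Inter-node conductivity is the geometric mean used above.
def _darcy_demo():
    z = np.array([0.0, -0.1, -0.2])      # node depth [m]
    psi = np.array([-1.0, -1.2, -1.5])   # matric potential [m]
    k = np.array([1e-5, 5e-6, 1e-6])     # hydraulic conductivity [m/s]
    return darcy(psi, z, k)              # two inter-node Darcy fluxes [m/s]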
def euler(psi_l,c_l,q,qb_u,qb_l,dt,z):
dz=np.diff(z)
#psinew=psi_l[:-1]+dt/(c_l[:-1]*dz*dz)*np.append(qb_u,np.diff(q))
psinew=psi_l[:-1]+dt/(c_l[:-1]*dz*dz)*np.diff(np.append((qb_u+q[0])/2.,q))
psilow=psi_l[-1]+dt/(c_l[-1]*dz[-1])*qb_l
return np.append(psinew,psilow)
def richards(t_end,psi,mc,vG):
time=0.
soil=mc.soilgrid[:,1]-1
dzmn=mc.mgrid.latfac.values
while time<t_end:
k=vG.ku_psi(psi, mc.soilmatrix.ks[soil], mc.soilmatrix.alpha[soil], mc.soilmatrix.n[soil]).values
c=vG.c_psi(psi, mc.soilmatrix.ts[soil], mc.soilmatrix.tr[soil], mc.soilmatrix.alpha[soil], mc.soilmatrix.n[soil]).values
q=darcy(psi,mc.zgrid[:,1],k)
dt=np.amin([0.1,0.05*dzmn/np.amax(np.abs(q))])
#predictor
psinew=euler(psi,c,q,0.,0.,dt*0.5,mc.zgrid[:,1])
k=vG.ku_psi(psinew, mc.soilmatrix.ks[soil], mc.soilmatrix.alpha[soil], mc.soilmatrix.n[soil]).values
c=vG.c_psi(psinew, mc.soilmatrix.ts[soil], mc.soilmatrix.tr[soil], mc.soilmatrix.alpha[soil], mc.soilmatrix.n[soil]).values
q=darcy(psinew,mc.zgrid[:,1],k)
#corrector
psinew=euler(psinew,c,q,0.,0.,dt,mc.zgrid[:,1])
psi=psinew
time=time+dt
return psi
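# Note on the scheme in richards() above (editor addition, descriptive only):
# each pass takes a half Euler step from psi to an intermediate state, re-evaluates
# K(psi) and C(psi) there, and then takes a full Euler step from that intermediate
# state; the step size is re-limited every pass to min(0.1, 0.05*dz/max|q|) to keep
# the explicit update stable.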
#PLOTTING
#define plot function for soil moisture data
def hydroplot(obs,mlab,mlabcols,fsize=(6, 6),saving=False,upbound=40,lowbound=10,catch='Catchment',precscale=100.,cumprec=False,align=False,tspacehr=6,ccontrast=False,descriptor='Sensor\nLocation'):
'''
This is a rather simple function to plot hydrological data (soil moisture and precipitation) of a pandas data frame.
It is based on some excellent examples by Randy Olson and may need heavy adaption to your data.
(CC BY-NC-SA 4.0) jackisch@kit.edu
fsize: Provide figure size as tuple.
saving: Provide a file name if you want the figure saved.
XXbound: Give bounds of left axis.
catch: Provide catchment name.
precscale: Scaling if your prec data is not in mm.
cumprec: If True, cumulative precipitation is plotted.
The function assumes a pandas data frame with time stamps as its index.
You may prepare this as:
obs=pd.read_csv('soil_moisture_file.csv')
obs.index=pd.to_datetime(obs['DATE'] + ' ' + obs['TIME'],format='%d/%m/%y %H:%M')
obs = obs.drop(['DATE','TIME'], 1)
Moreover, precipitation should reside in column 'Prec'
'''
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
plt.figure(figsize=fsize)
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.ylim(lowbound, upbound)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.yticks(range(lowbound, upbound, 10), [str(x) + "%" for x in range(lowbound, upbound, 10)], fontsize=14)
plt.xticks(fontsize=14)
ax.xaxis.set_minor_locator(matplotlib.dates.HourLocator(interval=tspacehr))
ax.xaxis.set_minor_formatter(matplotlib.dates.DateFormatter('\n%H'))
ax.xaxis.grid(False, which="minor")
ax.xaxis.grid(True, which="major")
ax.xaxis.set_major_locator(matplotlib.dates.DayLocator())
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('\n\n%d.%m.%y'))
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = mlab
colnames=mlabcols
#get positions
yposlab=obs[colnames].values[-1,:]
yposlab=np.round(yposlab)
if align:
while any(np.diff(yposlab)==0.):
overlap=np.diff(yposlab)
overlapavoid=0.*overlap
overlapavoid[overlap==0.]=1.
overlap[np.where(overlap==0.)[0]+1][overlap[np.where(overlap==0.)[0]+1]==1.]=overlapavoid[np.where( overlap==0.)[0]+1]+1.
yposlab[1:]=yposlab[1:]+overlapavoid
else:
if any(np.diff(yposlab)<2.):
yposlab=upbound-(np.arange(len(yposlab))*2.+15.)
for rank, column in enumerate(majors):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
if ccontrast:
plt.plot(obs.index, obs[colnames[rank]].values, lw=2.5, color=tableau20[rank*2+2])
else:
plt.plot(obs.index, obs[colnames[rank]].values, lw=2.5, color=tableau20[rank+2])
# Add a text label to the right end of every line. Most of the code below
# is adding specific offsets y position because some labels overlapped.
if ccontrast:
plt.text(obs.index[-1]+ pd.datetools.timedelta(hours=0.1), yposlab[rank], column, fontsize=14, color=tableau20[rank*2+2])
else:
plt.text(obs.index[-1]+ pd.datetools.timedelta(hours=0.1), yposlab[rank], column, fontsize=14, color=tableau20[rank+2])
for y in range(10, upbound, 10):
plt.axhline(y, lw=0.5, color="black", alpha=0.3)
plt.text(obs.index[len(obs)/2], upbound+2, ''.join(["Soil Moisture and Precipitation at ",catch]) , fontsize=17, ha="center")
plt.text(obs.index[0]- pd.datetools.timedelta(hours=0.1),32,'< Soil Moisture\n',rotation='vertical',horizontalalignment='right',verticalalignment='bottom',fontsize=12,alpha=0.7)
plt.text(obs.index[0]- pd.datetools.timedelta(hours=0.1),32,'> Precipitation',rotation='vertical',horizontalalignment='right',verticalalignment='bottom',fontsize=12,color=tableau20[0],alpha=0.7)
plt.text(obs.index[-1]+ pd.datetools.timedelta(hours=0.1),10,descriptor,rotation='vertical',verticalalignment='bottom',fontsize=12,alpha=0.7)
plt.text(obs.index[-1]+ pd.datetools.timedelta(hours=0.1),7.55,'Hr',verticalalignment='bottom',fontsize=12, alpha=0.7)
plt.text(obs.index[-1]+ pd.datetools.timedelta(hours=0.1),5.6,'Date',verticalalignment='bottom',fontsize=14, alpha=0.7)
ax2 = ax.twinx()
if cumprec:
obs['Preccum']=np.cumsum(obs['Prec'].values)
obs=obs.rename(columns={'Prec': 'Prec_m','Preccum': 'Prec'})
precstp=np.around(obs['Prec'].max()/4.,decimals=-int(np.floor(np.log10(obs['Prec'].max()/4.))))
ax2.set_ylim((-obs['Prec'].max()*3.,1))
ax2.set_yticks(-np.arange(0,obs['Prec'].max(), precstp)[::-1])
ax2.set_yticklabels([str(x) + "mm" for x in np.arange(0.,obs['Prec'].max(), precstp)/precscale][::-1], fontsize=14,color=tableau20[0])
ax2.plot(obs.index, -obs['Prec'], color=tableau20[0])
ax2.fill_between(obs.index, -obs['Prec'], 0., color=tableau20[1])
ax2.yaxis.grid(True)
if saving!=False:
plt.savefig(saving, bbox_inches="tight")
def hydroprofile(obs,obs2=None,fsize=(6, 6),xbound=[0.,1.],ybound=[0.,1.],ptitle='Plot',xlab='feature',saving=False,ccontrast=False,colors=None,compress=True,compress2=False,ebar=False):
'''
This is a rather simple function to plot hydrological data (profile) of a pandas data frame.
It is based on some excellent examples by Randy Olson and may need heavy adaption to your data.
(CC BY-NC-SA 4.0) jackisch@kit.edu
obs: dataframe to plot, index is y axis
obs2: optional second dataframe (e.g. simulated profiles) plotted for comparison
fsize: Provide figure size as tuple.
saving: Provide a file name if you want the figure saved.
XXbound: Give axis bounds and tick count as [min, max, n_ticks].
ptitle: Plot title
ccontrast: option to only use high contrast values from palette
colors: option to pass own colors
compress: compress obs to mean with errorbar
compress2: compress also simulations to mean with errorbar
'''
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
if colors is None:
colors=tableau20
plt.figure(figsize=fsize)
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(111)
ax.spines["top"].set_visible(True)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(True)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary chartjunk.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.xlim(xbound[0], xbound[1])
plt.ylim(ybound[0], ybound[1])
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.yticks(np.linspace(ybound[0], ybound[1], ybound[2]), [str(x) for x in np.linspace(ybound[0], ybound[1], ybound[2])], fontsize=14)
plt.xticks(np.linspace(xbound[0], xbound[1], xbound[2]), [str(x) for x in np.linspace(xbound[0], xbound[1], xbound[2])],fontsize=14)
plt.ylabel('depth [m]')
plt.xlabel(xlab)
ax.xaxis.grid(True)
ax.yaxis.grid(True)
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
if compress:
plt.plot(obs.mean(axis=1).values,obs.index, lw=2., color=colors[0], label='observed')
if ebar==True:
plt.errorbar(obs.mean(axis=1).values,obs.index,xerr=obs.std(axis=1).values, lw=1., color=colors[0])
else:
plt.fill_betweenx(np.array(obs.index,dtype=float),(obs.mean(axis=1)-obs.std(axis=1)).values,(obs.mean(axis=1)+obs.std(axis=1)).values, color=colors[0],alpha=0.3)
offset=2
if (obs2 is not None):
if compress2:
plt.plot(obs2.mean(axis=1).values,obs2.index, lw=2., color=colors[2], label='simulated')
plt.errorbar(obs2.mean(axis=1).values,obs2.index,xerr=obs2.std(axis=1).values, lw=1., color=colors[2])
else:
for rank in range(len(obs2.columns)):
plt.plot(obs2.iloc[:,rank].values,obs2.index, lw=2., color=colors[rank+offset], label=str(obs2.columns[rank]))
else:
for rank in range(len(obs.columns)):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
if ccontrast:
plt.plot(obs.iloc[:,rank].values,obs.index, lw=2., color=colors[rank*2], label=str(obs.columns[rank]))
else:
plt.plot(obs.iloc[:,rank].values,obs.index, lw=2., color=colors[rank], label=str(obs.columns[rank]))
offset=len(obs.columns)
if obs2 is not None:
for rank in range(len(obs2.columns)):
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
if ccontrast:
plt.plot(obs2.iloc[:,rank].values,obs2.index, lw=2., color=colors[rank*2+offset], label=str(obs2.columns[rank]))
else:
plt.plot(obs2.iloc[:,rank].values,obs2.index, lw=2., color=colors[rank+offset], label=str(obs2.columns[rank]))
plt.title(ptitle,fontsize=16)
plt.legend(loc=4,frameon=False,ncol=int(np.ceil(len(obs2.columns)/8.)))
if saving!=False:
plt.savefig(saving, bbox_inches="tight")
def oneDplot(particles,obsx,theta_r,theta_re,gridupdate_thS1D,pdyn,vG,dt,sigma,runname,ti,i,mc,saving=False,t=1,store=False,fsize=(8, 5),xlimset=[0.15,0.3,2],ad_diff=False):
#plot 1D profile
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
thS=gridupdate_thS1D(particles.cell,mc,pdyn)
theta_p=vG.theta_thst(thS/100., mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpx=gaussian_filter1d(theta_p,sigma)
if ad_diff:
thSdiff=gridupdate_thS1D(particles.cell[particles.flag==0],mc,pdyn)
theta_pdiff=vG.theta_thst(thSdiff/100., mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpxdiff=gaussian_filter1d(theta_pdiff,sigma)
n=len(particles)
obs_id=np.argmin([np.abs(obsx.index[x]-ti) for x in range(len(obsx))])
probloc=[-0.03,-0.1,-0.2,-0.3,-0.4]
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(1, 2, width_ratios=[2,1])
plt.subplot(gs[0])
plt.plot(thpx,mc.zgrid[:,1],label='Particle')
if ad_diff:
plt.plot(thpxdiff,mc.zgrid[:,1],label='Particle_diffusive')
plt.plot(theta_r,mc.zgrid[:,1],label='Rich SimpegFlow')
plt.plot(theta_re,mc.zgrid[:,1],label='Rich Euler')
plt.plot(obsx.iloc[obs_id]/100.,probloc,'.',label='Observation')
plt.legend(loc=4)
#text(0.35, -0.4, ''.join(['t=',str(int(t)),'m']), fontsize=12)
#text(0.3, -0.5, ''.join(['particles: ',str(n)]), fontsize=12)
plt.xlim(xlimset[:2])
plt.xticks(np.arange(xlimset[0],xlimset[1],xlimset[2]))
plt.xlabel('theta [m3/m3]')
plt.ylabel('depth [m]')
#title(''.join(['Model and Obs @ ',str(int(ti)),'s']))
plt.title(''.join(['Model and Observation\nTime: ',str(int(t)),'min']))
#title(''.join(['echoRD1D @ ',str(int(ti)),'s']))
ax1=plt.subplot(gs[1])
zi=np.arange(-0.0,mc.soildepth-0.01,-0.01)
oldp=np.bincount(np.round(np.append(-particles.z[particles.flag==0].values,-zi)*100.).astype(int))-1
allp=np.bincount(np.round(np.append(-particles.z[particles.flag<2].values,-zi)*100.).astype(int))-1
plt.plot(gaussian_filter1d(oldp,sigma),zi,label='old')
plt.plot(gaussian_filter1d(allp[0:len(oldp)],sigma),zi,label='all')
plt.plot(gaussian_filter1d(allp[0:len(oldp)]-oldp,sigma),zi,label='new')
a=np.ceil(n/1000.)*12.
plt.xlim([0,a])
plt.xticks(np.linspace(0,a,4))
plt.xlabel('particles')
plt.legend(loc=4)
#title(''.join(['Max Peclet=',str(np.round(Pe,2))]))
plt.title(''.join(['total:\n',str(n)]))
ax1.yaxis.tick_right()
if saving:
plt.savefig(''.join(['./results/',runname,str(i).zfill(3),'.pdf']))
plt.close(fig)
if store:
idz=[0,10,20,30,40]
if ad_diff:
return [obsx.values[obs_id]/100., thpx[idz],theta_re.values[idz],theta_r[idz],thpxdiff[idz]]
else:
return [obsx.values[obs_id]/100., thpx[idz],theta_re.values[idz],theta_r[idz]]
def oneDplot2(particles,obsx,theta_r,theta_re,gridupdate_thS1D,pdyn,vG,dt,sigma,runname,ti,i,mc,saving=False,t=1,store=False,fsize=(8, 5),xlimset=[0.15,0.3,2],ad_diff=False):
#plot 1D profile
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
[thS,npart]=gridupdate_thS1D(particles.cell,mc,pdyn)
theta_p=vG.theta_thst(thS/100., mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpx=gaussian_filter1d(theta_p,sigma)
if ad_diff:
[thSdiff,npartdiff]=gridupdate_thS1D(particles.cell[particles.flag==0],mc,pdyn)
theta_pdiff=vG.theta_thst(thSdiff/100., mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpxdiff=gaussian_filter1d(theta_pdiff,sigma)
n=len(particles)
obs_id=np.argmin([np.abs(obsx.index[x]-ti) for x in range(len(obsx))])
probloc=[-0.03,-0.1,-0.2,-0.3,-0.4]
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(1, 4, width_ratios=[2,1,0.6,0.6])
plt.subplot(gs[0])
plt.plot(thpx,mc.zgrid[:,1],label='Particle')
if ad_diff:
plt.plot(thpxdiff,mc.zgrid[:,1],label='Particle_diffusive')
plt.plot(theta_r,mc.zgrid[:,1],label='Rich SimpegFlow')
plt.plot(theta_re,mc.zgrid[:,1],label='Rich Euler')
plt.plot(obsx.iloc[obs_id]/100.,probloc,'.',label='Observation')
plt.legend(loc=4)
#text(0.35, -0.4, ''.join(['t=',str(int(t)),'m']), fontsize=12)
#text(0.3, -0.5, ''.join(['particles: ',str(n)]), fontsize=12)
plt.xlim(xlimset[:2])
plt.xticks(np.arange(xlimset[0],xlimset[1],xlimset[2]))
plt.xlabel('theta [m3/m3]')
plt.ylabel('depth [m]')
#title(''.join(['Model and Obs @ ',str(int(ti)),'s']))
plt.title(''.join(['Model and Observation\nTime: ',str(int(t)),'min']))
#title(''.join(['echoRD1D @ ',str(int(ti)),'s']))
ax1=plt.subplot(gs[1])
zi=np.arange(-0.0,mc.soildepth-0.01,-0.01)
oldp=np.bincount(np.round(np.append(-particles.z[particles.flag==0].values,-zi)*100.).astype(int))-1
allp=np.bincount(np.round(np.append(-particles.z[particles.flag<2].values,-zi)*100.).astype(int))-1
plt.plot(gaussian_filter1d(oldp,sigma),zi,label='old')
plt.plot(gaussian_filter1d(allp[0:len(oldp)],sigma),zi,label='all')
plt.plot(gaussian_filter1d(allp[0:len(oldp)]-oldp,sigma),zi,label='new')
a=np.ceil(n/1000.)*12.
plt.xlim([0,a])
plt.xticks(np.linspace(0,a,4))
plt.xlabel('particles')
plt.legend(loc=4)
#title(''.join(['Max Peclet=',str(np.round(Pe,2))]))
plt.title(''.join(['total:\n',str(n)]))
ax1.get_yaxis().set_visible(False)
ax2=plt.subplot(gs[2])
#a=(np.abs(particles.groupby(['cell'], sort=True).max().lastZ.values-particles.groupby(['cell'], sort=True).min().z.values)/dt)/np.sqrt(mc.D[np.amax(thS),4])
#e=particles.groupby(['cell'], sort=True).mean().lastD.values*mc.mgrid.vertfac.values/mc.D[np.amax(thS),4]
a=(np.abs(mc.mgrid.vertfac.values)*np.abs(particles.groupby(['cell'], sort=True).mean().lastZ.values-particles.groupby(['cell'], sort=True).mean().z.values)/dt)/mc.D[np.amax(thS),4]
e=(np.abs(mc.mgrid.vertfac.values)*np.abs(particles.groupby(['cell'], sort=True).max().lastZ.values-particles.groupby(['cell'], sort=True).min().z.values)/dt)/mc.D[np.amax(thS),4]
plt.plot(a,mc.zgrid[:,1],label='diff')
#plot(e,mc.zgrid[:,1],alpha=0.5, color='b')
#fill_betweenx(mc.zgrid[:,1], a, e,alpha=0.2)
#plot(u,mc.zgrid[:,1],color='adv')
#errorbar(a,mc.zgrid[:,1],xerr=a-e, ecolor='lightblue')
plt.xlim([-0.05,np.ceil(np.amax([np.amax(a),0.05])*100.)/100.])
plt.xticks(np.linspace(0.,np.ceil(np.amax([np.amax(a),0.05])*100.)/100.,2))
ax2.get_yaxis().set_visible(False)
#xscale('log')
plt.xlabel('u(i,z)/D(z)')
plt.title('Peclet\n(mean)')
ax3=plt.subplot(gs[3])
plt.plot(e,mc.zgrid[:,1])
plt.xlim([-0.05,np.ceil(np.amax([np.amax(e),0.05])*100.)/100.])
plt.xticks(np.linspace(0.,np.ceil(np.amax([np.amax(e),0.05])*100.)/100.,2))
#xscale('log')
plt.xlabel('u(i,z)/D(z)')
plt.title('Peclet\n(max)')
ax3.yaxis.tick_right()
if saving:
plt.savefig(''.join(['./results/',runname,str(i).zfill(3),'.pdf']))
plt.close(fig)
if store:
idz=[0,10,20,30,40]
if ad_diff:
return [obsx.values[obs_id]/100., thpx[idz],theta_re.values[idz],theta_r[idz],thpxdiff[idz]]
else:
return [obsx.values[obs_id]/100., thpx[idz],theta_re.values[idz],theta_r[idz]]
def plotparticles_t_obs(particles,obsx,thS,mc,vG,runname='test',t=0.,ix=0,sigma=0.5,fsize=(8, 8),saving=False,store=False):
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(2, 3, width_ratios=[2,1,1], height_ratios=[1,5])
ax1 = plt.subplot(gs[0])
ax11 = ax1.twinx()
advect_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age>0.)),'lat'].values).astype(np.int))
old_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age<=0.)),'lat'].values).astype(np.int))
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:],'b-')
ax11.plot((np.arange(0,len(old_dummy))/100.)[1:],old_dummy[1:],'g-')
ax11.set_ylabel('Particles', color='g')
ax11.set_xlim([0.,mc.mgrid.width.values])
ax1.set_xlim([0.,mc.mgrid.width.values])
ax1.set_ylabel('New Particles', color='b')
ax1.set_xlabel('Lat [m]')
ax1.set_title('Lateral Marginal Count')
ax2 = plt.subplot(gs[1:2])
ax2.axis('off')
ax2.text(0.1, 0.5, 'time: '+str(np.round(t/60.,1))+'min', fontsize=17)
ax2.text(0.1, 0.2, 'particles: '+str(sum(particles.z>mc.soildepth)), fontsize=17)
ax2.text(0.1, 0.8, runname, fontsize=17)
ax3 = plt.subplot(gs[3])
plt.imshow(sp.ndimage.filters.median_filter(thS,size=mc.smooth),vmin=0., vmax=1., cmap='Blues')
#plt.imshow(npart)
plt.colorbar()
plt.xlabel('Width [cells a 5 mm]')
plt.ylabel('Depth [cells a 5 mm]')
plt.title('Particle Density')
plt.tight_layout()
ax4 = plt.subplot(gs[4])
#ax41 = ax4.twiny()
onez=np.append(mc.zgrid[:,1]+0.001,mc.soildepth)
z1=np.append(particles.loc[((particles.age>0.)),'z'].values,onez)
advect_dummy=np.bincount(np.round(-100.0*z1).astype(np.int))-1
z2=np.append(particles.loc[((particles.age<=0.)),'z'].values,onez)
old_dummy=np.bincount(np.round(-100.0*z2).astype(np.int))-1
ax4.plot(advect_dummy,(np.arange(0,len(advect_dummy))/-100.),'r-',label='new particles')
ax4.plot(advect_dummy+old_dummy,(np.arange(0,len(old_dummy))/-100.),'b-',label='all particles')
ax4.plot(old_dummy,(np.arange(0,len(old_dummy))/-100.),'g-',label='old particles')
ax4.set_xlabel('Particle Count')
#ax4.set_xlabel('New Particle Count', color='r')
ax4.set_ylabel('Depth [m]')
#ax4.set_title('Number of Particles')
ax4.set_ylim([mc.mgrid.depth.values,0.])
ax4.set_xlim([0.,np.max(old_dummy+advect_dummy)])
#ax41.set_xlim([0.,np.max(old_dummy[1:])])
#ax41.set_ylim([mc.mgrid.depth.values,0.])
handles1, labels1 = ax4.get_legend_handles_labels()
#handles2, labels2 = ax41.get_legend_handles_labels()
ax4.legend(handles1, labels1, loc=4)
ax4.set_title('Vertical Marginal Count')
# ax41.legend(loc=4)
ax5 = plt.subplot(gs[5])
theta_p=vG.theta_thst(thS.mean(axis=1), mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpx=gaussian_filter1d(theta_p,sigma)
theta_mn=vG.theta_thst(thS.min(axis=1), mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpmn=gaussian_filter1d(theta_mn,sigma)
theta_mx=vG.theta_thst(thS.max(axis=1), mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpmx=gaussian_filter1d(theta_mx,sigma)
obs_id=np.argmin([np.abs(obsx.index[x]-t) for x in range(len(obsx))])
probloc=[-0.03,-0.1,-0.2,-0.3,-0.4]
ax5.plot(thpx,mc.zgrid[:,1],label='Particle')
ax5.plot(thpmx,mc.zgrid[:,1],'b--',label='Particle_min/max')
ax5.plot(thpmn,mc.zgrid[:,1],'b--')
ax5.plot(obsx.iloc[obs_id]/100.,probloc,'.',label='Observation')
ax5.set_xlim([mc.soilmatrix.tr.min(),mc.soilmatrix.ts.max()])
ax5.set_ylabel('Depth [m]')
ax5.set_xlabel('Theta')
handles2, labels2 = ax5.get_legend_handles_labels()
ax5.legend(handles2, labels2,loc=4)
ax5.set_title('Soil Moisture Log')
if saving:
plt.savefig(''.join(['./results/',runname,'t_',str(ix).zfill(3),'.png']))
plt.close(fig)
if store:
idz=[3,10,20,30,40]
return [np.concatenate([obsx.iloc[obs_id].values/100.,thpx[idz]]),advect_dummy]
def plotparticles_t(particles,thS,mc,vG,runname='test',t=0.,ix=0,sigma=0.5,fsize=(8, 8),saving=False,store=False):
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(2, 3, width_ratios=[2,1,1], height_ratios=[1,5])
ax1 = plt.subplot(gs[0])
ax11 = ax1.twinx()
advect_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age>0.)),'lat'].values).astype(np.int))
old_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age<=0.)),'lat'].values).astype(np.int))
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:],'b-')
ax11.plot((np.arange(0,len(old_dummy))/100.)[1:],old_dummy[1:],'g-')
ax11.set_ylabel('Particles', color='g')
ax11.set_xlim([0.,mc.mgrid.width.values])
ax1.set_xlim([0.,mc.mgrid.width.values])
ax1.set_ylabel('New Particles', color='b')
ax1.set_xlabel('Lat [m]')
ax1.set_title('Lateral Marginal Count')
ax2 = plt.subplot(gs[1:2])
ax2.axis('off')
ax2.text(0.1, 0.5, 'time: '+str(np.round(t/60.,1))+'min', fontsize=17)
ax2.text(0.1, 0.2, 'particles: '+str(sum(particles.z>mc.soildepth)), fontsize=17)
ax2.text(0.1, 0.8, runname, fontsize=17)
ax3 = plt.subplot(gs[3])
plt.imshow(sp.ndimage.filters.median_filter(thS,size=mc.smooth),vmin=0., vmax=1., cmap='Blues')
#plt.imshow(npart)
plt.colorbar()
plt.xlabel('Width [cells a 5 mm]')
plt.ylabel('Depth [cells a 5 mm]')
plt.title('Particle Density')
plt.tight_layout()
ax4 = plt.subplot(gs[4])
#ax41 = ax4.twiny()
onez=np.append(np.arange(0.,mc.soildepth,-0.01)+0.001,mc.soildepth) #one particle per cm soil
#onez=np.append(mc.zgrid[:,1]+0.001,mc.soildepth)
z1=np.append(particles.loc[((particles.age>0.)),'z'].values,onez)
advect_dummy=np.bincount(np.round(-100.0*z1).astype(np.int))-1
z2=np.append(particles.loc[((particles.age<=0.)),'z'].values,onez)
old_dummy=np.bincount(np.round(-100.0*z2).astype(np.int))-1
ax4.plot(advect_dummy,(np.arange(0,len(advect_dummy))/-100.),'r-',label='new particles')
ax4.plot(advect_dummy+old_dummy,(np.arange(0,len(old_dummy))/-100.),'b-',label='all particles')
ax4.plot(old_dummy,(np.arange(0,len(old_dummy))/-100.),'g-',label='old particles')
ax4.set_xlabel('Particle Count')
#ax4.set_xlabel('New Particle Count', color='r')
ax4.set_ylabel('Depth [m]')
#ax4.set_title('Number of Particles')
ax4.set_ylim([mc.mgrid.depth.values,0.])
ax4.set_xlim([0.,np.max(old_dummy+advect_dummy)])
#ax41.set_xlim([0.,np.max(old_dummy[1:])])
#ax41.set_ylim([mc.mgrid.depth.values,0.])
handles1, labels1 = ax4.get_legend_handles_labels()
#handles2, labels2 = ax41.get_legend_handles_labels()
ax4.legend(handles1, labels1, loc=4)
ax4.set_title('Vertical Marginal Count')
# ax41.legend(loc=4)
ax5 = plt.subplot(gs[5])
theta_p=vG.theta_thst(thS.mean(axis=1), mc.soilmatrix.ts[mc.soilgrid[:,1]-1], mc.soilmatrix.tr[mc.soilgrid[:,1]-1])
thpx=gaussian_filter1d(theta_p,sigma)
ax5.plot(thpx,mc.zgrid[:,1],label='Particle')
ax5.set_xlim([mc.soilmatrix.tr.min(),mc.soilmatrix.ts.max()])
ax5.set_ylabel('Depth [m]')
ax5.set_xlabel('Theta')
handles2, labels2 = ax5.get_legend_handles_labels()
ax5.legend(handles2, labels2,loc=4)
ax5.set_title('Soil Moisture Log')
if saving:
plt.savefig(''.join(['./results/',runname,'t_',str(ix).zfill(3),'.png']))
plt.close(fig)
if store:
idz=[3,10,20,30,40]
return [thpx[idz],advect_dummy]
def plotparticles_specht(particles,mc,pdyn,vG,runname='test',t=0.,ix=0,sigma=0.5,fsize=(3.6, 9),saving=False,relative=True):
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[thSn,npartn]=pdyn.gridupdate_thS(particles.loc[((particles.age>0.)),'lat'],particles.loc[((particles.age>0.)),'z'].values,mc)
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(2, 2, width_ratios=[4,1.1], height_ratios=[1,9],hspace=0.02, wspace=0.02)
#marginal X
ax1 = fig.add_subplot(gs[0,0])
old_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age<=0.)),'lat'].values).astype(np.int))
all_dummy=np.copy(old_dummy)
advect_dummy=old_dummy*0
advect_dummy2=np.bincount(np.round(100.0*particles.loc[((particles.age>0.)),'lat'].values).astype(np.int))
advect_dummy[advect_dummy2>0]+=advect_dummy2[advect_dummy2>0]
all_dummy+=advect_dummy
if relative:
old_dummy/=np.sum(old_dummy)
advect_dummy/=np.sum(advect_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:],'r-',label='new particles')
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:],'b-',label='all particles')
ax1.plot((np.arange(0,len(old_dummy))/100.)[1:],old_dummy[1:],'g-',label='old particles')
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:], 0., color='b',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],old_dummy[1:], 0., color='g',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:], 0., color='r',alpha=0.3)
ax1.set_xlim((0,0.34))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["top"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_visible(False)
#marginal Y
ax1 = fig.add_subplot(gs[1,1])
onez=np.append(mc.zgrid[:,1]+0.001,mc.soildepth)
z1=np.append(particles.loc[((particles.age>0.)),'z'].values,onez)
advect_dummy=np.bincount(np.round(-100.0*z1).astype(np.int)).astype(float)-1
all_dummy=np.copy(advect_dummy)
z2=np.append(particles.loc[((particles.age<=0.)),'z'].values,onez)
old_dummy=np.bincount(np.round(-100.0*z2).astype(np.int)).astype(float)-1
all_dummy+=old_dummy
if relative:
advect_dummy/=np.sum(advect_dummy)
old_dummy/=np.sum(old_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot(advect_dummy,(np.arange(0,len(advect_dummy))/-100.),'r-',label='new particles')
ax1.plot(advect_dummy+old_dummy,(np.arange(0,len(old_dummy))/-100.),'b-',label='all particles')
ax1.plot(old_dummy,(np.arange(0,len(old_dummy))/-100.),'g-',label='old particles')
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,all_dummy, color='b',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,old_dummy, color='g',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,advect_dummy, color='r',alpha=0.3)
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.set_ylim((-1.,0.))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
#legend
ax1 = fig.add_subplot(gs[0,1])
ax1.text(0.05, 0.1, 'run:\n'+runname[-6:-2]+'\n\n'+'time:\n'+str(np.round(t/60.,1))+'min')
#ax1.legend(handles1, labels1, loc=3)
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
#main
ax1 = fig.add_subplot(gs[1,0])
plt.imshow(sp.ndimage.filters.median_filter(thS,size=mc.smooth),vmin=0., vmax=100., cmap='Blues',origin='lower')
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
# decorate axes
ax1.set_xlim((0,34))
ax1.set_ylim((100,0))
ax1.set_xticks([0,10,20,30])
ax1.set_yticks([100,75,50,25,0])
ax1.set_xticklabels([0,0.1,'width [m]',0.3])
ax1.set_yticklabels([-1.0,-0.75,-0.5,-0.25,0])
#ax1.text(40,-5,'width [m]')
ax1.text(-4,85,'depth [m]',rotation='vertical')
if saving:
plt.savefig(''.join(['./results/NWL',runname[-6:-2],'_',str(ix).zfill(3),'.pdf']))
plt.close(fig)
def plotparticles_colpach(particles,mc,pdyn,vG,runname='test',t=0.,ix=0,sigma=0.5,fsize=(3, 9.5),saving=False,relative=True):
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[thSn,npartn]=pdyn.gridupdate_thS(particles.loc[((particles.age>0.)),'lat'],particles.loc[((particles.age>0.)),'z'].values,mc)
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(2, 2, width_ratios=[4,1.1], height_ratios=[1,9],hspace=0.02, wspace=0.02)
#marginal X
ax1 = fig.add_subplot(gs[0,0])
old_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age<=0.)),'lat'].values).astype(np.int)).astype(float)
all_dummy=np.copy(old_dummy)
advect_dummy=old_dummy*0.
advect_dummy2=np.bincount(np.round(100.0*particles.loc[((particles.age>0.)),'lat'].values).astype(np.int)).astype(float)
advect_dummy[advect_dummy2>0]+=advect_dummy2[advect_dummy2>0]
all_dummy+=advect_dummy
if relative:
old_dummy/=np.sum(old_dummy)
advect_dummy/=np.sum(advect_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:],'r-',label='new particles')
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:],'b-',label='all particles')
ax1.plot((np.arange(0,len(old_dummy))/100.)[1:],old_dummy[1:],'g-',label='old particles')
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:], 0., color='b',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],old_dummy[1:], 0., color='g',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:], 0., color='r',alpha=0.3)
ax1.set_xlim((0,0.32))
ax1.set_ylim((0,np.amax(all_dummy)))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["top"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_visible(False)
#marginal Y
ax1 = fig.add_subplot(gs[1,1])
onez=np.append(mc.zgrid[:,1]+0.001,mc.soildepth)
z1=np.append(particles.loc[((particles.age>0.)),'z'].values,onez)
advect_dummy=np.bincount(np.round(-100.0*z1).astype(np.int)).astype(float)-1
all_dummy=np.copy(advect_dummy)
z2=np.append(particles.loc[((particles.age<=0.)),'z'].values,onez)
old_dummy=np.bincount(np.round(-100.0*z2).astype(np.int)).astype(float)-1
all_dummy+=old_dummy
if relative:
advect_dummy/=np.sum(advect_dummy)
old_dummy/=np.sum(old_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot(advect_dummy,(np.arange(0,len(advect_dummy))/-100.),'r-',label='new particles')
ax1.plot(advect_dummy+old_dummy,(np.arange(0,len(old_dummy))/-100.),'b-',label='all particles')
ax1.plot(old_dummy,(np.arange(0,len(old_dummy))/-100.),'g-',label='old particles')
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,all_dummy, color='b',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,old_dummy, color='g',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,advect_dummy, color='r',alpha=0.3)
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.set_ylim((-1.2,0.))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
#legend
ax1 = fig.add_subplot(gs[0,1])
ax1.text(0.05, 0.1, 'run:\n'+runname[-6:-2]+'\n\n'+'time:\n'+str(np.round(t/60.,1))+'min')
#ax1.legend(handles1, labels1, loc=3)
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
#main
ax1 = fig.add_subplot(gs[1,0])
plt.imshow(sp.ndimage.filters.median_filter(thS,size=mc.smooth),vmin=0., vmax=100., cmap='Blues',origin='upper')
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
# decorate axes
ax1.set_xlim((0,64))
ax1.set_ylim((240,0))
ax1.set_xticks([0,20,40,60])
ax1.set_yticks([240,180,120,60,0])
ax1.set_xticklabels([0,0.1,'width [m]',0.3])
ax1.set_yticklabels([-1.2,-0.9,-0.6,-0.3,0])
#ax1.text(40,-5,'width [m]')
ax1.text(-8,200,'depth [m]',rotation='vertical')
if saving:
plt.savefig(''.join(['./results/NC',runname[-6:-2],'_',str(ix).zfill(3),'.pdf']))
plt.close(fig)
def plotparticles_column(particles,mc,pdyn,vG,runname='test',t=0.,ix=0,sigma=0.5,fsize=(4,4),saving=False,relative=False):
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[thSn,npartn]=pdyn.gridupdate_thS(particles.loc[((particles.age>0.)),'lat'],particles.loc[((particles.age>0.)),'z'].values,mc)
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(2, 2, width_ratios=[4,0.5], height_ratios=[0.5,4],hspace=0.02, wspace=0.02)
#marginal X
ax1 = fig.add_subplot(gs[0,0])
old_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age<=0.)),'lat'].values).astype(np.int)).astype(float)
all_dummy=np.copy(old_dummy)
advect_dummy=old_dummy*0.
advect_dummy2=np.bincount(np.round(100.0*particles.loc[((particles.age>0.)),'lat'].values).astype(np.int)).astype(float)
advect_dummy[advect_dummy2>0]+=advect_dummy2[advect_dummy2>0]
all_dummy+=advect_dummy
if relative:
old_dummy/=np.sum(old_dummy)
advect_dummy/=np.sum(advect_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:],'r-',label='new particles')
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:],'b-',label='all particles')
ax1.plot((np.arange(0,len(old_dummy))/100.)[1:],old_dummy[1:],'g-',label='old particles')
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:], 0., color='b',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],old_dummy[1:], 0., color='g',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:], 0., color='r',alpha=0.3)
ax1.set_xlim((0,1.))
#ax1.set_ylim((0,np.amax(all_dummy)))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["top"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_visible(False)
#marginal Y
ax1 = fig.add_subplot(gs[1,1])
onez=np.append(mc.zgrid[:,1]+0.001,mc.soildepth)
z1=np.append(particles.loc[((particles.age>0.)),'z'].values,onez)
advect_dummy=np.bincount(np.round(-100.0*z1).astype(np.int)).astype(float)-1
all_dummy=np.copy(advect_dummy)
z2=np.append(particles.loc[((particles.age<=0.)),'z'].values,onez)
old_dummy=np.bincount(np.round(-100.0*z2).astype(np.int)).astype(float)-1
all_dummy+=old_dummy
if relative:
advect_dummy/=np.sum(advect_dummy)
old_dummy/=np.sum(old_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot(advect_dummy,(np.arange(0,len(advect_dummy))/-100.),'r-',label='new particles')
ax1.plot(advect_dummy+old_dummy,(np.arange(0,len(old_dummy))/-100.),'b-',label='all particles')
ax1.plot(old_dummy,(np.arange(0,len(old_dummy))/-100.),'g-',label='old particles')
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,all_dummy, color='b',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,old_dummy, color='g',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,advect_dummy, color='r',alpha=0.3)
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.set_ylim((-1.,0.))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
#legend
ax1 = fig.add_subplot(gs[0,1])
ax1.text(0.05, 0.1, 'run:\n'+runname[-6:-2]+'\n\n'+'time:\n'+str(np.round(t/60.,1))+'min')
#ax1.legend(handles1, labels1, loc=3)
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
#main
ax1 = fig.add_subplot(gs[1,0])
plt.imshow(sp.ndimage.filters.median_filter(thS,size=mc.smooth),vmin=0., vmax=80., cmap='Blues',origin='upper')
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
# decorate axes
ax1.set_xlim((0,200))
ax1.set_ylim((200,0))
ax1.set_xticks([0,50,100,150,200])
ax1.set_yticks([200,150,100,50,0])
ax1.set_xticklabels([0,0.25,0.5,'width [m]',1])
ax1.set_yticklabels([-1.0,'',-0.5,-0.25,0])
#ax1.text(40,-5,'width [m]')
ax1.text(-16,130,'depth [m]',rotation='vertical')
if saving:
plt.savefig(''.join(['./results/NCol',runname[-6:-2],'_',str(ix).zfill(3),'.pdf']))
plt.close(fig)
def plotparticles_hoevelerbach(particles,mc,pdyn,vG,runname='test',t=0.,ix=0,sigma=0.5,fsize=(2., 9.5),saving=False,relative=True):
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage.filters import gaussian_filter1d
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[thSn,npartn]=pdyn.gridupdate_thS(particles.loc[((particles.age>0.)),'lat'],particles.loc[((particles.age>0.)),'z'].values,mc)
fig=plt.figure(figsize=fsize)
gs = gridspec.GridSpec(2, 2, width_ratios=[5,1.5], height_ratios=[1,19],hspace=0.02, wspace=0.02)
#marginal X
ax1 = fig.add_subplot(gs[0,0])
old_dummy=np.bincount(np.round(100.0*particles.loc[((particles.age<=0.)),'lat'].values).astype(np.int)).astype(float)
all_dummy=np.copy(old_dummy)
advect_dummy=old_dummy*0.
advect_dummy2=np.bincount(np.round(100.0*particles.loc[((particles.age>0.)),'lat'].values).astype(np.int)).astype(float)
advect_dummy[advect_dummy2>0]+=advect_dummy2[advect_dummy2>0]
all_dummy+=advect_dummy
if relative:
old_dummy/=np.sum(old_dummy)
advect_dummy/=np.sum(advect_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:],'r-',label='new particles')
ax1.plot((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:],'b-',label='all particles')
ax1.plot((np.arange(0,len(old_dummy))/100.)[1:],old_dummy[1:],'g-',label='old particles')
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:]+old_dummy[1:], 0., color='b',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],old_dummy[1:], 0., color='g',alpha=0.15)
ax1.fill_between((np.arange(0,len(advect_dummy))/100.)[1:],advect_dummy[1:], 0., color='r',alpha=0.3)
ax1.set_xlim((0,0.302))
ax1.set_ylim((0,np.amax(all_dummy)))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["top"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax1.spines["left"].set_visible(False)
#marginal Y
ax1 = fig.add_subplot(gs[1,1])
onez=np.append(mc.zgrid[:,1]+0.001,mc.soildepth)
z1=np.append(particles.loc[((particles.age>0.)),'z'].values,onez)
advect_dummy=np.bincount(np.round(-100.0*z1).astype(np.int)).astype(float)-1
all_dummy=np.copy(advect_dummy)
z2=np.append(particles.loc[((particles.age<=0.)),'z'].values,onez)
old_dummy=np.bincount(np.round(-100.0*z2).astype(np.int)).astype(float)-1
all_dummy+=old_dummy
if relative:
advect_dummy/=np.sum(advect_dummy)
old_dummy/=np.sum(old_dummy)
all_dummy/=np.sum(all_dummy)
ax1.plot(advect_dummy,(np.arange(0,len(advect_dummy))/-100.),'r-',label='new particles')
ax1.plot(advect_dummy+old_dummy,(np.arange(0,len(old_dummy))/-100.),'b-',label='all particles')
ax1.plot(old_dummy,(np.arange(0,len(old_dummy))/-100.),'g-',label='old particles')
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,all_dummy, color='b',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,old_dummy, color='g',alpha=0.15)
ax1.fill_betweenx((np.arange(0,len(old_dummy))/-100.),0.,advect_dummy, color='r',alpha=0.3)
handles1, labels1 = ax1.get_legend_handles_labels()
ax1.set_ylim((-1.8,0.))
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["right"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
#legend
ax1 = fig.add_subplot(gs[0,1])
ax1.text(0.05, 0.1, 'run:\n'+runname[-6:-2]+'\n\n'+'time:\n'+str(np.round(t/60.,1))+'min')
#ax1.legend(handles1, labels1, loc=3)
ax1.set_yticks([])
ax1.set_xticks([])
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
#main
ax1 = fig.add_subplot(gs[1,0])
plt.imshow(sp.ndimage.filters.median_filter(thS,size=mc.smooth),vmin=0., vmax=100., cmap='Blues',origin='upper')
ax1.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["left"].set_visible(False)
ax1.spines["right"].set_visible(False)
# decorate axes
ax1.set_xlim((0,60))
ax1.set_ylim((360,0))
ax1.set_xticks([0,20,40,60])
ax1.set_yticks([360,300,240,180,120,60,0])
ax1.set_xticklabels([0,0.1,'width\n[m]',0.3])
ax1.set_yticklabels([-1.8,-1.5,-1.2,-0.9,-0.6,-0.3,0])
#ax1.text(40,-5,'width [m]')
ax1.text(-9,315,'depth [m]',rotation='vertical')
if saving:
plt.savefig(''.join(['./results/NC',runname[-6:-2],'_',str(ix).zfill(3),'.pdf']))
plt.close(fig)
| gpl-3.0 |
rwhitt2049/trouve | trouve/find_events.py | 1 | 3625 | import numpy as np
import pandas as pd
import toolz
from trouve.events import Events
@toolz.curry
def find_events(condition, period, name='events', transformations=None):
"""Find events based off a condition
Find events based off a ``bool`` conditional array and apply a sequence
of transformation functions to them. The ``find_events`` function is
curried via ``toolz.curry``; since most datasets share a single sample rate,
this makes it convenient to specify ``period`` once and reuse the function.
Args:
condition (``numpy.ndarray`` or ``pandas.Series`` of ``bool``):
Boolean conditional array.
period (``float``):
Time in seconds between each data point. Requires constant
increment data that is uniform across the array. (1/Hz = s)
transformations (sequence of ``callable`` 's, optional):
Ordered sequence of transformation functions to apply to
events. Transformations are applied via ``toolz.pipe()``
name (``str``, optional): Default is ``'events'``.
User provided name for events.
Returns:
:class:`trouve.events.Events`:
Returns events found from ``condition`` with any supplied
``transformations`` applied.
Examples:
>>> import trouve as tr
>>> import trouve.transformations as tt
>>> import numpy as np
>>> deb = tt.debounce(2, 2)
>>> offsets = tt.offset_events(-1,2)
>>> filt_dur = tt.filter_durations(3, 5)
>>> x = np.array([4, 5, 1, 2, 3, 4, 5, 1, 3])
>>> condition = (x > 2)
>>> no_transforms = tr.find_events(condition, period=1)
>>> events = tr.find_events(condition, period=1,
... transformations=[deb, filt_dur, offsets])
>>> no_transforms.to_array() # doctest: +SKIP
array([ 1., 1., 0., 0., 1., 1., 1., 0., 1.])
>>> events.to_array() # doctest: +SKIP
array([ 0., 0., 0., 1., 1., 1., 1., 1., 1.])
"""
if isinstance(condition, pd.Series):
condition = condition.values
if transformations is None:
transformations = []
starts, stops = _apply_condition(condition)
raw_events = Events(starts, stops, period, name, condition.size)
transformed_events = toolz.pipe(raw_events, *transformations)
return transformed_events
def _apply_condition(condition):
"""Distill an array of bool into start and stop indexes
Convert a conditional array of bools into two numpy.ndarrays of
integers where starts are the indexes where condition goes from
False to True. Stops are the indexes where condition goes from
True to False.
Args:
condition (numpy.array of bool):
Returns:
tuple(numpy.ndarray, numpy.ndarray):
"""
if isinstance(condition, pd.core.series.Series):
condition = condition.values
mask = (condition > 0).view('i1')
slice_index = np.arange(mask.size + 1, dtype=np.int32)
# Determine if condition is active at array start, set to_begin accordingly
if mask[0] == 0:
to_begin = np.array([0], dtype='i1')
else:
to_begin = np.array([1], dtype='i1')
# Determine if condition is active at array end, set to_end accordingly
if mask[-1] == 0:
to_end = np.array([0], dtype='i1')
else:
to_end = np.array([-1], dtype='i1')
deltas = np.ediff1d(mask, to_begin=to_begin, to_end=to_end)
starts = np.ma.masked_where(deltas < 1, slice_index).compressed()
stops = np.ma.masked_where(deltas > -1, slice_index).compressed()
return starts, stops
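# --- illustrative sketch (editor addition, not part of the trouve package) ---
# _apply_condition() distills a boolean array into paired start/stop indexes
# that delimit half-open [start, stop) event ranges; the tiny array below is
# an assumed example that makes the convention explicit.
def _demo_apply_condition():
    condition = np.array([False, True, True, False, True])
    starts, stops = _apply_condition(condition)
    # starts -> array([1, 4]), stops -> array([3, 5]),
    # i.e. events over index ranges [1, 3) and [4, 5)
    return starts, stops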
| mit |
cauchycui/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be very
expensive to run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
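# --- illustrative sketch (editor addition, not part of scikit-learn) ---
# The module docstring notes that hard clamping equals soft clamping with
# alpha set to 1. The helper below sketches that comparison on the iris data;
# the fraction of hidden labels and the random seed are assumptions.
def _clamping_demo(random_state=0):
    from sklearn import datasets
    rng = np.random.RandomState(random_state)
    iris = datasets.load_iris()
    labels = np.copy(iris.target)
    labels[rng.rand(len(labels)) < 0.5] = -1                  # hide roughly half the labels
    hard = LabelPropagation(alpha=1).fit(iris.data, labels)   # hard clamp
    soft = LabelSpreading(alpha=0.2).fit(iris.data, labels)   # soft clamp
    return hard.transduction_, soft.transduction_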
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
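# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module). A
# minimal, hedged example of semi-supervised fitting with LabelSpreading; the
# dataset, masking rate and parameter values below are assumptions chosen for
# illustration only.
if __name__ == "__main__":  # pragma: no cover - example only
    from sklearn import datasets
    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    # Mark roughly half of the samples as unlabeled with the -1 marker.
    labels[rng.rand(len(labels)) < 0.5] = -1
    model = LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)
    model.fit(iris.data, labels)
    # transduction_ holds the label inferred for every sample, labeled or not.
    print(model.transduction_[:10])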
| bsd-3-clause |
jlegendary/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
pxzhang94/GAN | GAN/mode_regularized_gan/mode_reg_gan_pytorch.py | 1 | 3606 | import torch
import torch.nn
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32
z_dim = 128
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
cnt = 0
lr = 1e-4
lam1 = 1e-2
lam2 = 1e-2
def log(x):
return torch.log(x + 1e-8)
E = torch.nn.Sequential(
torch.nn.Linear(X_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, z_dim)
)
G = torch.nn.Sequential(
torch.nn.Linear(z_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, X_dim),
torch.nn.Sigmoid()
)
D = torch.nn.Sequential(
torch.nn.Linear(X_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, 1),
torch.nn.Sigmoid()
)
def reset_grad():
G.zero_grad()
D.zero_grad()
E.zero_grad()
def sample_X(size, include_y=False):
X, y = mnist.train.next_batch(size)
X = Variable(torch.from_numpy(X))
if include_y:
y = np.argmax(y, axis=1).astype(np.int)
y = Variable(torch.from_numpy(y))
return X, y
return X
E_solver = optim.Adam(E.parameters(), lr=lr)
G_solver = optim.Adam(G.parameters(), lr=lr)
D_solver = optim.Adam(D.parameters(), lr=lr)
for it in range(1000000):
""" Discriminator """
# Sample data
X = sample_X(mb_size)
z = Variable(torch.randn(mb_size, z_dim))
# Discriminator forward-loss-backward-update
G_sample = G(z)
D_real = D(X)
D_fake = D(G_sample)
D_loss = -torch.mean(log(D_real) + log(1 - D_fake))
D_loss.backward()
D_solver.step()
# Housekeeping - reset gradient
reset_grad()
""" Generator """
# Sample data
X = sample_X(mb_size)
z = Variable(torch.randn(mb_size, z_dim))
# Generator forward-loss-backward-update
G_sample = G(z)
G_sample_reg = G(E(X))
D_fake = D(G_sample)
D_reg = D(G_sample_reg)
mse = torch.sum((X - G_sample_reg)**2, 1)
reg = torch.mean(lam1 * mse + lam2 * log(D_reg))
G_loss = -torch.mean(log(D_fake)) + reg
G_loss.backward()
G_solver.step()
# Housekeeping - reset gradient
reset_grad()
""" Encoder """
# Sample data
X = sample_X(mb_size)
z = Variable(torch.randn(mb_size, z_dim))
G_sample_reg = G(E(X))
D_reg = D(G_sample_reg)
mse = torch.sum((X - G_sample_reg)**2, 1)
E_loss = torch.mean(lam1 * mse + lam2 * log(D_reg))
E_loss.backward()
E_solver.step()
# Housekeeping - reset gradient
reset_grad()
# Print and plot every now and then
if it % 1000 == 0:
print('Iter-{}; D_loss: {}; E_loss: {}; G_loss: {}'
.format(it, D_loss.data.numpy(), E_loss.data.numpy(), G_loss.data.numpy()))
samples = G(z).data.numpy()[:16]
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'
.format(str(cnt).zfill(3)), bbox_inches='tight')
cnt += 1
plt.close(fig)
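# ---------------------------------------------------------------------------
# Descriptive note (added; not in the original script): the encoder E, the
# reconstruction term ``lam1 * ||X - G(E(X))||^2`` and the discriminator term
# ``lam2 * log(D(G(E(X))))`` make up the mode-regularization objective that is
# added to the usual generator loss above and optimized separately for E; this
# regularizer is what distinguishes the script from a vanilla GAN and is
# intended to discourage mode collapse.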
| apache-2.0 |
alexeyum/scikit-learn | sklearn/exceptions.py | 35 | 4329 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior."""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid"""
| bsd-3-clause |
RTHMaK/RPGOne | doc/examples/features_detection/plot_censure.py | 8 | 1167 | """
========================
CENSURE feature detector
========================
The CENSURE feature detector is a scale-invariant center-surround detector
(CENSURE) that claims to outperform other detectors and is capable of real-time
implementation.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img_orig = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
translation=(150, -200))
img_warp = tf.warp(img_orig, tform)
detector = CENSURE()
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
plt.tight_layout()
detector.detect(img_orig)
ax[0].imshow(img_orig, cmap=plt.cm.gray)
ax[0].axis('off')
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
2 ** detector.scales, facecolors='none', edgecolors='r')
detector.detect(img_warp)
ax[1].imshow(img_warp, cmap=plt.cm.gray)
ax[1].axis('off')
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
2 ** detector.scales, facecolors='none', edgecolors='r')
plt.show()
| apache-2.0 |
Christoph/tag-connect | hmm_stuff/CRF.py | 1 | 1847 | from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import sklearn
import pycrfsuite
import pandas as pd
import spacy
nlp = spacy.load('en')
data = pd.read_csv("data/train_small.csv")
test = pd.read_csv("data/test_hand_labeled.csv")
docs = [nlp(row) for row in data["text"]]
labels = [row for row in data["label"]]
def word2feature(word):
postag = word.pos_
features = [
'bias',
'word.lower=%s' % word.is_lower,
'word[-3:]=' + word.text[-3:],
'word[-2:]=' + word.text[-2:],
'word.isupper=%s' % word.is_upper,
'word.istitle=%s' % word.is_title,
'word.isdigit=%s' % word.is_digit,
'postag=' + postag,
]
if word.i > 0:
word1 = word.nbor(-1)
postag1 = word1.pos_
features.extend([
'-1:word.lower=%s' % word1.is_lower,
'-1:word.istitle=%s' % word1.is_title,
'-1:word.isupper=%s' % word1.is_upper,
'-1:postag=' + postag1,
])
else:
features.append('BOS')
if word.i < len(word.doc)-1:
word1 = word.nbor(1)
postag1 = word1.pos_
features.extend([
'+1:word.lower=%s' % word1.is_lower,
'+1:word.istitle=%s' % word1.is_title,
'+1:word.isupper=%s' % word1.is_upper,
'+1:postag=' + postag1,
'+1:postag[:2]=' + postag1[:2],
])
else:
features.append('EOS')
return features
def doc2features(doc):
return [word2feature(t) for t in doc]
X_train = [doc2features(d) for d in docs]
y_train = labels
X_test = [doc2features(nlp(d)) for d in test["text"]]
y_test = [d for d in test["label"]]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
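# ---------------------------------------------------------------------------
# Hedged completion sketch (added; not in the original script): the code above
# builds the training set but never trains or evaluates the model. The
# parameter values and the model file name below are illustrative assumptions,
# and the evaluation assumes each entry of y_test / y_pred is a per-token
# label sequence, as pycrfsuite expects.
trainer.set_params({
    'c1': 1.0,    # L1 regularization strength (assumed value)
    'c2': 1e-3,   # L2 regularization strength (assumed value)
    'max_iterations': 50,
})
trainer.train('crf_model.crfsuite')
tagger = pycrfsuite.Tagger()
tagger.open('crf_model.crfsuite')
y_pred = [tagger.tag(xseq) for xseq in X_test]
# Flatten per-token labels before scoring with the metrics imported above.
flat_true = [label for seq in y_test for label in seq]
flat_pred = [label for seq in y_pred for label in seq]
print(classification_report(flat_true, flat_pred))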
| mit |
larsoner/mne-python | mne/decoding/tests/test_base.py | 12 | 15702 | # Author: Jean-Remi King, <jeanremi.king@gmail.com>
# Marijn van Vliet, <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_allclose, assert_array_less)
import pytest
from mne import create_info, EpochsArray
from mne.fixes import is_regressor, is_classifier
from mne.utils import requires_sklearn, requires_version
from mne.decoding.base import (_get_inverse_funcs, LinearModel, get_coef,
cross_val_multiscore, BaseEstimator)
from mne.decoding.search_light import SlidingEstimator
from mne.decoding import (Scaler, TransformerMixin, Vectorizer,
GeneralizingEstimator)
def _make_data(n_samples=1000, n_features=5, n_targets=3):
"""Generate some testing data.
Parameters
----------
n_samples : int
The number of samples.
n_features : int
The number of features.
n_targets : int
The number of targets.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The measured data.
Y : ndarray, shape (n_samples, n_targets)
The latent variables generating the data.
A : ndarray, shape (n_features, n_targets)
The forward model, mapping the latent variables (=Y) to the measured
data (=X).
"""
# Define Y latent factors
np.random.seed(0)
cov_Y = np.eye(n_targets) * 10 + np.random.rand(n_targets, n_targets)
cov_Y = (cov_Y + cov_Y.T) / 2.
mean_Y = np.random.rand(n_targets)
Y = np.random.multivariate_normal(mean_Y, cov_Y, size=n_samples)
# The Forward model
A = np.random.randn(n_features, n_targets)
X = Y.dot(A.T)
X += np.random.randn(n_samples, n_features) # add noise
X += np.random.rand(n_features) # Put an offset
return X, Y, A
@requires_sklearn
def test_get_coef():
"""Test getting linear coefficients (filters/patterns) from estimators."""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
lm_classification = LinearModel()
assert (is_classifier(lm_classification))
lm_regression = LinearModel(Ridge())
assert (is_regressor(lm_regression))
parameters = {'kernel': ['linear'], 'C': [1, 10]}
lm_gs_classification = LinearModel(
GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
assert (is_classifier(lm_gs_classification))
lm_gs_regression = LinearModel(
GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
assert (is_regressor(lm_gs_regression))
# Define a classifier, an invertible transformer and a non-invertible one.
class Clf(BaseEstimator):
def fit(self, X, y):
return self
class NoInv(TransformerMixin):
def fit(self, X, y):
return self
def transform(self, X):
return X
class Inv(NoInv):
def inverse_transform(self, X):
return X
X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
# I. Test inverse function
# Check that we retrieve the right number of inverse functions even if
# there are nested pipelines
good_estimators = [
(1, make_pipeline(Inv(), Clf())),
(2, make_pipeline(Inv(), Inv(), Clf())),
(3, make_pipeline(Inv(), make_pipeline(Inv(), Inv()), Clf())),
]
for expected_n, est in good_estimators:
est.fit(X, y)
assert (expected_n == len(_get_inverse_funcs(est)))
bad_estimators = [
Clf(), # no preprocessing
Inv(), # final estimator isn't classifier
make_pipeline(NoInv(), Clf()), # first step isn't invertible
make_pipeline(Inv(), make_pipeline(
Inv(), NoInv()), Clf()), # nested step isn't invertible
]
for est in bad_estimators:
est.fit(X, y)
invs = _get_inverse_funcs(est)
assert_equal(invs, list())
# II. Test get coef for classification/regression estimators and pipelines
rng = np.random.RandomState(0)
for clf in (lm_regression,
lm_gs_classification,
make_pipeline(StandardScaler(), lm_classification),
make_pipeline(StandardScaler(), lm_gs_regression)):
# generate some categorical/continuous data
# according to the type of estimator.
if is_classifier(clf):
n, n_features = 1000, 3
X = rng.rand(n, n_features)
y = np.arange(n) % 2
else:
X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
y = np.ravel(y)
clf.fit(X, y)
# Retrieve final linear model
filters = get_coef(clf, 'filters_', False)
if hasattr(clf, 'steps'):
if hasattr(clf.steps[-1][-1].model, 'best_estimator_'):
# Linear Model with GridSearchCV
coefs = clf.steps[-1][-1].model.best_estimator_.coef_
else:
# Standard Linear Model
coefs = clf.steps[-1][-1].model.coef_
else:
if hasattr(clf.model, 'best_estimator_'):
# Linear Model with GridSearchCV
coefs = clf.model.best_estimator_.coef_
else:
# Standard Linear Model
coefs = clf.model.coef_
if coefs.ndim == 2 and coefs.shape[0] == 1:
coefs = coefs[0]
assert_array_equal(filters, coefs)
patterns = get_coef(clf, 'patterns_', False)
assert (filters[0] != patterns[0])
n_chans = X.shape[1]
assert_array_equal(filters.shape, patterns.shape, [n_chans, n_chans])
# Inverse transform linear model
filters_inv = get_coef(clf, 'filters_', True)
assert (filters[0] != filters_inv[0])
patterns_inv = get_coef(clf, 'patterns_', True)
assert (patterns[0] != patterns_inv[0])
class _Noop(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return X.copy()
inverse_transform = transform
@requires_sklearn
@pytest.mark.parametrize('inverse', (True, False))
@pytest.mark.parametrize('Scale, kwargs', [
(Scaler, dict(info=None, scalings='mean')),
(_Noop, dict()),
])
def test_get_coef_inverse_transform(inverse, Scale, kwargs):
"""Test get_coef with and without inverse_transform."""
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline
lm_regression = LinearModel(Ridge())
X, y, A = _make_data(n_samples=1000, n_features=3, n_targets=1)
# Check with search_light and combination of preprocessing ending with sl:
# slider = SlidingEstimator(make_pipeline(StandardScaler(), lm_regression))
# XXX : line above should work but does not as only last step is
# used in get_coef ...
slider = SlidingEstimator(make_pipeline(lm_regression))
X = np.transpose([X, -X], [1, 2, 0]) # invert X across 2 time samples
clf = make_pipeline(Scale(**kwargs), slider)
clf.fit(X, y)
patterns = get_coef(clf, 'patterns_', inverse)
filters = get_coef(clf, 'filters_', inverse)
assert_array_equal(filters.shape, patterns.shape, X.shape[1:])
# the two time samples get inverted patterns
assert_equal(patterns[0, 0], -patterns[0, 1])
for t in [0, 1]:
filters_t = get_coef(
clf.named_steps['slidingestimator'].estimators_[t],
'filters_', False)
if Scale is _Noop:
assert_array_equal(filters_t, filters[:, t])
@requires_sklearn
@pytest.mark.parametrize('n_features', [1, 5])
@pytest.mark.parametrize('n_targets', [1, 3])
def test_get_coef_multiclass(n_features, n_targets):
"""Test get_coef on multiclass problems."""
# Check patterns with more than 1 regressor
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.pipeline import make_pipeline
X, Y, A = _make_data(
n_samples=30000, n_features=n_features, n_targets=n_targets)
lm = LinearModel(LinearRegression()).fit(X, Y)
assert_array_equal(lm.filters_.shape, lm.patterns_.shape)
if n_targets == 1:
want_shape = (n_features,)
else:
want_shape = (n_targets, n_features)
assert_array_equal(lm.filters_.shape, want_shape)
if n_features > 1 and n_targets > 1:
assert_array_almost_equal(A, lm.patterns_.T, decimal=2)
lm = LinearModel(Ridge(alpha=0))
clf = make_pipeline(lm)
clf.fit(X, Y)
if n_features > 1 and n_targets > 1:
assert_allclose(A, lm.patterns_.T, atol=2e-2)
coef = get_coef(clf, 'patterns_', inverse_transform=True)
assert_allclose(lm.patterns_, coef, atol=1e-5)
# With epochs, scaler, and vectorizer (typical use case)
X_epo = X.reshape(X.shape + (1,))
info = create_info(n_features, 1000., 'eeg')
lm = LinearModel(Ridge(alpha=1))
clf = make_pipeline(
Scaler(info, scalings=dict(eeg=1.)), # XXX adding this step breaks
Vectorizer(),
lm,
)
clf.fit(X_epo, Y)
if n_features > 1 and n_targets > 1:
assert_allclose(A, lm.patterns_.T, atol=2e-2)
coef = get_coef(clf, 'patterns_', inverse_transform=True)
lm_patterns_ = lm.patterns_[..., np.newaxis]
assert_allclose(lm_patterns_, coef, atol=1e-5)
# Check can pass fitting parameters
lm.fit(X, Y, sample_weight=np.ones(len(Y)))
@requires_version('sklearn', '0.22') # roc_auc_ovr_weighted
@pytest.mark.parametrize('n_classes, n_channels, n_times', [
(4, 10, 2),
(4, 3, 2),
(3, 2, 1),
(3, 1, 2),
])
def test_get_coef_multiclass_full(n_classes, n_channels, n_times):
"""Test a full example with pattern extraction."""
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
data = np.zeros((10 * n_classes, n_channels, n_times))
# Make only the first channel informative
for ii in range(n_classes):
data[ii * 10:(ii + 1) * 10, 0] = ii
events = np.zeros((len(data), 3), int)
events[:, 0] = np.arange(len(events))
events[:, 2] = data[:, 0, 0]
info = create_info(n_channels, 1000., 'eeg')
epochs = EpochsArray(data, info, events, tmin=0)
clf = make_pipeline(
Scaler(epochs.info), Vectorizer(),
LinearModel(LogisticRegression(random_state=0, multi_class='ovr')),
)
scorer = 'roc_auc_ovr_weighted'
time_gen = GeneralizingEstimator(clf, scorer, verbose=True)
X = epochs.get_data()
y = epochs.events[:, 2]
n_splits = 3
cv = StratifiedKFold(n_splits=n_splits)
scores = cross_val_multiscore(time_gen, X, y, cv=cv, verbose=True)
want = (n_splits,)
if n_times > 1:
want += (n_times, n_times)
assert scores.shape == want
assert_array_less(0.8, scores)
clf.fit(X, y)
patterns = get_coef(clf, 'patterns_', inverse_transform=True)
assert patterns.shape == (n_classes, n_channels, n_times)
assert_allclose(patterns[:, 1:], 0., atol=1e-7) # no other channels useful
@requires_sklearn
def test_linearmodel():
"""Test LinearModel class for computing filters and patterns."""
# check categorical target fit in standard linear model
from sklearn.linear_model import LinearRegression
rng = np.random.RandomState(0)
clf = LinearModel()
n, n_features = 20, 3
X = rng.rand(n, n_features)
y = np.arange(n) % 2
clf.fit(X, y)
assert_equal(clf.filters_.shape, (n_features,))
assert_equal(clf.patterns_.shape, (n_features,))
with pytest.raises(ValueError):
wrong_X = rng.rand(n, n_features, 99)
clf.fit(wrong_X, y)
# check categorical target fit in standard linear model with GridSearchCV
from sklearn import svm
from sklearn.model_selection import GridSearchCV
parameters = {'kernel': ['linear'], 'C': [1, 10]}
clf = LinearModel(
GridSearchCV(svm.SVC(), parameters, cv=2, refit=True, n_jobs=1))
clf.fit(X, y)
assert_equal(clf.filters_.shape, (n_features,))
assert_equal(clf.patterns_.shape, (n_features,))
with pytest.raises(ValueError):
wrong_X = rng.rand(n, n_features, 99)
clf.fit(wrong_X, y)
# check continuous target fit in standard linear model with GridSearchCV
n_targets = 1
Y = rng.rand(n, n_targets)
clf = LinearModel(
GridSearchCV(svm.SVR(), parameters, cv=2, refit=True, n_jobs=1))
clf.fit(X, y)
assert_equal(clf.filters_.shape, (n_features, ))
assert_equal(clf.patterns_.shape, (n_features, ))
with pytest.raises(ValueError):
wrong_y = rng.rand(n, n_features, 99)
clf.fit(X, wrong_y)
# check multi-target fit in standard linear model
n_targets = 5
Y = rng.rand(n, n_targets)
clf = LinearModel(LinearRegression())
clf.fit(X, Y)
assert_equal(clf.filters_.shape, (n_targets, n_features))
assert_equal(clf.patterns_.shape, (n_targets, n_features))
with pytest.raises(ValueError):
wrong_y = rng.rand(n, n_features, 99)
clf.fit(X, wrong_y)
@requires_sklearn
def test_cross_val_multiscore():
"""Test cross_val_multiscore for computing scores on decoding over time."""
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
from sklearn.linear_model import LogisticRegression, LinearRegression
logreg = LogisticRegression(solver='liblinear', random_state=0)
# compare to cross-val-score
X = np.random.rand(20, 3)
y = np.arange(20) % 2
cv = KFold(2, random_state=0, shuffle=True)
clf = logreg
assert_array_equal(cross_val_score(clf, X, y, cv=cv),
cross_val_multiscore(clf, X, y, cv=cv))
# Test with search light
X = np.random.rand(20, 4, 3)
y = np.arange(20) % 2
clf = SlidingEstimator(logreg, scoring='accuracy')
scores_acc = cross_val_multiscore(clf, X, y, cv=cv)
assert_array_equal(np.shape(scores_acc), [2, 3])
# check values
scores_acc_manual = list()
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
scores_acc_manual.append(clf.score(X[test], y[test]))
assert_array_equal(scores_acc, scores_acc_manual)
# check scoring metric
# raise an error if scoring is defined at cross-val-score level and
# search light, because search light does not return a 1-dimensional
# prediction.
pytest.raises(ValueError, cross_val_multiscore, clf, X, y, cv=cv,
scoring='roc_auc')
clf = SlidingEstimator(logreg, scoring='roc_auc')
scores_auc = cross_val_multiscore(clf, X, y, cv=cv, n_jobs=1)
scores_auc_manual = list()
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
scores_auc_manual.append(clf.score(X[test], y[test]))
assert_array_equal(scores_auc, scores_auc_manual)
# indirectly test that cross_val_multiscore rightly detects the type of
# estimator and generates a StratifiedKFold for classifiers and a KFold
# otherwise
X = np.random.randn(1000, 3)
y = np.ones(1000, dtype=int)
y[::2] = 0
clf = logreg
reg = LinearRegression()
for cross_val in (cross_val_score, cross_val_multiscore):
manual = cross_val(clf, X, y, cv=StratifiedKFold(2))
auto = cross_val(clf, X, y, cv=2)
assert_array_equal(manual, auto)
manual = cross_val(reg, X, y, cv=KFold(2))
auto = cross_val(reg, X, y, cv=2)
assert_array_equal(manual, auto)
| bsd-3-clause |
hsaputra/tensorflow | tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py | 10 | 12183 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, noise.shape[1].value)
def discriminator_fn(data, _):
return layers.fully_connected(data, 1)
def mock_head(testcase, expected_generator_inputs, expected_real_data,
generator_scope_name):
"""Returns a mock head that validates logits values and variable names."""
discriminator_scope_name = 'Discriminator' # comes from TFGAN defaults
generator_var_names = set([
'%s/fully_connected/weights:0' % generator_scope_name,
'%s/fully_connected/biases:0' % generator_scope_name])
discriminator_var_names = set([
'%s/fully_connected/weights:0' % discriminator_scope_name,
'%s/fully_connected/biases:0' % discriminator_scope_name])
def _create_estimator_spec(features, mode, logits, labels):
gan_model = logits # renaming for clarity
is_predict = mode == model_fn_lib.ModeKeys.PREDICT
testcase.assertIsNone(features)
testcase.assertIsNone(labels)
testcase.assertIsInstance(gan_model, namedtuples.GANModel)
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
expected_var_names = (generator_var_names if is_predict else
generator_var_names | discriminator_var_names)
testcase.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
assertions = []
def _or_none(x):
return None if is_predict else x
testcase.assertEqual(expected_generator_inputs, gan_model.generator_inputs)
# TODO(joelshor): Add check on `generated_data`.
testcase.assertItemsEqual(
generator_var_names,
set([x.name for x in gan_model.generator_variables]))
testcase.assertEqual(generator_scope_name, gan_model.generator_scope.name)
testcase.assertEqual(_or_none(expected_real_data), gan_model.real_data)
# TODO(joelshor): Add check on `discriminator_real_outputs`.
# TODO(joelshor): Add check on `discriminator_gen_outputs`.
if is_predict:
testcase.assertIsNone(gan_model.discriminator_scope)
else:
testcase.assertEqual(discriminator_scope_name,
gan_model.discriminator_scope.name)
testcase.assertEqual(_or_none(discriminator_fn), gan_model.discriminator_fn)
with ops.control_dependencies(assertions):
if mode == model_fn_lib.ModeKeys.TRAIN:
return model_fn_lib.EstimatorSpec(
mode=mode, loss=array_ops.zeros([]),
train_op=control_flow_ops.no_op(), training_hooks=[])
elif mode == model_fn_lib.ModeKeys.EVAL:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data,
loss=array_ops.zeros([]))
elif mode == model_fn_lib.ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data)
else:
testcase.fail('Invalid mode: {}'.format(mode))
head = test.mock.NonCallableMagicMock(spec=head_lib._Head)
head.create_estimator_spec = test.mock.MagicMock(
wraps=_create_estimator_spec)
return head
class GANModelFnTest(test.TestCase):
"""Tests that _gan_model_fn passes expected logits to mock head."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_logits_helper(self, mode):
"""Tests that the expected logits are passed to mock head."""
with ops.Graph().as_default():
training_util.get_or_create_global_step()
generator_inputs = {'x': array_ops.zeros([5, 4])}
real_data = (None if mode == model_fn_lib.ModeKeys.PREDICT else
array_ops.zeros([5, 4]))
generator_scope_name = 'generator'
head = mock_head(self,
expected_generator_inputs=generator_inputs,
expected_real_data=real_data,
generator_scope_name=generator_scope_name)
estimator_spec = estimator._gan_model_fn(
features=generator_inputs,
labels=real_data,
mode=mode,
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_scope_name=generator_scope_name,
head=head)
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=self._model_dir) as sess:
if mode == model_fn_lib.ModeKeys.TRAIN:
sess.run(estimator_spec.train_op)
elif mode == model_fn_lib.ModeKeys.EVAL:
sess.run(estimator_spec.loss)
elif mode == model_fn_lib.ModeKeys.PREDICT:
sess.run(estimator_spec.predictions)
else:
self.fail('Invalid mode: {}'.format(mode))
def test_logits_predict(self):
self._test_logits_helper(model_fn_lib.ModeKeys.PREDICT)
def test_logits_eval(self):
self._test_logits_helper(model_fn_lib.ModeKeys.EVAL)
def test_logits_train(self):
self._test_logits_helper(model_fn_lib.ModeKeys.TRAIN)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
if __name__ == '__main__':
test.main()
| apache-2.0 |
cainiaocome/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
This module implements estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
dist : array-like, shape (n_samples,)
The Mahalanobis distances of all observations, computed from the
raw estimates of location and covariance.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is approximately (but not
        exactly) zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        (n_samples + n_features + 1) / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
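# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module): it fits the MinCovDet estimator defined above on Gaussian data
# contaminated with a few uniform outliers and contrasts the robust estimates
# with the classical empirical covariance.  Sample sizes and the contamination
# level are arbitrary illustration values.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_inliers = rng.multivariate_normal(mean=[0., 0.],
                                        cov=[[1., .7], [.7, 1.]],
                                        size=200)
    X_outliers = rng.uniform(low=-10., high=10., size=(20, 2))
    X_demo = np.vstack([X_inliers, X_outliers])
    mcd = MinCovDet(random_state=0).fit(X_demo)
    # the robust estimates stay close to the inlier parameters ...
    print("robust location:", mcd.location_)
    print("robust covariance:\n", mcd.covariance_)
    # ... while the classical estimate is inflated by the outliers
    print("empirical covariance:\n", empirical_covariance(X_demo))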
| bsd-3-clause |
sangwook236/sangwook-library | python/test/language_processing/ocropus_data.py | 2 | 10608 | import time, glob
import numpy as np
import cv2
#import sklearn
#import swl.machine_learning.util as swl_ml_util
#import hangeul_util as hg_util
import text_line_data
# REF [site] >> https://github.com/tmbdev/ocropy
class OcropusTextLineDatasetBase(text_line_data.FileBasedTextLineDatasetBase):
def __init__(self, label_converter, image_height, image_width, image_channel, use_NWHC=True):
super().__init__(label_converter, image_height, image_width, image_channel, use_NWHC)
class EnglishOcropusTextLineDataset(OcropusTextLineDatasetBase):
def __init__(self, label_converter, data_dir_path, image_height, image_width, image_channel, train_test_ratio, max_label_len, use_NWHC=True):
super().__init__(label_converter, image_height, image_width, image_channel, use_NWHC)
if train_test_ratio < 0.0 or train_test_ratio > 1.0:
raise ValueError('Invalid train-test ratio: {}'.format(train_test_ratio))
#--------------------
if data_dir_path:
# Load data.
print('[SWL] Info: Start loading dataset...')
start_time = time.time()
image_filepaths, label_filepaths = sorted(glob.glob(data_dir_path + '/**/*.bin.png', recursive=False)), sorted(glob.glob(data_dir_path + '/**/*.gt.txt', recursive=False))
if not image_filepaths or not label_filepaths:
raise IOError('Failed to load data from {}.'.format(data_dir_path))
images, labels_str, labels_int = self._load_data_from_image_and_label_files(image_filepaths, label_filepaths, self._image_height, self._image_width, self._image_channel, max_label_len)
print('[SWL] Info: End loading dataset: {} secs.'.format(time.time() - start_time))
labels_str, labels_int = np.array(labels_str), np.array(labels_int)
num_examples = len(images)
indices = np.arange(num_examples)
np.random.shuffle(indices)
test_offset = round(train_test_ratio * num_examples)
train_indices, test_indices = indices[:test_offset], indices[test_offset:]
self._train_data, self._test_data = (images[train_indices], labels_str[train_indices], labels_int[train_indices]), (images[test_indices], labels_str[test_indices], labels_int[test_indices])
else:
            print('[SWL] Info: Dataset was not loaded.')
self._train_data, self._test_data = None, None
num_examples = 0
def augment(self, inputs, outputs, *args, **kwargs):
return inputs, outputs
def preprocess(self, inputs, outputs, *args, **kwargs):
if inputs is not None:
# Contrast limited adaptive histogram equalization (CLAHE).
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
#inputs = np.array([clahe.apply(inp) for inp in inputs])
# TODO [check] >> Preprocessing has influence on recognition rate.
# Normalization, standardization, etc.
#inputs = inputs.astype(np.float32)
if False:
inputs = sklearn.preprocessing.scale(inputs, axis=0, with_mean=True, with_std=True, copy=True)
#inputs = sklearn.preprocessing.minmax_scale(inputs, feature_range=(0, 1), axis=0, copy=True) # [0, 1].
#inputs = sklearn.preprocessing.maxabs_scale(inputs, axis=0, copy=True) # [-1, 1].
#inputs = sklearn.preprocessing.robust_scale(inputs, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True)
elif False:
# NOTE [info] >> Not good.
inputs = (inputs - np.mean(inputs, axis=None)) / np.std(inputs, axis=None) # Standardization.
elif False:
# NOTE [info] >> Not bad.
in_min, in_max = 0, 255 #np.min(inputs), np.max(inputs)
out_min, out_max = 0, 1 #-1, 1
inputs = (inputs - in_min) * (out_max - out_min) / (in_max - in_min) + out_min # Normalization.
elif False:
inputs /= 255.0 # Normalization.
if outputs is not None:
# One-hot encoding.
#outputs = tf.keras.utils.to_categorical(outputs, num_classes).astype(np.uint8)
pass
return inputs, outputs
class HangeulOcropusTextLineDataset(OcropusTextLineDatasetBase):
def __init__(self, label_converter, data_dir_path, image_height, image_width, image_channel, train_test_ratio, max_label_len, use_NWHC=True):
super().__init__(label_converter, image_height, image_width, image_channel, use_NWHC)
if train_test_ratio < 0.0 or train_test_ratio > 1.0:
raise ValueError('Invalid train-test ratio: {}'.format(train_test_ratio))
#--------------------
if data_dir_path:
# Load data.
print('[SWL] Info: Start loading dataset...')
start_time = time.time()
image_filepaths, label_filepaths = sorted(glob.glob(data_dir_path + '/**/*.bin.png', recursive=False)), sorted(glob.glob(data_dir_path + '/**/*.gt.txt', recursive=False))
if not image_filepaths or not label_filepaths:
raise IOError('Failed to load data from {}.'.format(data_dir_path))
images, labels_str, labels_int = self._load_data_from_image_and_label_files(image_filepaths, label_filepaths, self._image_height, self._image_width, self._image_channel, max_label_len)
print('[SWL] Info: End loading dataset: {} secs.'.format(time.time() - start_time))
labels_str, labels_int = np.array(labels_str), np.array(labels_int)
num_examples = len(images)
indices = np.arange(num_examples)
np.random.shuffle(indices)
test_offset = round(train_test_ratio * num_examples)
train_indices, test_indices = indices[:test_offset], indices[test_offset:]
self._train_data, self._test_data = (images[train_indices], labels_str[train_indices], labels_int[train_indices]), (images[test_indices], labels_str[test_indices], labels_int[test_indices])
else:
            print('[SWL] Info: Dataset was not loaded.')
self._train_data, self._test_data = None, None
num_examples = 0
def augment(self, inputs, outputs, *args, **kwargs):
return inputs, outputs
def preprocess(self, inputs, outputs, *args, **kwargs):
if inputs is not None:
# Contrast limited adaptive histogram equalization (CLAHE).
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
#inputs = np.array([clahe.apply(inp) for inp in inputs])
# TODO [check] >> Preprocessing has influence on recognition rate.
# Normalization, standardization, etc.
inputs = inputs.astype(np.float32)
if False:
inputs = sklearn.preprocessing.scale(inputs, axis=0, with_mean=True, with_std=True, copy=True)
#inputs = sklearn.preprocessing.minmax_scale(inputs, feature_range=(0, 1), axis=0, copy=True) # [0, 1].
#inputs = sklearn.preprocessing.maxabs_scale(inputs, axis=0, copy=True) # [-1, 1].
#inputs = sklearn.preprocessing.robust_scale(inputs, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True)
elif False:
# NOTE [info] >> Not good.
inputs = (inputs - np.mean(inputs, axis=None)) / np.std(inputs, axis=None) # Standardization.
elif False:
# NOTE [info] >> Not bad.
in_min, in_max = 0, 255 #np.min(inputs), np.max(inputs)
out_min, out_max = 0, 1 #-1, 1
inputs = (inputs - in_min) * (out_max - out_min) / (in_max - in_min) + out_min # Normalization.
elif False:
inputs /= 255.0 # Normalization.
elif True:
inputs = (inputs / 255.0) * 2.0 - 1.0 # Normalization.
if outputs is not None:
# One-hot encoding.
#outputs = tf.keras.utils.to_categorical(outputs, num_classes).astype(np.uint8)
pass
return inputs, outputs
class HangeulJamoOcropusTextLineDataset(OcropusTextLineDatasetBase):
def __init__(self, label_converter, data_dir_path, image_height, image_width, image_channel, train_test_ratio, max_label_len, use_NWHC=True):
super().__init__(label_converter, image_height, image_width, image_channel, use_NWHC)
if train_test_ratio < 0.0 or train_test_ratio > 1.0:
raise ValueError('Invalid train-test ratio: {}'.format(train_test_ratio))
#--------------------
if data_dir_path:
# Load data.
print('[SWL] Info: Start loading dataset...')
start_time = time.time()
image_filepaths, label_filepaths = sorted(glob.glob(data_dir_path + '/**/*.bin.png', recursive=False)), sorted(glob.glob(data_dir_path + '/**/*.gt.txt', recursive=False))
if not image_filepaths or not label_filepaths:
raise IOError('Failed to load data from {}.'.format(data_dir_path))
images, labels_str, labels_int = self._load_data_from_image_and_label_files(image_filepaths, label_filepaths, self._image_height, self._image_width, self._image_channel, max_label_len)
print('[SWL] Info: End loading dataset: {} secs.'.format(time.time() - start_time))
labels_str, labels_int = np.array(labels_str), np.array(labels_int)
num_examples = len(images)
indices = np.arange(num_examples)
np.random.shuffle(indices)
test_offset = round(train_test_ratio * num_examples)
train_indices, test_indices = indices[:test_offset], indices[test_offset:]
self._train_data, self._test_data = (images[train_indices], labels_str[train_indices], labels_int[train_indices]), (images[test_indices], labels_str[test_indices], labels_int[test_indices])
else:
            print('[SWL] Info: Dataset was not loaded.')
self._train_data, self._test_data = None, None
num_examples = 0
def augment(self, inputs, outputs, *args, **kwargs):
return inputs, outputs
def preprocess(self, inputs, outputs, *args, **kwargs):
if inputs is not None:
# Contrast limited adaptive histogram equalization (CLAHE).
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
#inputs = np.array([clahe.apply(inp) for inp in inputs])
# TODO [check] >> Preprocessing has influence on recognition rate.
# Normalization, standardization, etc.
inputs = inputs.astype(np.float32)
if False:
inputs = sklearn.preprocessing.scale(inputs, axis=0, with_mean=True, with_std=True, copy=True)
#inputs = sklearn.preprocessing.minmax_scale(inputs, feature_range=(0, 1), axis=0, copy=True) # [0, 1].
#inputs = sklearn.preprocessing.maxabs_scale(inputs, axis=0, copy=True) # [-1, 1].
#inputs = sklearn.preprocessing.robust_scale(inputs, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True)
elif False:
# NOTE [info] >> Not good.
inputs = (inputs - np.mean(inputs, axis=None)) / np.std(inputs, axis=None) # Standardization.
elif False:
# NOTE [info] >> Not bad.
in_min, in_max = 0, 255 #np.min(inputs), np.max(inputs)
out_min, out_max = 0, 1 #-1, 1
inputs = (inputs - in_min) * (out_max - out_min) / (in_max - in_min) + out_min # Normalization.
elif False:
inputs /= 255.0 # Normalization.
elif True:
inputs = (inputs / 255.0) * 2.0 - 1.0 # Normalization.
if outputs is not None:
# One-hot encoding.
#outputs = tf.keras.utils.to_categorical(outputs, num_classes).astype(np.uint8)
pass
return inputs, outputs
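# ---------------------------------------------------------------------------
# Minimal sketch (added for illustration, not part of the original file): the
# active normalization branch in the preprocess() methods above maps 8-bit
# pixel values from [0, 255] to [-1, 1] via (inputs / 255.0) * 2.0 - 1.0.
if __name__ == '__main__':
    _demo = np.array([0.0, 127.5, 255.0], dtype=np.float32)
    print((_demo / 255.0) * 2.0 - 1.0)  # -> [-1.  0.  1.]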
| gpl-2.0 |
mdeger/nest-simulator | examples/neuronview/neuronview.py | 13 | 10676 | # -*- coding: utf-8 -*-
#
# neuronview.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk # noqa
import pango # noqa
import gobject # noqa
from matplotlib.figure import Figure # noqa
from matplotlib.backends.backend_gtkagg import \
FigureCanvasGTKAgg as FigureCanvas # noqa
import matplotlib.gridspec as gridspec # noqa
import os # noqa
import nest # noqa
default_neuron = "iaf_psc_alpha"
default_stimulator = "dc_generator"
class Main():
def __init__(self):
self._gladefile = "neuronview.glade"
self._builder = gtk.Builder()
self._builder.add_from_file(self._gladefile)
self._builder.connect_signals(self)
self._win = self._builder.get_object("mainwindow")
self._win.resize(900, 700)
box = self._builder.get_object("box5")
self._stimulatordictview = DictView()
self._builder.get_object("scrolledwindow2").add(
self._stimulatordictview)
box = self._builder.get_object("box4")
self._neurondictview = DictView()
self._builder.get_object("scrolledwindow3").add(self._neurondictview)
self.populate_comboboxes()
self._figure = Figure(figsize=(5, 4), dpi=100)
canvas = FigureCanvas(self._figure)
canvas.set_size_request(200, 250)
canvas.show()
box = self._builder.get_object("box3")
bg_style = box.get_style().bg[gtk.STATE_NORMAL]
gtk_color = (bg_style.red_float, bg_style.green_float,
bg_style.blue_float)
self._figure.set_facecolor(gtk_color)
box.pack_start(canvas)
self._win.show()
gtk.main()
def update_figure(self, spikes, potentials):
if nest.GetKernelStatus("time") != 0.0:
self._figure.clear()
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
ax0 = self._figure.add_subplot(gs[0])
ax0.plot(spikes[0]["times"], [1] * len(spikes[0]["times"]), ".")
ax0.set_yticks([])
ax0.set_xticks([])
ax1 = self._figure.add_subplot(gs[1])
ax1.plot(potentials[0]["times"], potentials[0]["V_m"], "r-")
ax1.set_ylabel("$V_m$ (mV)")
ax1.set_xlabel("time (s)")
# plt.tight_layout()
self._figure.canvas.draw()
def filter_statusdict(self, params):
for key in ["archiver_length", "available", "capacity",
"elementsize", "frozen", "global_id",
"instantiations", "is_refractory", "local",
"model", "element_type", "offset", "origin",
"receptor_types", "recordables",
"refractory_input", "rmax", "state", "t_spike",
"thread", "tlast", "tspike", "type_id", "vp",
"ymod"]:
if key in params.keys():
params.pop(key)
def populate_comboboxes(self):
neuronmodels = self._builder.get_object("neuronmodels")
neuronmodelsliststore = neuronmodels.get_model()
stimulatormodels = self._builder.get_object("stimulatormodels")
stimulatormodelsliststore = stimulatormodels.get_model()
neuron_it = None
stimulator_it = None
models = nest.Models("nodes")
models = [x for x in models if
x not in ["correlation_detector", "sli_neuron",
"iaf_psc_alpha_norec", "parrot_neuron",
"parrot_neuron_ps"]]
for entry in models:
try:
entrytype = nest.GetDefaults(entry)["element_type"]
except:
entrytype = "unknown"
if entrytype == "neuron":
it = neuronmodelsliststore.append([entry])
if entry == default_neuron:
neuron_it = it
elif entrytype == "stimulator":
it = stimulatormodelsliststore.append([entry])
if entry == default_stimulator:
stimulator_it = it
cell = gtk.CellRendererText()
neuronmodels.pack_start(cell, True)
neuronmodels.add_attribute(cell, 'text', 0)
neuronmodels.set_active_iter(neuron_it)
stimulatormodels.pack_start(cell, True)
stimulatormodels.add_attribute(cell, 'text', 0)
stimulatormodels.set_active_iter(stimulator_it)
docviewcombo = self._builder.get_object("docviewcombo")
docviewcomboliststore = docviewcombo.get_model()
docviewcomboliststore.append(["Stimulating device"])
it = docviewcomboliststore.append(["Neuron"])
docviewcombo.pack_start(cell, True)
docviewcombo.add_attribute(cell, 'text', 0)
docviewcombo.set_active_iter(it)
def get_help_text(self, name):
nest.sli_run("statusdict /prgdocdir get")
docdir = nest.sli_pop()
helptext = "No documentation available"
for subdir in ["cc", "sli"]:
filename = os.path.join(docdir, "help", subdir, name + ".hlp")
if os.path.isfile(filename):
helptext = open(filename, 'r').read()
return helptext
def on_model_selected(self, widget):
liststore = widget.get_model()
model = liststore.get_value(widget.get_active_iter(), 0)
statusdict = nest.GetDefaults(model)
self.filter_statusdict(statusdict)
if widget == self._builder.get_object("neuronmodels"):
self._neurondictview.set_params(statusdict)
if widget == self._builder.get_object("stimulatormodels"):
self._stimulatordictview.set_params(statusdict)
self.on_doc_selected(self._builder.get_object("docviewcombo"))
def on_doc_selected(self, widget):
liststore = widget.get_model()
doc = liststore.get_value(widget.get_active_iter(), 0)
docview = self._builder.get_object("docview")
docbuffer = gtk.TextBuffer()
if doc == "Neuron":
combobox = self._builder.get_object("neuronmodels")
if doc == "Stimulating device":
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
model = liststore.get_value(combobox.get_active_iter(), 0)
docbuffer.set_text(self.get_help_text(model))
docview.set_buffer(docbuffer)
docview.modify_font(pango.FontDescription("monospace 10"))
def on_simulate_clicked(self, widget):
nest.ResetKernel()
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
stimulatormodel = liststore.get_value(combobox.get_active_iter(), 0)
params = self._stimulatordictview.get_params()
stimulator = nest.Create(stimulatormodel, params=params)
combobox = self._builder.get_object("neuronmodels")
liststore = combobox.get_model()
neuronmodel = liststore.get_value(combobox.get_active_iter(), 0)
neuron = nest.Create(neuronmodel,
params=self._neurondictview.get_params())
weight = self._builder.get_object("weight").get_value()
delay = self._builder.get_object("delay").get_value()
nest.Connect(stimulator, neuron, weight, delay)
sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
nest.Connect(neuron, sd)
vm = nest.Create("voltmeter", params={"record_to": ["memory"],
"interval": 0.1})
nest.Connect(vm, neuron)
simtime = self._builder.get_object("simtime").get_value()
nest.Simulate(simtime)
self.update_figure(nest.GetStatus(sd, "events"),
nest.GetStatus(vm, "events"))
def on_delete_event(self, widget, event):
self.on_quit(widget)
return True
def on_quit(self, project):
self._builder.get_object("mainwindow").hide()
gtk.main_quit()
class DictView(gtk.TreeView):
def __init__(self, params=None):
gtk.TreeView.__init__(self)
if params:
self.params = params
self.repopulate()
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name", renderer, text=1)
self.append_column(column)
renderer = gtk.CellRendererText()
renderer.set_property("mode", gtk.CELL_RENDERER_MODE_EDITABLE)
renderer.set_property("editable", True)
column = gtk.TreeViewColumn("Value", renderer, text=2)
self.append_column(column)
self.set_size_request(200, 150)
renderer.connect("edited", self.check_value)
self.show()
def repopulate(self):
model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING,
gobject.TYPE_STRING)
for key in sorted(self.params.keys()):
pos = model.insert_after(None, None)
data = {"key": key, "element_type": type(self.params[key])}
model.set_value(pos, 0, data)
model.set_value(pos, 1, str(key))
model.set_value(pos, 2, str(self.params[key]))
self.set_model(model)
def check_value(self, widget, path, new_text):
model = self.get_model()
data = model[path][0]
try:
typename = data["element_type"].__name__
new_value = eval("%s('%s')" % (typename, new_text))
if typename == "bool" and new_text.lower() in ["false", "0"]:
new_value = False
self.params[data["key"]] = new_value
model[path][2] = str(new_value)
except ValueError:
old_value = self.params[data["key"]]
model[path][2] = str(old_value)
def get_params(self):
return self.params
def set_params(self, params):
self.params = params
self.repopulate()
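# ---------------------------------------------------------------------------
# Minimal headless sketch (added for illustration, not part of the original
# example): the same simulation that Main.on_simulate_clicked() drives through
# the GUI can be reproduced directly with the NEST API.  The stimulator
# parameters and the simtime/weight/delay defaults below are illustrative
# values, not taken from the GUI.
def _headless_demo(simtime=1000.0, weight=10.0, delay=1.0):
    nest.ResetKernel()
    stimulator = nest.Create(default_stimulator, params={"amplitude": 100.0})
    neuron = nest.Create(default_neuron)
    nest.Connect(stimulator, neuron, weight, delay)
    sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
    nest.Connect(neuron, sd)
    vm = nest.Create("voltmeter", params={"record_to": ["memory"],
                                          "interval": 0.1})
    nest.Connect(vm, neuron)
    nest.Simulate(simtime)
    return nest.GetStatus(sd, "events"), nest.GetStatus(vm, "events")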
if __name__ == "__main__":
Main()
| gpl-2.0 |
sniemi/EuclidVisibleInstrument | sandbox/example_digits.py | 1 | 1213 | # load the digits dataset from scikit-learn
# about 720 samples, roughly 180 samples per class
# the digits represented are 0, 1, 2 and 3 (n_class=4 below)
from sklearn import datasets
digits = datasets.load_digits(n_class=4)
data = digits.data # matrix where each row is a vector that represent a digit.
num = digits.target # num[i] is the digit represented by data[i]
# training the som
from minisom import MiniSom
som = MiniSom(20,20,64,sigma=.8,learning_rate=0.5)
print("Training...")
som.train_random(data,1500) # random training
print("\n...ready!")
# plotting the results
from pylab import text,show,cm,axis,figure,subplot,imshow,zeros
wmap = {}
figure(1)
im = 0
for x,t in zip(data,num): # scatterplot
w = som.winner(x)
wmap[w] = im
text(w[0]+.5, w[1]+.5, str(t), color=cm.Dark2(t / 4.), fontdict={'weight': 'bold', 'size': 11})
im = im + 1
axis([0,som.weights.shape[0],0,som.weights.shape[1]])
figure(2,facecolor='white')
cnt = 0
for i in range(20): # images mosaic
for j in range(20):
subplot(20,20,cnt,frameon=False, xticks=[], yticks=[])
if (i,j) in wmap:
imshow(digits.images[wmap[(i,j)]], cmap='Greys', interpolation='nearest')
else:
imshow(zeros((8,8)), cmap='Greys')
cnt = cnt + 1
show() # show the figure
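# Note (added comment, not in the original example): som.winner(x) returns the
# grid coordinates of the best-matching unit for sample x, so the scatterplot
# above places each digit label at the map node that responds most strongly to
# it, and the mosaic shows the sample image recorded for each such node in wmap.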
| bsd-2-clause |
depet/scikit-learn | sklearn/decomposition/tests/test_pca.py | 1 | 12507 | import warnings
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less, assert_greater
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import ProbabilisticPCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
"""PCA on dense arrays"""
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
def test_whitening():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_pca_check_projection():
"""Test that the projection of data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
"""Test that the projection of data can be inverted"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on dense data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
"""Test that the projection by RandomizedPCA on list data is correct"""
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on dense data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_sparse_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on sparse data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
X = csr_matrix(X)
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Xt = csr_matrix(Xt)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
assert_equal(len(w), 1)
assert_equal(w[0].category, DeprecationWarning)
Yt /= np.sqrt((Yt ** 2).sum())
np.testing.assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_sparse_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on sparse data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
# no large means because the sparse version of randomized pca does not do
# centering to avoid breaking the sparsity
X = csr_matrix(X)
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
assert_equal(len(w), 1)
assert_equal(w[0].category, DeprecationWarning)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X.todense(), Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', DeprecationWarning)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
assert_equal(len(w), 1)
assert_equal(w[0].category, DeprecationWarning)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X.todense() - Y_inverse)
/ np.abs(X).mean()).max()
    # XXX: this does not seem to work as expected:
assert_almost_equal(relative_max_delta, 0.91, decimal=2)
def test_pca_dim():
"""Check automated dimensionality setting"""
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
"""
"""
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_probabilistic_pca_1():
"""Test that probabilistic PCA yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1.mean() / h, 1, 0)
def test_probabilistic_pca_2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
ll2 = ppca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1.mean(), ll2.mean())
def test_probabilistic_pca_3():
"""The homoscedastic model should work slightly worth
than the heteroscedastic one in over-fitting condition
"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
ppca.fit(X, homoscedastic=False)
ll2 = ppca.score(X)
assert_less(ll1.mean(), ll2.mean())
def test_probabilistic_pca_4():
"""Check that ppca select the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
ppca = ProbabilisticPCA(n_components=k)
ppca.fit(Xl)
ll[k] = ppca.score(Xt).mean()
assert_true(ll.argmax() == 1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
tyarkoni/pliers | setup.py | 1 | 1553 | from setuptools import setup, find_packages
import os
extra_setuptools_args = dict(
tests_require=['pytest']
)
thispath, _ = os.path.split(__file__)
ver_file = os.path.join(thispath, 'pliers', 'version.py')
with open(ver_file) as fp:
exec(fp.read(), globals(), locals())
setup(
name="pliers",
version=locals()['__version__'],
description="Multimodal feature extraction in Python",
maintainer='Tal Yarkoni',
maintainer_email='tyarkoni@gmail.com',
url='http://github.com/tyarkoni/pliers',
install_requires=['numpy', 'scipy', 'moviepy', 'pandas',
'pillow', 'python-magic', 'requests', 'nltk'],
packages=find_packages(exclude=['pliers/tests']),
license='MIT',
package_data={'pliers': ['datasets/*'],
'pliers.tests': ['data/*/*']
},
zip_safe=False,
download_url='https://github.com/tyarkoni/pliers/archive/%s.tar.gz' %
__version__,
**extra_setuptools_args,
extras_require={
'all': ['clarifai', 'duecredit', 'face_recognition', 'python-twitter',
'gensim', 'google-api-python-client', 'google-compute-engine',
                'librosa>=0.6.3', 'numba<=0.48', 'matplotlib', 'opencv-python',
'pathos', 'pygraphviz', 'pysrt', 'pytesseract',
'python-twitter', 'scikit-learn', 'seaborn', 'soundfile',
'spacy', 'SpeechRecognition>=3.6.0', 'tensorflow>=1.0.0',
'torch', 'transformers', 'xlrd', 'rev_ai']
},
python_requires='>=3.5',
)
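# Usage note (added comment, not part of the original setup.py): the optional
# dependencies declared under extras_require above are installed with pip's
# extras syntax, e.g. ``pip install pliers[all]`` or, from a source checkout,
# ``pip install .[all]``.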
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/metrics/classification.py | 28 | 67703 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..preprocessing import MultiLabelBinarizer
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multilabel-sequences', \
'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator",
"multilabel-sequences"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
if y_type == 'multilabel-sequences':
labels = unique_labels(y_true, y_pred)
binarizer = MultiLabelBinarizer(classes=labels, sparse_output=True)
y_true = binarizer.fit_transform(y_true)
y_pred = binarizer.fit_transform(y_pred)
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
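# Worked illustration for _weighted_sum (added comment, not part of the
# original module): with sample_score = [1, 0, 1] and
# sample_weight = [0.5, 1.0, 2.0], normalize=True returns
# np.average([1, 0, 1], weights=[0.5, 1.0, 2.0]) = 2.5 / 3.5 ~= 0.714, while
# normalize=False returns the weighted count np.dot(...) = 2.5.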
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
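# Worked example for cohen_kappa_score (added comment, not part of the
# original module): for y1 = [0, 0, 1, 1] and y2 = [0, 0, 1, 0], the
# normalized confusion matrix is P = [[0.5, 0.0], [0.25, 0.25]], so
# p_observed = trace(P) = 0.75 and
# p_expected = dot(P.sum(axis=0), P.sum(axis=1)) = 0.75 * 0.5 + 0.25 * 0.5 = 0.5,
# giving kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.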
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
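# Illustrative sketch (hypothetical helper, dense multilabel indicator arrays
# only): the subset zero-one loss counts a sample as one error unless every
# label matches, which is what the accuracy_score-based code above computes.
def _subset_zero_one_example(y_true, y_pred, normalize=True):
    import numpy as np
    errors = np.any(np.asarray(y_true) != np.asarray(y_pred), axis=1)
    return errors.mean() if normalize else int(errors.sum())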
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class, with weighting that depends on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
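# Illustrative sketch (hypothetical helper): F1 as the harmonic mean of
# precision and recall, matching the formula quoted in the docstring above.
def _f1_from_precision_recall_example(precision, recall):
    if precision + recall == 0:
        return 0.0
    return 2.0 * precision * recall / (precision + recall)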
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
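# Illustrative sketch (hypothetical helper): the general F-beta formula,
# showing how beta shifts the balance between precision and recall.
def _fbeta_from_precision_recall_example(precision, recall, beta):
    beta2 = beta ** 2
    denominator = beta2 * precision + recall
    if denominator == 0:
        return 0.0
    return (1 + beta2) * precision * recall / denominator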
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
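# Illustrative sketch (assumption): the pattern used by _prf_divide, shown on
# plain numpy arrays -- divide under errstate, then overwrite the entries whose
# denominator was zero.
# >>> import numpy as np
# >>> num, den = np.array([1., 2., 0.]), np.array([2., 0., 0.])
# >>> with np.errstate(divide='ignore', invalid='ignore'):
# ...     out = num / den
# >>> out[den == 0] = 0.0
# >>> out
# array([ 0.5,  0. ,  0. ])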
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array([0, 1, 2, 0, 1, 2])
>>> y_pred = np.array([0, 2, 1, 0, 0, 1])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
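# Illustrative sketch (hypothetical helper): how the per-label scores computed
# above are collapsed for average='macro' (unweighted mean) versus
# average='weighted' (mean weighted by support).
def _average_per_label_scores_example(scores, support, average):
    import numpy as np
    scores = np.asarray(scores, dtype=float)
    if average == 'macro':
        return scores.mean()
    if average == 'weighted':
        return np.average(scores, weights=support)
    raise ValueError("unsupported average: %r" % average)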
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
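# Illustrative sketch (hypothetical helper): precision from raw counts,
# tp / (tp + fp), with a 0.0 convention when nothing was predicted positive.
def _precision_from_counts_example(tp, fp):
    return 0.0 if tp + fp == 0 else tp / float(tp + fp)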
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
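# Illustrative sketch (hypothetical helper): recall from raw counts,
# tp / (tp + fn), with a 0.0 convention when there are no positive samples.
def _recall_from_counts_example(tp, fn):
    return 0.0 if tp + fn == 0 else tp / float(tp + fn)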
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred``, which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. The Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
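# Illustrative sketch (hypothetical helper, dense indicator arrays only): the
# multilabel Hamming loss is the fraction of individual label positions that
# disagree between y_true and y_pred.
def _hamming_loss_dense_example(y_true, y_pred):
    import numpy as np
    return float(np.mean(np.asarray(y_true) != np.asarray(y_pred)))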
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
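# Illustrative sketch (hypothetical helper): binary log loss computed directly
# from clipped probabilities, matching the expression in the docstring above.
def _binary_log_loss_example(y_true, y_prob, eps=1e-15):
    import numpy as np
    y_true = np.asarray(y_true, dtype=float)
    p = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    return float(np.mean(-(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))))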
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
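# Illustrative sketch (hypothetical helper): binary hinge loss with labels
# already encoded as +1/-1, mirroring the margin computation above.
def _binary_hinge_loss_example(y_true, pred_decision):
    import numpy as np
    margin = np.asarray(y_true, dtype=float) * np.asarray(pred_decision,
                                                          dtype=float)
    return float(np.mean(np.clip(1 - margin, 0, None)))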
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label; if it is left as None, the
largest label in ``y_true`` is used as the positive class.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
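# Illustrative sketch (hypothetical helper): the Brier score is the mean
# squared difference between the predicted probability and the 0/1 outcome.
def _brier_score_example(y_true, y_prob):
    import numpy as np
    diff = np.asarray(y_true, dtype=float) - np.asarray(y_prob, dtype=float)
    return float(np.mean(diff ** 2))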
| bsd-3-clause |
verilylifesciences/purplequery | purplequery/dataframe_node.py | 1 | 18815 | # Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''All subclasses of DataframeNode'''
import itertools
import operator
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union # noqa: F401
import pandas as pd
from six.moves import reduce
from .bq_abstract_syntax_tree import (EMPTY_CONTEXT, EMPTY_NODE, # noqa: F401
AbstractSyntaxTreeNode, DataframeNode, DatasetType,
EvaluatableNode, EvaluationContext, Field,
MarkerSyntaxTreeNode, TableContext, _EmptyNode)
from .bq_types import (BQArray, BQStructType, BQType, TypedDataFrame, TypedSeries, # noqa: F401
implicitly_coerce)
from .evaluatable_node import Array, Selector, StarSelector, Value # noqa: F401
from .join import DataSource # noqa: F401
DEFAULT_TABLE_NAME = None
_OrderByType = List[Tuple[Field, str]]
_LimitType = Tuple[EvaluatableNode, EvaluatableNode]
class _WithTableContext(TableContext):
'''A TableContext augmented by a WITH clause.'''
def __init__(self, name, table, parent_context):
# type: (str, TypedDataFrame, TableContext) -> None
self.name = name
self.table = table
self.parent_context = parent_context
def lookup(self, path):
# type: (Sequence[str]) -> Tuple[TypedDataFrame, Optional[str]]
'''Look up a path to a table in this context.'''
if len(path) == 1 and path[0] == self.name:
return self.table, self.name
if '.'.join(path) == self.name:
return self.table, path[-1]
return self.parent_context.lookup(path)
class QueryExpression(DataframeNode):
'''Highest level definition of a query.
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#sql-syntax
(see query_expr)
'''
def __init__(self,
with_clauses, # type: Union[_EmptyNode, List[Tuple[str, DataframeNode]]]
base_query, # type: DataframeNode
order_by, # type: Union[_EmptyNode, _OrderByType]
limit, # type: Union[_EmptyNode, _LimitType]
):
# type: (...) -> None
'''Set up QueryExpression node.
Args:
with_clauses: Optional WITH expression
base_query: Main part of query
order_by: Expression by which to order results
limit: Number of rows to return, possibly with an offset
'''
self.with_clauses = with_clauses
self.base_query = base_query
self.order_by = order_by
self.limit = limit
def _order_by(self, order_by, typed_dataframe, table_name, table_context):
# type: (_OrderByType, TypedDataFrame, Optional[str], TableContext) -> TypedDataFrame
'''If ORDER BY is specified, sort the data by the given column(s)
in the given direction(s).
Args:
typed_dataframe: The currently resolved query as a TypedDataFrame
table_name: Resolved name of current typed_dataframe
table_context: A representation of the state of available tables
Returns:
A new TypedDataFrame that is ordered by the given criteria
'''
context = EvaluationContext(table_context)
context.add_table_from_dataframe(typed_dataframe, table_name, EMPTY_NODE)
# order_by is a list of (field, direction) tuples to sort by
fields = []
directions = [] # ascending = True, descending = False
for field, direction in order_by:
if isinstance(field, Field):
path = '.'.join(context.get_canonical_path(field.path))
fields.append(path)
elif isinstance(field, Value):
if not isinstance(field.value, int):
raise ValueError('Attempt to order by a literal non-integer constant {}'
.format(field.value))
index = field.value - 1 # order by 1 means the first field, i.e. index 0
fields.append(context.table.dataframe.columns[index])
else:
raise ValueError('Invalid field specification {}'.format(field))
if direction == 'DESC':
directions.append(False)
else:
# Default sort order in Standard SQL is ASC
directions.append(True)
return TypedDataFrame(
context.table.dataframe.sort_values(fields, ascending=directions),
context.table.types)
def _limit(self, limit, typed_dataframe):
# type: (_LimitType, TypedDataFrame) -> TypedDataFrame
'''If limit is specified, only return that many rows.
If offset is specified, start at that row number, not the first row.
Args:
typed_dataframe: The currently resolved query as a TypedDataFrame
Returns:
A new TypedDataFrame that conforms to the given limit and offset
'''
limit_expression, offset_expression = limit
# Use empty context because the limit is a constant
limit_value = limit_expression.evaluate(EMPTY_CONTEXT)
if not isinstance(limit_value, TypedSeries):
raise ValueError("invalid limit expression {}".format(limit_expression))
limit, = limit_value.series
if offset_expression is not EMPTY_NODE:
# Use empty context because the offset is also a constant
offset_value = offset_expression.evaluate(EMPTY_CONTEXT)
if not isinstance(offset_value, TypedSeries):
raise ValueError("invalid offset expression {}".format(offset_expression))
offset, = offset_value.series
else:
offset = 0
return TypedDataFrame(
typed_dataframe.dataframe[offset:limit + offset],
typed_dataframe.types)
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
if not isinstance(self.with_clauses, _EmptyNode):
name_list = [name for name, _ in self.with_clauses]
if len(name_list) > len(set(name_list)):
raise ValueError("Duplicate names in WITH clauses are not allowed: {}"
.format(name_list))
for name, dataframe_node in self.with_clauses:
table_context = _WithTableContext(name,
dataframe_node.get_dataframe(table_context)[0],
table_context)
typed_dataframe, table_name = self.base_query.get_dataframe(table_context, outer_context)
if not isinstance(self.order_by, _EmptyNode):
typed_dataframe = self._order_by(
self.order_by, typed_dataframe, table_name, table_context)
if not isinstance(self.limit, _EmptyNode):
typed_dataframe = self._limit(self.limit, typed_dataframe)
return typed_dataframe, DEFAULT_TABLE_NAME
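# Illustrative sketch (assumption, not part of purplequery's API): the ORDER BY
# and LIMIT/OFFSET handling above amounts to a pandas sort followed by a slice.
# >>> import pandas as pd
# >>> df = pd.DataFrame({'a': [3, 1, 2]})
# >>> df.sort_values(['a'], ascending=[True])[1:1 + 2]  # OFFSET 1 LIMIT 2
#    a
# 2  2
# 0  3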
class SetOperation(DataframeNode):
'''Represents a set operation between two other query expressions - UNION, INTERSECT, etc.'''
def __init__(self, left_query, set_operator, right_query):
# type: (DataframeNode, str, DataframeNode) -> None
self.left_query = left_query
self.set_operator = set_operator
self.right_query = right_query
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
left_dataframe, unused_left_name = self.left_query.get_dataframe(
table_context, outer_context)
right_dataframe, unused_right_name = self.right_query.get_dataframe(
table_context, outer_context)
num_left_columns = len(left_dataframe.types)
num_right_columns = len(right_dataframe.types)
if num_left_columns != num_right_columns:
raise ValueError("Queries in {} have mismatched column counts: {} vs {}"
.format(self.set_operator, num_left_columns, num_right_columns))
combined_types = [implicitly_coerce(left_type, right_type)
for left_type, right_type in zip(left_dataframe.types,
right_dataframe.types)]
if self.set_operator == 'UNION_ALL':
return TypedDataFrame(
pd.concat([left_dataframe.dataframe,
# Rename second table to use first table's column names
right_dataframe.dataframe.rename(
columns=dict(zip(right_dataframe.dataframe.columns,
left_dataframe.dataframe.columns)))]),
combined_types), DEFAULT_TABLE_NAME
else:
raise NotImplementedError("set operation {} not implemented".format(self.set_operator))
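# Illustrative sketch (assumption): the UNION_ALL branch above is essentially a
# pd.concat after renaming the right-hand columns to match the left-hand ones.
# >>> import pandas as pd
# >>> left = pd.DataFrame({'x': [1]})
# >>> right = pd.DataFrame({'y': [2]})
# >>> pd.concat([left, right.rename(columns={'y': 'x'})])
#    x
# 0  1
# 0  2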
def _evaluate_fields_as_dataframe(fields, context):
# type: (Sequence[EvaluatableNode], EvaluationContext) -> TypedDataFrame
'''Evaluates a list of expressions and constructs a TypedDataFrame from the result.
Args:
fields: A list of expressions (evaluatable abstract syntax tree nodes)
context: The context to evaluate the expressions
Returns:
A TypedDataFrame consisting of the results of the evaluation.
'''
# Evaluates each of the given fields to get a list of tables and/or
# single columns
evaluated_fields = [field.evaluate(context) for field in fields]
# Creates one large table out of each of the evaluated field
# tables/columns
types = reduce(operator.add,
[field.types for field in evaluated_fields], []) # type: List[BQType]
combined_evaluated_data = (
pd.concat([field.dataframe for field in evaluated_fields], axis=1)
if evaluated_fields else pd.DataFrame([]))
return TypedDataFrame(combined_evaluated_data, types)
class Select(MarkerSyntaxTreeNode, DataframeNode):
'''SELECT query to retrieve rows from a table(s).
https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select-list
'''
def __init__(self, modifier, # type: AbstractSyntaxTreeNode
fields, # type: Sequence[Union[Selector, StarSelector]]
from_, # type: Union[_EmptyNode, DataSource]
where, # type: Union[_EmptyNode, EvaluatableNode]
group_by, # type: Union[_EmptyNode, List[Union[Value, Field]]]
having # type: Union[_EmptyNode, EvaluatableNode]
):
# type: (...) -> None
'''Set up SELECT node.
Args:
modifier: Optional ALL or DISTINCT modifier
fields: Columns to return
from_: Table/expression from which to retrieve rows
where: WHERE filter condition, if any
group_by: GROUP BY grouping condition, if any
having: HAVING filter condition, if any
'''
self.modifier = modifier
self.fields = fields
for i, field in enumerate(self.fields):
field.position = i + 1 # position is 1-up, i.e the first selector is position #1.
self.from_ = from_
self.where = where
if isinstance(group_by, _EmptyNode):
self.group_by = group_by # type: Union[_EmptyNode, List[Field]]
else:
self.group_by = []
for grouper in group_by:
if isinstance(grouper, Value):
if not isinstance(grouper.value, int):
raise ValueError('Attempt to group by a literal non-integer constant {}'
.format(grouper.value))
# GROUP BY 3 means group by the third field in the select, the field at index 2,
# i.e. we have to subtract one from the user-specified value to get the index.
# We construct a one-element field path just as if they'd specified the name
# of the corresponding field.
nth_field = self.fields[grouper.value - 1]
if not isinstance(nth_field, Selector):
raise ValueError('cannot GROUP BY {}th selector'.format(grouper.value))
grouper_path = (nth_field.name(),)
self.group_by.append(Field(grouper_path))
else:
self.group_by.append(grouper)
self.having = having
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''Scope the given datasets by the criteria specified in the
instance's fields.
Args:
table_context: All the tables in the database
outer_context: The context of the outer query, if this Select is a subquery;
otherwise None
Returns:
Tuple of the resulting table (TypedDataFrame) and a name for
this table
'''
if isinstance(self.from_, _EmptyNode):
context = EvaluationContext(table_context)
else:
context = self.from_.create_context(table_context)
if outer_context:
context.add_subcontext(outer_context)
# Expand out any * fields so that we have a list just of selectors.
expanded_fields = list(itertools.chain(*[
[selector] if isinstance(selector, Selector)
else selector.get_selectors(context)
for selector in self.fields]))
context.selector_names = [
selector.name() for selector in self.fields if isinstance(selector, Selector)]
if not isinstance(self.where, _EmptyNode):
# Filter table by WHERE condition
rows_to_keep = self.where.evaluate(context)
if not isinstance(rows_to_keep, TypedSeries):
raise ValueError("Invalid WHERE expression {}".format(rows_to_keep))
context.table = TypedDataFrame(
context.table.dataframe.loc[rows_to_keep.series],
context.table.types)
if not isinstance(self.group_by, _EmptyNode):
fields_for_evaluation = context.do_group_by(
expanded_fields, self.group_by) # type: Sequence[EvaluatableNode]
elif any(field.is_aggregated() for field in expanded_fields):
fields_for_evaluation = context.do_group_by(expanded_fields, [])
else:
fields_for_evaluation = expanded_fields
result = _evaluate_fields_as_dataframe(fields_for_evaluation, context)
if not isinstance(self.having, _EmptyNode):
having_context = EvaluationContext(table_context)
having_context.add_table_from_dataframe(result, None, EMPTY_NODE)
having_context.add_subcontext(context)
having_context.group_by_paths = context.group_by_paths
having = self.having.mark_grouped_by(context.group_by_paths, having_context)
rows_to_keep = having.evaluate(having_context)
if not isinstance(rows_to_keep, TypedSeries):
raise ValueError("Invalid HAVING expression {}".format(rows_to_keep))
result = TypedDataFrame(result.dataframe.loc[rows_to_keep.series], result.types)
if self.modifier == 'DISTINCT':
result = TypedDataFrame(result.dataframe.drop_duplicates(), result.types)
return result, DEFAULT_TABLE_NAME
class TableReference(DataframeNode):
'''A table reference specified as Project.Dataset.Table (or possibly
Dataset.Table or just Table if there is only one project and/or dataset).
'''
def __init__(self, path):
# type: (Tuple[str, ...]) -> None
# If the table reference is specified with backticks, it will be parsed
# as one element, so we need to split into parts here.
if len(path) == 1:
split_path = path[0].split('.') # type: List[str]
path = tuple(split_path)
self.path = path # type: Tuple[str, ...]
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
del outer_context # Unused
return table_context.lookup(self.path)
class Unnest(DataframeNode, MarkerSyntaxTreeNode):
'''An expression unnesting an array into a column of data.'''
def __init__(self, array_node):
# type: (Array) -> None
self.array_node = array_node
def get_dataframe(self, table_context, outer_context=None):
# type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
'''See parent, DataframeNode'''
del outer_context # Unused
context = EvaluationContext(table_context)
result = self.array_node.evaluate(context)
if isinstance(result, TypedDataFrame):
raise ValueError('UNNEST({}) did not result in one column'.format(self.array_node))
result_type, = result.types
if not isinstance(result_type, BQArray):
raise ValueError("UNNESTing a non-array-typed value: {}".format(result_type))
contained_type = result_type.type_
if len(result.series) != 1:
raise ValueError('UNNEST({}) did not result in one row'.format(self.array_node))
result_array, = result.to_list()
if not isinstance(result_array, tuple):
raise ValueError("UNNEST({}) resulted in {!r} rather than an array"
.format(self.array_node, result_array))
if isinstance(contained_type, BQStructType):
i = 0
columns = []
for field in contained_type.fields:
if field:
columns.append(field)
else:
columns.append('f{}_'.format(i))
i += 1
result_dataframe = TypedDataFrame(
pd.DataFrame([[cell for cell in row] for row in result_array], columns=columns),
contained_type.types)
else:
result_dataframe = TypedDataFrame(
pd.DataFrame([[cell] for cell in result_array], columns=['f0_']), [contained_type])
return result_dataframe, None
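# Illustrative sketch (assumption): UNNEST of a plain (non-struct) array
# corresponds to turning each element into its own single-column row, as in
# the non-struct branch above.
# >>> import pandas as pd
# >>> pd.DataFrame([[cell] for cell in (10, 20, 30)], columns=['f0_'])
#    f0_
# 0   10
# 1   20
# 2   30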
| bsd-3-clause |
rseubert/scikit-learn | sklearn/tests/test_dummy.py | 27 | 17468 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
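def _demo_most_frequent_strategy():
    # Minimal illustrative sketch (mirrors the check above, data chosen
    # arbitrarily): the "most_frequent" strategy always predicts the modal
    # label seen at fit time, here the label 1.
    clf = DummyClassifier(strategy="most_frequent")
    clf.fit([[0], [0], [0]], [1, 2, 1])
    assert_array_equal(clf.predict([[0], [0]]), [1, 1])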
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
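def _demo_mean_strategy_regressor():
    # Minimal illustrative sketch (data chosen arbitrarily): the default
    # "mean" strategy memorizes np.mean(y) at fit time and repeats it for
    # every input row, regardless of the features.
    y = [1.0, 2.0, 6.0]
    reg = DummyRegressor().fit([[0]] * 3, y)
    assert_array_equal(reg.predict([[5], [7]]), [np.mean(y)] * 2)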
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
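def _demo_regressor_sample_weight():
    # Minimal illustrative sketch (data chosen arbitrarily): with sample
    # weights, the "mean" strategy reduces to a weighted average of y,
    # here (3 * 1.0 + 1 * 3.0) / 4 = 1.5.
    y = np.array([1.0, 3.0])
    w = np.array([3.0, 1.0])
    est = DummyRegressor(strategy="mean").fit([[0], [0]], y, w)
    assert_equal(est.constant_, np.average(y, weights=w))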
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/externals/joblib/__init__.py | 72 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
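A minimal sketch of this (the filename and data below are arbitrary
examples)::
>>> from sklearn.externals.joblib import dump, load
>>> _ = dump({'a': np.arange(3)}, '/tmp/joblib_demo.pkl')
>>> load('/tmp/joblib_demo.pkl')['a']
array([0, 1, 2])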
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
pnedunuri/scipy | scipy/interpolate/fitpack2.py | 39 | 61117 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
if not np.isfinite(x).all() or not np.isfinite(y).all():
raise ValueError("x and y array must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for '
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
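# A minimal round-trip sketch (illustrative only, data chosen arbitrarily):
# differentiating the antiderivative of a spline recovers, up to floating
# point error, the original spline values.
def _demo_antiderivative_roundtrip():
    x = np.linspace(0.0, 2.0 * np.pi, 50)
    spl = UnivariateSpline(x, np.sin(x), s=0)
    back = spl.antiderivative().derivative()
    return np.allclose(spl(x), back(x))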
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not np.isfinite(w).all()):
raise ValueError("Input must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not np.isfinite(w).all() or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx, ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
LSQBivariateSpline : to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10:  # lwrk2 was too small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
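# A minimal usage sketch (illustrative only, data chosen arbitrarily): with the
# default s=0, RectBivariateSpline interpolates the gridded data, so evaluating
# on the original grid reproduces it; here z = x**2 + y.
def _demo_rect_bivariate_spline():
    x = np.linspace(0.0, 4.0, 5)
    y = np.linspace(0.0, 3.0, 4)
    z = np.add.outer(x ** 2, y)  # shape (x.size, y.size)
    spline = RectBivariateSpline(x, y, z)
    return np.allclose(spline(x, y), z)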
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothSphereBivariateSpline :
to create a BivariateSpline through the given points
LSQSphereBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knot coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
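# --- Editor's illustrative sketch (not part of SciPy): building strictly
# interior knot vectors for the least-squares spline above, mirroring how the
# docstring example nudges the endpoints away from 0, pi and 2*pi.  The
# margin of 1e-4 and the knot counts are arbitrary assumptions.
def _example_interior_knots(margin=1e-4):
    knotst = np.linspace(0., np.pi, 7)
    knotsp = np.linspace(0., 2 * np.pi, 9)
    knotst[0] += margin
    knotst[-1] -= margin
    knotsp[0] += margin
    knotsp[-1] -= margin
    return knotst, knotsp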
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in xrange(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
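# --- Editor's illustrative sketch (not part of SciPy): the rule of thumb for
# the smoothing factor ``s`` described in the Notes above.  The grid sizes
# match the docstring example (9 x 18); the assumed error level of 0.1 and
# the trial schedule are arbitrary assumptions.
def _example_rect_sphere_s_estimate(u_size=9, v_size=18, noise_std=0.1):
    # If each r(i, j) carries a statistical error of about ``noise_std``, a
    # good ``s`` should not be much larger than this upper bound:
    s_upper_bound = u_size * v_size * noise_std ** 2
    # With unknown errors, start from a large ``s`` and decrease it
    # progressively (factors of 10 at first) while inspecting the fits.
    trial_values = [s_upper_bound * 10 ** k for k in range(3, -1, -1)]
    return s_upper_bound, trial_values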
| bsd-3-clause |
BennerLab/atg | atg/data/ensembl.py | 1 | 7239 | """
Find species data in Ensembl, recording genome and annotation URLs.
"""
import os
import sys
import pandas
import ftplib
import string
import atg.config
import atg.data.retrieve
ENSEMBL_SPECIES_INFORMATION = 'ftp://ftp.ensemblgenomes.org/pub/release-35/species.txt'
ENSEMBL_DNA_BASE_LOCATION = string.Template('pub/release-35/$division/fasta$collection/$species/dna/')
ENSEMBL_GTF_BASE_LOCATION = string.Template('pub/release-35/$division/gtf$collection/$species/$assembly.'
'$version.gtf.gz')
class EnsemblSpecies:
"""
A class for fetching and managing species data from Ensembl Genomes, which include many organisms not found on
the main Ensembl site. Files for these organisms are stored in individual subfolders in e.g.
~/ATGData/ensemblgenomes/.
"""
def __init__(self):
self.data_root = os.path.expanduser(atg.config.settings['Data']['Root'])
ensembl_genome_file = os.path.join(self.data_root, 'ensembl_species.txt')
if not os.path.exists(ensembl_genome_file):
atg.data.retrieve.fetch_url(ENSEMBL_SPECIES_INFORMATION, ensembl_genome_file)
self.ensembl_species_df = pandas.read_csv(ensembl_genome_file, index_col=False, sep='\t')
def get_species_information(self, species):
"""
:param species: genus and species (as named by Ensembl), e.g. zea_mays
:return: dictionary containing URLs to genome fasta and gene annotation (GTF), if found
"""
if sum(self.ensembl_species_df.species.isin([species])) == 0:
return {'species': species}
# pull out first matching record
ensembl_record = self.ensembl_species_df.loc[self.ensembl_species_df['species'] == species].iloc[0]
ensembl_division = ensembl_record.loc['division'].lstrip('Ensembl').lower()
# could access assembly ID or accession from record, but the Ensembl files don't use one consistently
ensembl_core_db = ensembl_record.loc['core_db']
if "collection" in ensembl_core_db:
collection_path = '/' + ensembl_core_db.split('_core_')[0]
else:
collection_path = ''
with ftplib.FTP('ftp.ensemblgenomes.org') as ftp:
ftp.login()
genome_listing = ftp.nlst(ENSEMBL_DNA_BASE_LOCATION.safe_substitute(division=ensembl_division,
species=species,
collection=collection_path))
genome_location = ''
annotation_location = ''
genome_assembly_version = ''
# find toplevel unmasked genome
for filename in genome_listing:
if 'dna.toplevel' in filename:
genome_location = filename
break
if genome_location != '':
genome_filename = genome_location.split('/')[-1]
genome_assembly = genome_filename.rstrip('.dna.toplevel.fa.gz')
genome_assembly_version = genome_assembly.split('.', maxsplit=1)[1]
annotation_listing = ftp.nlst(ENSEMBL_GTF_BASE_LOCATION.safe_substitute(division=ensembl_division,
species=species,
assembly=genome_assembly,
collection=collection_path,
version=35))
if len(annotation_listing) == 0:
annotation_location = ''
elif len(annotation_listing) == 1:
annotation_location = annotation_listing[0]
else:
annotation_location = 'multiple'
ftp.close()
return {'species': species, 'genome': genome_location, 'annotation': annotation_location,
'version': genome_assembly_version}
def collect_species_information(self, species_list):
"""
Given a list of species names, create a dataframe containing all information
:param species_list:
:return: dataframe
"""
record_list = []
for species in species_list:
record_list.append(self.get_species_information(species))
return pandas.DataFrame.from_records(record_list)
def retrieve_species_data(self, species):
"""
Download data from Ensembl.
:param species:
:return: True if successful
"""
species_information = self.get_species_information(species)
if len(species_information) == 1:
return False
ensembl_species_path = os.path.join(self.data_root, 'ensemblgenomes', species)
os.makedirs(ensembl_species_path, exist_ok=True)
for filetype in ('genome', 'annotation'):
filename = os.path.split(species_information[filetype])[-1].rstrip('.gz') # remove .gz extension if present
ensembl_url = 'ftp://ftp.ensemblgenomes.org/' + species_information[filetype]
output_filename = os.path.join(ensembl_species_path, filename)
atg.data.retrieve.fetch_url(ensembl_url, output_filename)
return True
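# --- Editor's illustrative sketch (not part of the module): looking up genome
# and annotation URLs for one organism and for a small batch.  ``zea_mays`` is
# taken from the docstrings; ``oryza_sativa`` is an assumed example.  Running
# this needs network access to ftp.ensemblgenomes.org and a configured
# ATGData root.
def _example_lookup_species():
    tracker = EnsemblSpecies()
    single = tracker.get_species_information('zea_mays')
    # ``single`` holds 'species', 'genome', 'annotation' and 'version' keys
    # when the organism is found, or only 'species' otherwise.
    table = tracker.collect_species_information(['zea_mays', 'oryza_sativa'])
    return single, table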
def retrieve_ensembl_species(namespace):
# get list of species from file or namespace
if namespace.list:
species_list = pandas.read_csv(namespace.species_name[0], index_col=False, header=None).iloc[:, 0].tolist()
else:
species_list = namespace.species_name
tracker = EnsemblSpecies()
# output species information as table, or download
if namespace.table:
species_df = tracker.collect_species_information(species_list)
species_df.to_csv(sys.stdout, sep="\t", index=False, columns=['species', 'genome', 'annotation', 'version'])
else:
for species in species_list:
retrieval_success = tracker.retrieve_species_data(species)
if retrieval_success:
print('%s retrieved successfully.' % species)
else:
print('%s information not retrieved.' % species)
def setup_subparsers(subparsers):
retrieval_parser = subparsers.add_parser('species', help="Fetch an organism by genus and species")
retrieval_parser.add_argument('species_name', nargs="+",
help="one or more genus/species for an organism in Ensembl, e.g. zea_mays")
retrieval_parser.add_argument('-l', '--list', action="store_true", help="species are provided in a text file given"
"as the only argument")
retrieval_parser.add_argument('-t', '--table', action="store_true",
help="instead of downloading data, write the species information to stdout")
# retrieval_parser.add_argument('-o', '--overwrite', action="store_true", help="Overwrite existing files")
retrieval_parser.set_defaults(func=retrieve_ensembl_species)
| gpl-3.0 |
kinghadi/word_cloud | doc/sphinxext/gen_rst.py | 17 | 33207 | """
Example generation for the python wordcloud project. Stolen from scikit-learn with modifications from PyStruct.
Generate the rst files for the examples by iterating over the python
example files.
Hacked to plot every example (not only those that start with 'plot').
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import cPickle
import re
import urllib2
import gzip
import posixpath
import codecs
try:
from PIL import Image
except:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
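# --- Editor's illustrative sketch (not part of the build script): ``Tee``
# duplicates every write to both underlying streams, which is how the example
# runner below captures stdout while still echoing it.
def _example_tee():
    buf1, buf2 = StringIO(), StringIO()
    tee = Tee(buf1, buf2)
    tee.write('hello')
    tee.flush()
    return buf1.getvalue(), buf2.getvalue()   # ('hello', 'hello')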
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local '
'package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
for comb_name in comb_names:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
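# --- Editor's illustrative sketch (not part of the build script): resolving a
# documentation link for a numpy object.  The doc URL is the one used in
# ``embed_code_links`` below; fetching the search index requires network
# access, and the chosen object is an assumption.
def _example_resolve_numpy_link():
    resolver = SphinxDocLinkResolver('http://docs.scipy.org/doc/numpy-1.6.0')
    cobj = {'name': 'array', 'module': 'numpy.core.multiarray',
            'module_short': 'numpy', 'obj_type': 'function'}
    return resolver.resolve(cobj, this_url='')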
###############################################################################
rst_template = """
.. _%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 16px;
top: 0;
left: 0;
-webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
-moz-border-radius: 10px; /* FF1-3.6 */
border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
border: 2px solid #fff;
-webkit-transition: all 0.15s ease-out; /* Saf3.2+, Chrome */
-moz-transition: all 0.15s ease-out; /* FF4+ */
-ms-transition: all 0.15s ease-out; /* IE10? */
-o-transition: all 0.15s ease-out; /* Opera 10.5+ */
transition: all 0.15s ease-out;
background-repeat: no-repeat;
/* --> Thumbnail image size */
width: 150px;
height: 130px;
}
.figure img {
display: inline;
}
.figure .caption {
text-align: center !important;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = file(example_file).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
tokens = tokenize.generate_tokens(lines.__iter__().next)
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and (check_docstring == True)):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = filter(lambda x: x.endswith('.py'), file_list)
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:,0].astype(np.str),
unsorted[:,1].astype(np.float)))
return np.array(unsorted[index][:,0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file'
% src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
for fname in sorted_listdir:
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, plot_gallery)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write("""
.. raw:: html
<div class="thumbnailContainer">
""")
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`%s`
.. raw:: html
</div>
.. toctree::
:hidden:
%s/%s
""" % (link_name, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy', 'wordcloud']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
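# --- Editor's illustrative sketch (not part of the build script): the helper
# above collapses a dotted module path to the shortest importable prefix that
# still exposes the object.  The example assumes the ``wordcloud`` package is
# importable and re-exports ``WordCloud`` at the top level.
def _example_short_module_name():
    return get_short_module_name('wordcloud.wordcloud', 'WordCloud')  # 'wordcloud'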
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery:
# generate the plot as png image if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if (not os.path.exists(first_image_file) or
os.stat(first_image_file).st_mtime <=
os.stat(src_file).st_mtime):
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt, '__file__': src_file}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
# get variables so we can later add links to the documentation
example_code_obj = {}
for var_name, var in my_globals.iteritems():
if not hasattr(var, '__module__'):
continue
if not isinstance(var.__module__, basestring):
continue
if var.__module__.split('.')[0] not in DOCMODULES:
continue
# get the type as a string with other things stripped
tstr = str(type(var))
tstr = (tstr[tstr.find('\'')
+ 1:tstr.rfind('\'')].split('.')[-1])
# get shortened module name
module_short = get_short_module_name(var.__module__,
tstr)
cobj = {'name': tstr, 'module': var.__module__,
'module_short': module_short,
'obj_type': 'object'}
example_code_obj[var_name] = cobj
# find functions so we can later add links to the documentation
funregex = re.compile('[\w.]+\(')
with open(src_file, 'rt') as fid:
for line in fid.readlines():
if line.startswith('#'):
continue
for match in funregex.findall(line):
fun_name = match[:-1]
try:
exec('this_fun = %s' % fun_name, my_globals)
except Exception:
#print 'extracting function failed'
#print err
continue
this_fun = my_globals['this_fun']
if not callable(this_fun):
continue
if not hasattr(this_fun, '__module__'):
continue
if not isinstance(this_fun.__module__, basestring):
continue
if (this_fun.__module__.split('.')[0]
not in DOCMODULES):
continue
# get shortened module name
fun_name_short = fun_name.split('.')[-1]
module_short = get_short_module_name(
this_fun.__module__, fun_name_short)
cobj = {'name': fun_name_short,
'module': this_fun.__module__,
'module_short': module_short,
'obj_type': 'function'}
example_code_obj[fun_name] = cobj
fid.close()
if len(example_code_obj) > 0:
# save the dictionary, so we can later add hyperlinks
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
cPickle.dump(example_code_obj, fid,
cPickle.HIGHEST_PROTOCOL)
fid.close()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % time_elapsed
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
if os.path.exists(first_image_file):
make_thumbnail(first_image_file, thumb_file, 200, 140)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['wordcloud'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['sklearn'] = SphinxDocLinkResolver(
'http://scikit-learn.org/stable')
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with codecs.open(full_fname, 'rt', encoding='utf-8') as fid:
lines_in = fid.readlines()
fid.close()
with open(full_fname, 'wt') as fid:
for line in lines_in:
for name, link in str_repl.iteritems():
try:
line = line.encode("ascii", 'ignore').replace(name, link)
except Exception as e:
print(line)
print(name)
print(link)
raise e
fid.write(line)
fid.close()
except urllib2.HTTPError, e:
print ("The following HTTP Error has occurred:\n")
print e.code
except urllib2.URLError, e:
print ("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print e.args
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The model is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| mit |
jamesp/shallowwater | beta_plane/exoplanet.py | 1 | 4070 | import sys
import numpy as np
import xarray as xr
from tqdm import tqdm
from shallowwater import PeriodicLinearShallowWater
from plotting import plot_wind_arrows
nx = 128*4
ny = 129
nd = 25 # number of days to run
DAY = 86400
RADIUS = 6371e3
PLOT = False
SNAP_DAYS = 5
# # Radius of deformation: Rd = sqrt(2 c / beta)
Rd = 3000.0e3 # Fix Rd at 2000km
Lx = 4*np.pi*RADIUS
Ly = Lx//4
beta0=3e-13
# Kelvin/gravity wave speed: c = sqrt(phi0)
phi0 = float(sys.argv[1])
c = np.sqrt(phi0)
delta_phi = phi0*0.1
print('c', c)
# cfl = 0.4 # For numerical stability CFL = |u| dt / dx < 1.0
# dx = Lx / nx
# dt = np.floor(cfl * dx / (c*4))
# print('dt', dt)
if c > 32:
dt = 600
else:
dt = 1200
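# --- Editor's illustrative sketch (not part of the model script): the
# CFL-based timestep described in the commented-out lines above, kept as a
# helper instead of the hard-coded choice.  The factor of 4 on the wave speed
# and cfl=0.4 follow those comments; treat them as assumptions.
def _cfl_timestep(phi0, Lx, nx, cfl=0.4):
    c = np.sqrt(phi0)   # Kelvin/gravity wave speed
    dx = Lx / nx        # zonal grid spacing
    return np.floor(cfl * dx / (4 * c))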
tau_rad = 4.0*DAY
tau_fric = 4.0*DAY
class MatsunoGill(PeriodicLinearShallowWater):
def __init__(self, nx, ny, Lx, Ly, alpha, beta, phi0,
tau_fric, tau_rad, dt=dt, nu=5.0e2, r=1e-4):
super(MatsunoGill, self).__init__(nx, ny, Lx, Ly, beta=beta, g=1.0, H=phi0, f0=0.0, dt=dt, nu=nu, r=r)
self.alpha = alpha
self.phi0 = phi0
#self.phi[:] += phi0
def to_dataset(self):
dataset = super(MatsunoGill, self).to_dataset()
dataset['phi_eq'] = xr.DataArray(self.phi_eq().T.copy(), coords=(dataset.y, dataset.x))
dataset['phi_eq_xi'] = xr.DataArray(self.centre_substellar(self.phi_eq()).T.copy(), coords=(dataset.y, dataset.x))
dataset['phi_xi'] = xr.DataArray(self.centre_substellar(self.phi).T.copy(), coords=(dataset.y, dataset.x))
return dataset
def substellarx(self, t=None):
if t is None:
t = self.t
return np.fmod(t*self.alpha*self.c, self.Lx)
@property
def c(self):
return np.sqrt(self.phi0)
@property
def phixi(self):
subx = self.substellarx()
sx = self.phix - subx
sx[sx < -self.Lx/2] = sx[sx < -self.Lx/2] + self.Lx
sx[sx > self.Lx/2] = sx[sx > self.Lx/2] - self.Lx
return sx
def centre_substellar(self, psi):
subi = np.argmin(self.phixi**2)
return np.roll(psi, self.nx//2 - subi, axis=0)
def phi_eq(self):
return delta_phi*np.exp(-((self.phixi)**2 + self.phiy**2) / (Rd**2))
def rhs(self):
u, v, phi = self.state
# phi rhs
dphi = np.zeros_like(phi)
du, dv = np.zeros_like(self.u), np.zeros_like(self.v)
# Newtonian cooling / Rayleigh Friction
dphi += (self.phi_eq() - phi)/tau_rad
du -= u / tau_fric
dv -= v / tau_fric
return np.array([du, dv, dphi])
alphas = [-2., -1., -.75, -.5, -.25, -.1, 0., .1, .25, .5, .75, 1., 2.]
betas = [1, 3, 10, 30, 100, 300]
#betas = [1., 10., 100.]
#alphas = [0.]
#betas = [1]
odata = []
if PLOT:
import matplotlib.pyplot as plt
plt.ion()
fig, ax = plt.subplots()
for b in tqdm(betas):
beta = b*beta0
bdata = []
for a in tqdm(alphas):
atmos = MatsunoGill(nx, ny, Lx, Ly, beta=beta, alpha=a,
phi0=phi0, tau_fric=tau_fric, tau_rad=tau_rad,
dt=dt, nu=5.0e3)
snapshots = []
def take_snapshot():
dset = atmos.to_dataset()
dset.coords['time'] = atmos.t
snapshots.append(dset)
take_snapshot()
prog = tqdm(range(int(nd*DAY/dt)))
for i in prog:
atmos.step()
if atmos.t % (86400*SNAP_DAYS) == 0:
#print('%.1f\t%.2f' % (atmos.t/DAY, np.max(atmos.u**2)))
take_snapshot()
prog.set_description('u: %.2f' % atmos.u.max())
if PLOT:
plt.clf()
dset.phi.plot.contourf(levels=13)
plt.show()
plt.pause(0.01)
adata = xr.concat(snapshots, dim='time')
adata.coords['alpha'] = a
bdata.append(adata)
data = xr.concat(bdata, dim='alpha')
data.coords['beta'] = b
odata.append(data)
data = xr.concat(odata, dim='beta')
data.to_netcdf('/Users/jp492/Dropbox/data/beta_data_linear_h%.0f.nc' % (phi0)) | mit |
Clyde-fare/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
ml-lab/neuralnilm | neuralnilm/data/source.py | 4 | 3500 | from __future__ import print_function, division
from copy import copy
import numpy as np
import pandas as pd
from .batch import Batch
import logging
logger = logging.getLogger(__name__)
class Sequence(object):
"""
Attributes
----------
input : np.ndarray
target : np.ndarray
all_appliances : pd.DataFrame
Column names are the appliance names.
metadata : dict
weights : np.ndarray or None
"""
def __init__(self, shape):
self.input = np.zeros(shape, dtype=np.float32)
self.target = np.zeros(shape, dtype=np.float32)
self.all_appliances = pd.DataFrame()
self.metadata = {}
self.weights = None
class Source(object):
def __init__(self, rng_seed=None, num_batches_for_validation=16):
logger.info("------------- INITIALISING {} --------------"
.format(self.__class__.__name__))
self.rng_seed = rng_seed
self.rng = np.random.RandomState(rng_seed)
self.num_batches_for_validation = num_batches_for_validation
def get_sequence(self, validation=False):
"""
Returns
-------
sequence : Sequence
"""
raise NotImplementedError()
def get_batch(self, num_seq_per_batch, fold='train',
enable_all_appliances=False, validation=False):
"""
Returns
-------
iterators of Batch objects
"""
seq_iterator = self.get_sequence(
fold=fold,
enable_all_appliances=enable_all_appliances)
stop = False
batch_i = 0
while not stop:
if validation and batch_i == self.num_batches_for_validation:
break
input_sequences = []
target_sequences = []
weights = []
all_appliances = {}
for i in range(num_seq_per_batch):
try:
seq = seq_iterator.next()
except StopIteration:
stop = True
seq = Sequence((self.seq_length, 1))
seq.weights = np.zeros(
(self.seq_length, 1), dtype=np.float32)
if enable_all_appliances:
all_appliances[i] = seq.all_appliances
input_sequences.append(seq.input[np.newaxis, :])
target_sequences.append(seq.target[np.newaxis, :])
if seq.weights is not None:
weights.append(seq.weights[np.newaxis, :])
batch = Batch()
batch.metadata['fold'] = fold
batch.metadata['source_name'] = self.__class__.__name__
batch.before_processing.input = np.concatenate(input_sequences)
del input_sequences
batch.before_processing.target = np.concatenate(target_sequences)
del target_sequences
if enable_all_appliances:
batch.all_appliances = pd.concat(
all_appliances, axis=1, names=['sequence', 'appliance'])
if weights:
batch.weights = np.concatenate(weights)
yield batch
batch_i += 1
@classmethod
def _attrs_to_remove_for_report(cls):
return ['activations', 'rng']
def report(self):
report = copy(self.__dict__)
report['name'] = self.__class__.__name__
for attr in self._attrs_to_remove_for_report():
report.pop(attr, None)
return report
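# --- Editor's illustrative sketch (not part of the library): a minimal
# concrete Source whose ``get_sequence`` is an endless generator of random
# sequences, showing the contract that ``get_batch`` above relies on.  The
# sequence length of 16 and the toy input/target relationship are assumptions.
class _ToyRandomSource(Source):
    def __init__(self, seq_length=16, **kwargs):
        self.seq_length = seq_length
        super(_ToyRandomSource, self).__init__(**kwargs)

    def get_sequence(self, fold='train', enable_all_appliances=False):
        while True:
            seq = Sequence((self.seq_length, 1))
            seq.input[:] = self.rng.uniform(size=(self.seq_length, 1))
            seq.target[:] = 0.5 * seq.input   # toy target: half the input
            yield seq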
| apache-2.0 |
MJuddBooth/pandas | pandas/tests/extension/base/printing.py | 2 | 1231 | import io
import pytest
import pandas as pd
from pandas import compat
from .base import BaseExtensionTests
class BasePrintingTests(BaseExtensionTests):
"""Tests checking the formatting of your EA when printed."""
@pytest.mark.parametrize("size", ["big", "small"])
def test_array_repr(self, data, size):
if size == "small":
data = data[:5]
else:
data = type(data)._concat_same_type([data] * 5)
result = repr(data)
assert data.__class__.__name__ in result
assert 'Length: {}'.format(len(data)) in result
assert str(data.dtype) in result
if size == 'big':
assert '...' in result
def test_array_repr_unicode(self, data):
result = compat.text_type(data)
assert isinstance(result, compat.text_type)
def test_series_repr(self, data):
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
def test_dataframe_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
def test_dtype_name_in_info(self, data):
buf = io.StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
result = buf.getvalue()
assert data.dtype.name in result
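# --- Editor's illustrative sketch (not part of pandas): third-party
# ExtensionArray authors reuse this suite by subclassing it in their own test
# package next to pytest fixtures named ``data``/``dtype`` for their array.
# The leading underscore keeps the sketch out of test collection; a real
# subclass would be named ``TestPrinting`` and sit beside a conftest.py that
# defines those fixtures.
class _ExamplePrintingSuite(BasePrintingTests):
    pass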
| bsd-3-clause |
ganeshgore/myremolab | server/src/test/util/stress.py | 2 | 8986 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import time
import time as time_module
import threading
import voodoo.counter as counter
DEBUGGING = True
def avg(l):
total = 0
for value in l:
total += value
return float(total) / len(l)
class RunningThread(threading.Thread):
def __init__(self, condition, iterations, func, name):
threading.Thread.__init__(self)
self.setName(counter.next_name("RunningThread_%s" % self.name))
self.condition = condition
self.func = func
self.times = []
self.iterations = iterations
self.waiting = False
def run(self):
self.condition.acquire()
try:
self.waiting = True
self.condition.wait()
finally:
self.condition.release()
for _ in xrange(self.iterations):
t1 = time.time()
self.func()
t2 = time.time()
self.times.append(t2 - t1)
class ThreadedRunner(object):
MAX_WAITING_TIME = 15
def __init__(self, func, threads, iterations, name):
self.func = func
self.threads = threads
self.iterations = iterations
self.name = name
self.times = []
def run(self):
condition = threading.Condition()
threads = []
for _ in xrange(self.threads):
thread = RunningThread(condition, self.iterations, self.func, self.name)
threads.append(thread)
for thread in threads:
thread.start()
while len([ t for t in threads if t.waiting ]) != self.threads:
time_module.sleep(0.05)
condition.acquire()
try:
condition.notifyAll()
finally:
condition.release()
self.times = []
if DEBUGGING:
import sys, time
start_time = time.time()
waiting_threads = threads[:]
while len(waiting_threads) > 0:
number_before = len(waiting_threads)
waiting_threads = [ t for t in waiting_threads if t.isAlive() ]
for _ in xrange(number_before - len(waiting_threads)):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.1)
if time.time() - start_time > self.MAX_WAITING_TIME:
break
print
for thread in threads:
thread.join(self.MAX_WAITING_TIME)
if not thread.isAlive():
self.times.extend(thread.times)
return self.times
class SequentialRunner(object):
def __init__(self, func, iterations, name):
self.func = func
self.iterations = iterations
self.name = name
self.times = []
def run(self):
self.times = []
for _ in xrange(self.iterations):
t1 = time.time()
self.func()
t2 = time.time()
self.times.append(t2 - t1)
return self.times
class MainRunner(object):
MATPLOTLIB_BACKEND = 'Agg'
DEFAULT_NUMBER_OF_TIMES = 2
matplotlib = None
plt = None
def __init__(self, func, name, assert_klass = AssertionError):
self.func = func
self.name = name
self.assert_klass = assert_klass
self.runner = None
def run_threaded(self, threads, iterations, max_time):
self.runner = ThreadedRunner(self.func, threads, iterations, self.name)
return self._run(self.runner, max_time)
def run_sequential(self, iterations, max_time):
self.runner = SequentialRunner(self.func, iterations, self.name)
return self._run(self.runner, max_time)
def _run(self, runner, max_time):
times = runner.run()
slow_times = [ t for t in times if t > max_time]
if len(slow_times) > 0:
raise self.assert_klass("MainRunner %s; Too slow: %s; avg = %s; max = %s which > %s" % (self.name, slow_times, avg(times), max(slow_times), max_time))
return times
def print_graphics_threaded(self, filename, threads_configurations, iterations, number_of_times = None):
if number_of_times is None:
number_of_times = self.DEFAULT_NUMBER_OF_TIMES
total_results = []
last_times = []
for _ in xrange(number_of_times):
results = []
for threads in threads_configurations:
if DEBUGGING:
print " running %s threads..." % threads
runner = ThreadedRunner(self.func, threads, iterations, self.name)
times = runner.run()
max_time = max(times)
avg_time = avg(times)
min_time = min(times)
results.append((threads, max_time, avg_time, min_time))
last_times.append(times)
total_results.append(results)
self._print_results('threads', total_results, filename, last_times)
def print_graphics_sequential(self, filename, iterations_configurations, number_of_times = None):
if number_of_times is None:
number_of_times = self.DEFAULT_NUMBER_OF_TIMES
total_results = []
last_times = []
for _ in xrange(number_of_times):
results = []
for iterations in iterations_configurations:
if DEBUGGING:
print " running %s iterations..." % iterations
runner = SequentialRunner(self.func, iterations, self.name)
times = runner.run()
max_time = max(times)
avg_time = avg(times)
min_time = min(times)
results.append((iterations, max_time, avg_time, min_time))
last_times.append(times)
total_results.append(results)
self._print_results('iterations', total_results, filename, last_times)
def _print_results(self, variable, total_results, filename, last_times):
grouped_results = [ zip(*new_results) for new_results in zip(*total_results) ]
results = [ (variables[0], avg(max_n), avg(avg_n), avg(min_n)) for variables, max_n, avg_n, min_n in grouped_results ]
if self.matplotlib is None:
import matplotlib
matplotlib.use(self.MATPLOTLIB_BACKEND)
import matplotlib.pyplot as plt
MainRunner.matplotlib = matplotlib
MainRunner.plt = plt
xs = []
ys_max = []
ys_avg = []
ys_min = []
for variable, max_time, avg_time, min_time in results:
xs.append(variable)
ys_max.append(max_time)
ys_avg.append(avg_time)
ys_min.append(min_time)
fig = self.plt.figure()
ax = fig.add_subplot(111)
ax.plot(xs, ys_max, 'r-')
ax.plot(xs, ys_avg, 'g-')
ax.plot(xs, ys_min, 'b-')
xlabel = 'red: max; green: avg; blue: min\nfunc: %s; variable: %s' % (self.func.__name__, variable)
ax.set_xlabel(xlabel)
self.plt.savefig(filename)
fig = self.plt.figure()
ax = fig.add_subplot(111)
ax.plot(xs, ys_avg, 'g-')
ax.plot(xs, ys_min, 'b-')
xlabel = 'green: avg; blue: min\nfunc: %s; variable: %s' % (self.func.__name__, variable)
ax.set_xlabel(xlabel)
self.plt.savefig(filename.replace('.png','_without_max.png'))
# Now the distribution of the last sample
for times in last_times:
times.sort()
avg_times = [ avg(x) for x in zip(*last_times) ]
max_time = max(avg_times)
intervals = 200
interval_size = 1.0 * max_time / intervals
xs = []
ys = []
for interval_number in xrange(intervals):
max_size = (interval_number + 1) * interval_size
min_size = interval_number * interval_size
avg_size = (max_size + min_size) / 2.0
xs.append(avg_size)
ys.append(len([ t for t in avg_times if t > min_size and t <= max_size]))
fig = self.plt.figure()
ax = fig.add_subplot(111)
ax.plot(xs, ys, 'b-')
xlabel = 'Distribution'
ax.set_xlabel(xlabel)
self.plt.savefig(filename.replace('.png','_last_distribution.png'))
fig = self.plt.figure()
ax = fig.add_subplot(111)
ax.plot(xs, ys, 'b-')
xlabel = 'Distribution'
ax.set_xlabel(xlabel)
ax.axis([0,max_time,0,15])
self.plt.savefig(filename.replace('.png','_last_distribution_tail.png'))
| bsd-2-clause |
Sentient07/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method for exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
tempbottle/Nuitka | tests/library/compile_library.py | 1 | 4896 | #!/usr/bin/env python
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, tempfile, subprocess
# Find common code relative in file system. Not using packages for test stuff.
sys.path.insert(
0,
os.path.normpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
".."
)
)
)
from test_common import (
my_print, # @UnresolvedImport
setup, # @UnresolvedImport
compareWithCPython, # @UnresolvedImport
createSearchMode # @UnresolvedImport
)
python_version = setup(needs_io_encoding = True)
search_mode = createSearchMode()
start_at = sys.argv[2] if len(sys.argv) > 2 else None
if start_at:
active = False
else:
active = True
os_path = os.path.normcase(os.path.dirname(os.__file__))
my_print("Using standard library path", os_path)
try:
import numpy
extra_path = os.path.normcase(
os.path.dirname(
os.path.dirname(
numpy.__file__
)
)
)
my_print("Using extra library path", extra_path)
except ImportError:
extra_path = os_path
try:
import matplotlib
extra_path2 = os.path.normcase(
os.path.dirname(
os.path.dirname(
matplotlib.__file__
)
)
)
my_print("Using extra2 library path", extra_path2)
except ImportError:
extra_path2 = os_path
os_path = os.path.normpath(os_path)
extra_path = os.path.normpath(extra_path)
tmp_dir = tempfile.gettempdir()
# Try to avoid RAM disk /tmp and use the disk one instead.
if tmp_dir == "/tmp" and os.path.exists("/var/tmp"):
tmp_dir = "/var/tmp"
stage_dir = os.path.join(tmp_dir, "compile_library")
blacklist = (
"__phello__.foo.py", # Triggers error for "." in module name
)
def compilePath(path):
global active
for root, _dirnames, filenames in os.walk(path):
filenames = [
filename
for filename in filenames
if filename.endswith(".py")
if filename not in blacklist
]
for filename in sorted(filenames):
if "(" in filename:
continue
path = os.path.join(root, filename)
if not active and start_at in ( filename, path ):
active = True
if not active:
continue
command = [
sys.executable,
os.path.join(
"..",
"..",
"bin",
"nuitka"
),
"--module",
"--output-dir",
stage_dir,
"--recurse-none",
"--remove-output"
]
command += os.environ.get("NUITKA_EXTRA_OPTIONS", "").split()
command.append(path)
my_print(path, ":", end = " ")
sys.stdout.flush()
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
my_print("Falling back to full comparison due to error exit.")
compareWithCPython(
dirname = None,
filename = path,
extra_flags = ["expect_failure"],
search_mode = search_mode,
needs_2to3 = False
)
else:
my_print("OK")
if os.name == "nt":
suffix = "pyd"
else:
suffix = "so"
target_filename = os.path.basename(path).replace(".py","."+suffix)
target_filename = target_filename.replace("(","").replace(")","")
os.unlink(
os.path.join(
stage_dir, target_filename
)
)
compilePath(os_path)
if extra_path != os_path:
compilePath(extra_path)
if extra_path2 not in (os_path, extra_path):
compilePath(extra_path2)
| apache-2.0 |
wzbozon/statsmodels | statsmodels/sandbox/tsa/garch.py | 25 | 52178 | '''general non-linear MLE for time series analysis
idea for general version
------------------------
subclass defines geterrors(parameters) besides loglike,...
and covariance matrix of parameter estimates (e.g. from hessian
or outerproduct of jacobian)
update: I don't really need geterrors directly, but get_h the conditional
variance process
new version Garch0 looks ok, time to clean up and test
no constraints yet
in some cases: "Warning: Maximum number of function evaluations has been exceeded."
Notes
-----
idea: cache intermediate design matrix for geterrors so it doesn't need
to be build at each function call
superclass or result class calculates result statistic based
on errors, loglike, jacobian and cov/hessian
-> aic, bic, ...
-> test statistics, tvalue, fvalue, ...
-> new to add: distribution (mean, cov) of non-linear transformation
-> parameter restrictions or transformation with corrected covparams (?)
-> sse, rss, rsquared ??? are they defined from this in general
-> robust parameter cov ???
-> additional residual based tests, NW, ... likelihood ratio, lagrange
multiplier tests ???
how much can be reused from linear model result classes where
`errorsest = y - X*beta` ?
for tsa: what's the division of labor between model, result instance
and process
examples:
* arma: ls and mle look good
* arimax: add exog, especially mean, trend, prefilter, e.g. (1-L)
* arma_t: arma with t distributed errors (just a change in loglike)
* garch: need loglike and (recursive) errorest
* regime switching model without unobserved state, e.g. threshold
roadmap for garch:
* simple case
* starting values: garch11 explicit formulas
* arma-garch, assumed separable, blockdiagonal Hessian
* empirical example: DJI, S&P500, MSFT, ???
* other standard garch: egarch, pgarch,
* non-normal distributions
* other methods: forecast, news impact curves (impulse response)
* analytical gradient, Hessian for basic garch
* cleaner simulation of garch
* result statistics, AIC, ...
* parameter constraints
* try penalization for higher lags
* other garch: regime-switching
for pgarch (power garch) need transformation of etax given
the parameters, but then misofilter should work
general class aparch (see garch glossary)
References
----------
see notes_references.txt
Created on Feb 6, 2010
@author: "josef pktd"
'''
from __future__ import print_function
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import optimize, signal
import matplotlib.pyplot as plt
import numdifftools as ndt
from statsmodels.base.model import Model, LikelihoodModelResults
from statsmodels.sandbox import tsa
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=0)
def normloglike(x, mu=0, sigma2=1, returnlls=False, axis=0):
x = np.asarray(x)
x = np.atleast_1d(x)
if axis is None:
x = x.ravel()
#T,K = x.shape
if x.ndim > 1:
nobs = x.shape[axis]
else:
nobs = len(x)
x = x - mu # assume can be broadcasted
if returnlls:
#Compute the individual log likelihoods if needed
lls = -0.5*(np.log(2*np.pi) + np.log(sigma2) + x**2/sigma2)
# Use these to compute the LL
LL = np.sum(lls,axis)
return LL, lls
else:
#Compute the log likelihood
#print(np.sum(np.log(sigma2),axis))
LL = -0.5 * (np.sum(np.log(sigma2),axis) + np.sum((x**2)/sigma2, axis) + nobs*np.log(2*np.pi))
return LL
# copied from model.py
class LikelihoodModel(Model):
"""
Likelihood model is a subclass of Model.
"""
def __init__(self, endog, exog=None):
super(LikelihoodModel, self).__init__(endog, exog)
self.initialize()
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
"""
pass
#TODO: if the intent is to re-initialize the model with new data then
# this method needs to take inputs...
def loglike(self, params):
"""
Log-likelihood of model.
"""
raise NotImplementedError
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
"""
raise NotImplementedError
def information(self, params):
"""
Fisher information matrix of model
Returns -Hessian of loglike evaluated at params.
"""
raise NotImplementedError
def hessian(self, params):
"""
The Hessian matrix of the model
"""
raise NotImplementedError
def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
An optional
method : str
Method can be 'newton', 'bfgs', 'powell', 'cg', or 'ncg'.
The default is newton. See scipy.optimze for more information.
"""
methods = ['newton', 'bfgs', 'powell', 'cg', 'ncg', 'fmin']
if start_params is None:
start_params = [0]*self.exog.shape[1] # will fail for shape (K,)
if not method in methods:
raise ValueError("Unknown fit method %s" % method)
f = lambda params: -self.loglike(params)
score = lambda params: -self.score(params)
# hess = lambda params: -self.hessian(params)
hess = None
#TODO: can we have a unified framework so that we can just do func = method
# and write one call for each solver?
if method.lower() == 'newton':
iteration = 0
start = np.array(start_params)
history = [np.inf, start]
while (iteration < maxiter and np.all(np.abs(history[-1] - \
history[-2])>tol)):
H = self.hessian(history[-1])
newparams = history[-1] - np.dot(np.linalg.inv(H),
self.score(history[-1]))
history.append(newparams)
iteration += 1
mlefit = LikelihoodModelResults(self, newparams)
mlefit.iteration = iteration
elif method == 'bfgs':
score=None
xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
optimize.fmin_bfgs(f, start_params, score, full_output=1,
maxiter=maxiter, gtol=tol)
converge = not warnflag
mlefit = LikelihoodModelResults(self, xopt)
optres = 'xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag'
self.optimresults = dict(zip(optres.split(', '),[
xopt, fopt, gopt, Hopt, func_calls, grad_calls, warnflag]))
elif method == 'ncg':
xopt, fopt, fcalls, gcalls, hcalls, warnflag = \
optimize.fmin_ncg(f, start_params, score, fhess=hess,
full_output=1, maxiter=maxiter, avextol=tol)
mlefit = LikelihoodModelResults(self, xopt)
converge = not warnflag
elif method == 'fmin':
#fmin(func, x0, args=(), xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None)
xopt, fopt, niter, funcalls, warnflag = \
optimize.fmin(f, start_params,
full_output=1, maxiter=maxiter, xtol=tol)
mlefit = LikelihoodModelResults(self, xopt)
converge = not warnflag
self._results = mlefit
return mlefit
#TODO: I take it this is only a stub and should be included in another
# model class?
class TSMLEModel(LikelihoodModel):
"""
univariate time series model for estimation with maximum likelihood
Note: This is not working yet
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(TSMLEModel, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def geterrors(self, params):
raise NotImplementedError
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
raise NotImplementedError
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print(params
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
'''estimate model by minimizing negative loglikelihood
does this need to be overwritten ?
'''
if start_params is None and hasattr(self, '_start_params'):
start_params = self._start_params
#start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(TSMLEModel, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
class Garch0(TSMLEModel):
'''Garch model,
still experimentation stage:
simplified structure, plain garch, no constraints
still looking for the design of the base class
serious bug:
ar estimate looks ok, ma estimate awful
-> check parameterization of lagpolys and constant
looks ok after adding missing constant
but still difference to garch11 function
corrected initial condition
-> only small differences left between the 3 versions
ar estimate is close to true/DGP model
note constant has different parameterization
but design looks better
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Garch0, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
# put this in fit (?) or in initialize instead
self._etax = endog**2
self._icetax = np.atleast_1d(self._etax.mean())
def initialize(self):
pass
def geth(self, params):
'''
Parameters
----------
params : tuple, (ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma, mu = params
#etax = self.endog #this would be enough for basic garch version
etax = self._etax + mu
icetax = self._icetax #read ic-eta-x, initial condition
#TODO: where does mu go with lfilter?
# shouldn't matter except for interpretation
nobs = etax.shape[0]
#check arguments of lfilter
zi = signal.lfiltic(ma,ar, icetax)
#h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
#just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
h = signal.lfilter(ma, ar, etax, zi=zi)[0]
return h
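# Editorial note, illustrative only: scipy.signal.lfilter uses the convention
# lfilter(b, a, x) with b the numerator (MA) and a the denominator (AR)
# coefficients, so passing (ma, ar) above solves ar(L) h_t = ma(L) etax_t.
# A minimal sketch, assuming ar = [1, a1] and ma = [0, m1]:
#
#   h[t] = m1 * etax[t-1] - a1 * h[t-1]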
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
make more generic with using function _convertparams
which could also include parameter transformation
_convertparams_in, _convertparams_out
allow for different distributions t, ged,...
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
# check where constant goes
#ma = np.zeros((q+1,3))
#ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma = np.concatenate(([0], params[p:p+q]))
mu = params[-1]
params = (ar, ma, mu) #(ar, ma)
h = self.geth(params)
#temporary safe for debugging:
self.params_converted = params
self.h = h #for testing
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(h)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #no choice of axis
# same as with y = self.endog, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
class GarchX(TSMLEModel):
'''Garch model,
still experimentation stage:
another version, this time with exog and miso_filter
still looking for the design of the base class
not done yet, just a design idea
* use misofilter as in garch (gjr)
* but take etax = exog
this can include constant, asymmetric effect (gjr) and
other explanatory variables (e.g. high-low spread)
todo: renames
eta -> varprocess
etax -> varprocessx
icetax -> varprocessic (is actually ic of eta/sigma^2)
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(GarchX, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
# put this in fit (?) or in initialize instead
#nobs defined in super - verify
#self.nobs = nobs = endog.shape[0]
#add nexog to super
#self.nexog = nexog = exog.shape[1]
self._etax = np.column_stack((np.ones((endog.shape[0], 1)), endog**2, exog))
self._icetax = np.atleast_1d(self._etax.mean())
def initialize(self):
pass
def convert_mod2params(ar, ma, mu):
pass
def geth(self, params):
'''
Parameters
----------
params : tuple, (ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma, mu = params
#etax = self.endog #this would be enough for basic garch version
etax = self._etax + mu
icetax = self._icetax #read ic-eta-x, initial condition
#TODO: where does mu go with lfilter?
# shouldn't matter except for interpretation
nobs = self.nobs
## #check arguments of lfilter
## zi = signal.lfiltic(ma,ar, icetax)
## #h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
## #just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
## h = signal.lfilter(ma, ar, etax, zi=zi)[0]
##
h = miso_lfilter(ar, ma, etax, useic=self._icetax)[0]
#print('h.shape', h.shape
hneg = h<0
if hneg.any():
#h[hneg] = 1e-6
h = np.abs(h)
#todo: raise warning, maybe not during optimization calls
return h
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
make more generic with using function _convertparams
which could also include parameter transformation
_convertparams_in, _convertparams_out
allow for different distributions t, ged,...
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
# check where constant goes
#ma = np.zeros((q+1,3))
#ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma = np.concatenate(([0], params[p:p+q]))
mu = params[-1]
params = (ar, ma, mu) #(ar, ma)
h = self.geth(params)
#temporary safe for debugging:
self.params_converted = params
self.h = h #for testing
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(h)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #no choice of axis
# same as with y = self.endog, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
class Garch(TSMLEModel):
'''Garch model gjrgarch (t-garch)
still experimentation stage, try with
'''
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Garch, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def initialize(self):
pass
def geterrors(self, params):
'''
Parameters
----------
params : tuple, (mu, ar, ma)
try to keep the params conversion in loglike
copied from generate_gjrgarch
needs to be extracted to separate function
'''
#mu, ar, ma = params
ar, ma = params
eta = self.endog
nobs = eta.shape[0]
etax = np.empty((nobs,3))
etax[:,0] = 1
etax[:,1:] = (eta**2)[:,None]
etax[eta>0,2] = 0
#print('etax.shape', etax.shape
h = miso_lfilter(ar, ma, etax, useic=np.atleast_1d(etax[:,1].mean()))[0]
#print('h.shape', h.shape
hneg = h<0
if hneg.any():
#h[hneg] = 1e-6
h = np.abs(h)
#print('Warning negative variance found'
#check timing, starting time for h and eta, do they match
#err = np.sqrt(h[:len(eta)])*eta #np.random.standard_t(8, size=len(h))
# let it break if there is a len/shape mismatch
err = np.sqrt(h)*eta
return err, h, etax
def loglike(self, params):
"""
Loglikelihood for timeseries model
Notes
-----
needs to be overwritten by subclass
"""
p, q = self.nar, self.nma
ar = np.concatenate(([1], params[:p]))
#ar = np.concatenate(([1], -np.abs(params[:p]))) #???
#better safe than fast and sorry
#
ma = np.zeros((q+1,3))
ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma[:,1] = np.concatenate(([0], params[p:p+q]))
#delta lag coefficients for negative ma innovation
ma[:,2] = np.concatenate(([0], params[p+q:p+2*q]))
mu = params[-1]
params = (ar, ma) #(mu, ar, ma)
errorsest, h, etax = self.geterrors(params)
#temporary safe for debugging
self.params_converted = params
self.errorsest, self.h, self.etax = errorsest, h, etax
#h = h[:-1] #correct this in geterrors
#print('shapes errorsest, h, etax', errorsest.shape, h.shape, etax.shape
sigma2 = np.maximum(h, 1e-6)
axis = 0
nobs = len(errorsest)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
axis=0 #not used
# muy = errorsest.mean()
# # llike is verified, see below
# # same as with y = errorsest, ht = sigma2
# # np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
# llike = -0.5 * (np.sum(np.log(sigma2),axis)
# + np.sum(((errorsest)**2)/sigma2, axis)
# + nobs*np.log(2*np.pi))
# return llike
muy = errorsest.mean()
# llike is verified, see below
# same as with y = errorsest, ht = sigma2
# np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum()
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum(((self.endog)**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
def gjrconvertparams(self, params, nar, nma):
"""
flat to matrix
Notes
-----
needs to be overwritten by subclass
"""
p, q = nar, nma
ar = np.concatenate(([1], params[:p]))
#ar = np.concatenate(([1], -np.abs(params[:p]))) #???
#better safe than fast and sorry
#
ma = np.zeros((q+1,3))
ma[0,0] = params[-1]
#lag coefficients for ma innovation
ma[:,1] = np.concatenate(([0], params[p:p+q]))
#delta lag coefficients for negative ma innovation
ma[:,2] = np.concatenate(([0], params[p+q:p+2*q]))
mu = params[-1]
params2 = (ar, ma) #(mu, ar, ma)
return params2
#TODO: this should be generalized to ARMA?
#can possibly also leverage TSME above
# also note that this is NOT yet general
# it was written for my homework, assumes constant is zero
# and that process is AR(1)
# examples at the end of run as main below
class AR(LikelihoodModel):
"""
Notes
-----
This is not general, only written for the AR(1) case.
Fit methods that use super and broyden do not yet work.
"""
def __init__(self, endog, exog=None, nlags=1):
if exog is None: # extend to handle ADL(p,q) model? or subclass?
exog = endog[:-nlags]
endog = endog[nlags:]
super(AR, self).__init__(endog, exog)
self.nobs += nlags # add lags back to nobs for real T
#TODO: need to fix underscore in Model class.
#Done?
def initialize(self):
pass
def loglike(self, params):
"""
The unconditional loglikelihood of an AR(p) process
Notes
-----
Contains constant term.
"""
nobs = self.nobs
y = self.endog
ylag = self.exog
penalty = self.penalty
if isinstance(params,tuple):
# broyden (all optimize.nonlin return a tuple until rewrite commit)
params = np.asarray(params)
usepenalty=False
if not np.all(np.abs(params)<1) and penalty:
oldparams = params
params = np.array([.9999]) # make it the edge
usepenalty=True
diffsumsq = sumofsq(y-np.dot(ylag,params))
# concentrating the likelihood means that sigma2 is given by
sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
loglike = -nobs/2 * np.log(2*np.pi) - nobs/2*np.log(sigma2) + \
.5 * np.log(1-params**2) - .5*diffsumsq/sigma2 -\
ylag[0]**2 * (1-params**2)/(2*sigma2)
if usepenalty:
# subtract a quadratic penalty since we min the negative of loglike
loglike -= 1000 *(oldparams-.9999)**2
return loglike
def score(self, params):
"""
Notes
-----
Need to generalize for AR(p) and for a constant.
Not correct yet. Returns numerical gradient. Depends on package
numdifftools.
"""
y = self.endog
ylag = self.exog
nobs = self.nobs
diffsumsq = sumofsq(y-np.dot(ylag,params))
dsdr = 1/nobs * -2 *np.sum(ylag*(y-np.dot(ylag,params))[:,None])+\
2*params*ylag[0]**2
sigma2 = 1/nobs*(diffsumsq-ylag[0]**2*(1-params**2))
gradient = -nobs/(2*sigma2)*dsdr + params/(1-params**2) + \
1/sigma2*np.sum(ylag*(y-np.dot(ylag, params))[:,None])+\
.5*sigma2**-2*diffsumsq*dsdr+\
ylag[0]**2*params/sigma2 +\
ylag[0]**2*(1-params**2)/(2*sigma2**2)*dsdr
if self.penalty:
pass
j = ndt.Jacobian(self.loglike)
return j(params)
# return gradient
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now. Depends on numdifftools.
"""
h = ndt.Hessian(self.loglike)
return h(params)
def fit(self, start_params=None, method='bfgs', maxiter=35, tol=1e-08,
penalty=False):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
start_params : array-like, optional
A first guess on the parameters. Defaults is a vector of zeros.
method : str, optional
Unconstrained solvers:
Default is 'bfgs', 'newton' (newton-raphson), 'ncg'
(Note that previous 3 are not recommended at the moment.)
and 'powell'
Constrained solvers:
'bfgs-b', 'tnc'
See notes.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
penalty : bool
Whether or not to use a penalty function. Default is False,
though this is ignored at the moment and the penalty is always
used if appropriate. See notes.
Notes
-----
The unconstrained solvers use a quadratic penalty (regardless if
penalty kwd is True or False) in order to ensure that the solution
stays within (-1,1). The constrained solvers default to using a bound
of (-.999,.999).
"""
self.penalty = penalty
method = method.lower()
#TODO: allow user-specified penalty function
# if penalty and method not in ['bfgs_b','tnc','cobyla','slsqp']:
# minfunc = lambda params : -self.loglike(params) - \
# self.penfunc(params)
# else:
minfunc = lambda params: -self.loglike(params)
if method in ['newton', 'bfgs', 'ncg']:
super(AR, self).fit(start_params=start_params, method=method,
maxiter=maxiter, tol=tol)
else:
bounds = [(-.999,.999)] # assume stationarity
if start_params is None:
start_params = np.array([0]) #TODO: assumes AR(1)
if method == 'bfgs-b':
retval = optimize.fmin_l_bfgs_b(minfunc, start_params,
approx_grad=True, bounds=bounds)
self.params, self.llf = retval[0:2]
if method == 'tnc':
retval = optimize.fmin_tnc(minfunc, start_params,
approx_grad=True, bounds = bounds)
self.params = retval[0]
if method == 'powell':
retval = optimize.fmin_powell(minfunc,start_params)
self.params = retval[None]
#TODO: write regression tests for Pauli's branch so that
# new line_search and optimize.nonlin can get put in.
#http://projects.scipy.org/scipy/ticket/791
# if method == 'broyden':
# retval = optimize.broyden2(minfunc, [.5], verbose=True)
# self.results = retval
class Arma(LikelihoodModel):
"""
univariate Autoregressive Moving Average model
Note: This is not working yet, or does it
this can subclass TSMLEModel
"""
def __init__(self, endog, exog=None):
#need to override p,q (nar,nma) correctly
super(Arma, self).__init__(endog, exog)
#set default arma(1,1)
self.nar = 1
self.nma = 1
#self.initialize()
def initialize(self):
pass
def geterrors(self, params):
#copied from sandbox.tsa.arima.ARIMA
p, q = self.nar, self.nma
rhoy = np.concatenate(([1], params[:p]))
rhoe = np.concatenate(([1], params[p:p+q]))
errorsest = signal.lfilter(rhoy, rhoe, self.endog)
return errorsest
def loglike(self, params):
"""
Loglikelihood for arma model
Notes
-----
The ancillary parameter is assumed to be the last element of
the params vector
"""
# #copied from sandbox.tsa.arima.ARIMA
# p = self.nar
# rhoy = np.concatenate(([1], params[:p]))
# rhoe = np.concatenate(([1], params[p:-1]))
# errorsest = signal.lfilter(rhoy, rhoe, self.endog)
errorsest = self.geterrors(params)
sigma2 = np.maximum(params[-1]**2, 1e-6)
axis = 0
nobs = len(errorsest)
#this doesn't help for exploding paths
#errorsest[np.isnan(errorsest)] = 100
# llike = -0.5 * (np.sum(np.log(sigma2),axis)
# + np.sum((errorsest**2)/sigma2, axis)
# + nobs*np.log(2*np.pi))
llike = -0.5 * (nobs*np.log(sigma2)
+ np.sum((errorsest**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
return llike
def score(self, params):
"""
Score vector for Arma model
"""
#return None
#print(params
jac = ndt.Jacobian(self.loglike, stepMax=1e-4)
return jac(params)[-1]
def hessian(self, params):
"""
Hessian of arma model. Currently uses numdifftools
"""
#return None
Hfun = ndt.Jacobian(self.score, stepMax=1e-4)
return Hfun(params)[-1]
def fit(self, start_params=None, maxiter=5000, method='fmin', tol=1e-08):
if start_params is None:
start_params = np.concatenate((0.05*np.ones(self.nar + self.nma), [1]))
mlefit = super(Arma, self).fit(start_params=start_params,
maxiter=maxiter, method=method, tol=tol)
return mlefit
def generate_kindofgarch(nobs, ar, ma, mu=1.):
'''simulate garch like process but not squared errors in arma
used for initial trial but produces nice graph
'''
#garm1, gmam1 = [0.4], [0.2]
#pqmax = 1
# res = np.zeros(nobs+pqmax)
# rvs = np.random.randn(nobs+pqmax,2)
# for t in range(pqmax,nobs+pqmax):
# res[i] =
#ar = [1.0, -0.99]
#ma = [1.0, 0.5]
#this has the wrong distribution, should be eps**2
#TODO: use new version tsa.arima.??? instead, has distr option
#arest = tsa.arima.ARIMA()
#arest = tsa.arima.ARIMA #try class method, ARIMA needs data in constructor
from statsmodels.tsa.arima_process import arma_generate_sample
h = arma_generate_sample(ar,ma,nobs,0.1)
#h = np.abs(h)
h = (mu+h)**2
h = np.exp(h)
err = np.sqrt(h)*np.random.randn(nobs)
return err, h
def generate_garch(nobs, ar, ma, mu=1., scale=0.1):
'''simulate standard garch
scale : float
scale/standard deviation of innovation process in GARCH process
'''
eta = scale*np.random.randn(nobs)
# copied from armageneratesample
h = signal.lfilter(ma, ar, eta**2)
#
#h = (mu+h)**2
#h = np.abs(h)
#h = np.exp(h)
#err = np.sqrt(h)*np.random.randn(nobs)
err = np.sqrt(h)*eta #np.random.standard_t(8, size=nobs)
return err, h
def generate_gjrgarch(nobs, ar, ma, mu=1., scale=0.1, varinnovation=None):
'''simulate gjr garch process
Parameters
----------
ar : array_like, 1d
autoregressive term for variance
ma : array_like, 2d
moving average term for variance, with coefficients for negative
shocks in second column
mu : float
constant in variance law of motion
scale : float
scale/standard deviation of innovation process in GARCH process
Returns
-------
err : array 1d, (nobs+?,)
simulated gjr-garch process,
h : array 1d, (nobs+?,)
simulated variance
etax : array 1d, (nobs+?,)
data matrix for constant and ma terms in variance equation
Notes
-----
References
----------
'''
if varinnovation is None: # rename ?
eta = scale*np.random.randn(nobs)
else:
eta = varinnovation
# copied from armageneratesample
etax = np.empty((nobs,3))
etax[:,0] = mu
etax[:,1:] = (eta**2)[:,None]
etax[eta>0,2] = 0
h = miso_lfilter(ar, ma, etax)[0]
#
#h = (mu+h)**2
#h = np.abs(h)
#h = np.exp(h)
#err = np.sqrt(h)*np.random.randn(nobs)
#print('h.shape', h.shape)
err = np.sqrt(h[:len(eta)])*eta #np.random.standard_t(8, size=len(h))
return err, h, etax
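# Editorial note, illustrative only: a usage sketch mirroring the calls made
# further below; the three ma columns act on [constant, eps**2, eps**2 for
# negative shocks only], and the numbers here are arbitrary example values.
#
#   err, h, etax = generate_gjrgarch(1000, [1.0, -0.9],
#                                    [[1., 1, 0], [0, 0.1, 0.8]],
#                                    mu=0.0, scale=0.1)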
def loglike_GARCH11(params, y):
# Computes the likelihood vector of a GARCH11
# assumes y is centered
w = params[0] # constant (1);
alpha = params[1] # coefficient of lagged squared error
beta = params[2] # coefficient of lagged variance
y2 = y**2;
nobs = y2.shape[0]
ht = np.zeros(nobs);
ht[0] = y2.mean() #sum(y2)/T;
for i in range(1,nobs):
ht[i] = w + alpha*y2[i-1] + beta * ht[i-1]
sqrtht = np.sqrt(ht)
x = y/sqrtht
llvalues = -0.5*np.log(2*np.pi) - np.log(sqrtht) - 0.5*(x**2);
return llvalues.sum(), llvalues, ht
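# Editorial note, illustrative only: evaluate the GARCH(1,1) likelihood at a
# trial parameter vector [omega, alpha, beta] on centered data y; the function
# returns (total loglikelihood, per-observation loglikelihoods, variance path).
#
#   llf, llvalues, ht = loglike_GARCH11([0.1, 0.1, 0.8], y - y.mean())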
from statsmodels.tsa.filters.filtertools import miso_lfilter
#copied to statsmodels.tsa.filters.filtertools
def miso_lfilter_old(ar, ma, x, useic=False): #[0.1,0.1]):
'''
use nd convolution to merge inputs,
then use lfilter to produce output
arguments for column variables
return currently 1d
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter find array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut of extra values at end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
def test_misofilter():
x = np.arange(20).reshape(10,2)
y, inp = miso_lfilter([1., -1],[[1,1],[0,0]], x)
assert_almost_equal(y[:-1], x.sum(1).cumsum(), decimal=15)
inp2 = signal.convolve(np.arange(20),np.ones(2))[1::2]
assert_almost_equal(inp[:-1], inp2, decimal=15)
inp2 = signal.convolve(np.arange(20),np.ones(4))[1::2]
y, inp = miso_lfilter([1., -1],[[1,1],[1,1]], x)
assert_almost_equal(y, inp2.cumsum(), decimal=15)
assert_almost_equal(inp, inp2, decimal=15)
y, inp = miso_lfilter([1., 0],[[1,1],[1,1]], x)
assert_almost_equal(y, inp2, decimal=15)
assert_almost_equal(inp, inp2, decimal=15)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
y3 = (x3*np.array([-2,3,1])).sum(1)
assert_almost_equal(y[:-1], y3, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,1]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,1.0,0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,0]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[1.0,0.0,0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y, inp = miso_lfilter([1., -1],np.array([[-2.0,3,1],[1.0,0.0,0]]),x3)
assert_almost_equal(y[:-1], y4.cumsum(), decimal=15)
y4 = y3.copy()
y4[1:] += x3[:-1,2]
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,1.0]]),x3)
assert_almost_equal(y[:-1], y4, decimal=15)
assert_almost_equal(y, inp, decimal=15)
y, inp = miso_lfilter([1., -1],np.array([[-2.0,3,1],[0.0,0.0,1.0]]),x3)
assert_almost_equal(y[:-1], y4.cumsum(), decimal=15)
y, inp = miso_lfilter([1., 0],[[1,0],[1,0],[1,0]], x)
yt = np.convolve(x[:,0], [1,1,1])
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
y, inp = miso_lfilter([1., 0],[[0,1],[0,1],[0,1]], x)
yt = np.convolve(x[:,1], [1,1,1])
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
y, inp = miso_lfilter([1., 0],[[0,1],[0,1],[1,1]], x)
yt = np.convolve(x[:,1], [1,1,1])
yt[2:] += x[:,0]
assert_almost_equal(y, yt, decimal=15)
assert_almost_equal(inp, yt, decimal=15)
def test_gjrgarch():
# test impulse response of gjr simulator
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht = np.array([ 1., 0.1, 0.05, 0.01, 0., 0. ])
assert_almost_equal(hgjr5[:6], ht, decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
assert_almost_equal(hgjr5[:6], ht.cumsum(), decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht1 = [0]
for h in ht: ht1.append(h-ht1[-1])
assert_almost_equal(hgjr5[:6], ht1[1:], decimal=15)
# negative shock
varinno = np.zeros(100)
varinno[0] = -1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht = np.array([ 1. , 0.9 , 0.75, 0.61, 0. , 0. ])
assert_almost_equal(hgjr5[:6], ht, decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
assert_almost_equal(hgjr5[:6], ht.cumsum(), decimal=15)
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, 1.0],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
ht1 = [0]
for h in ht: ht1.append(h-ht1[-1])
assert_almost_equal(hgjr5[:6], ht1[1:], decimal=15)
'''
>>> print(signal.correlate(x3, np.array([[-2.0,3,1],[0.0,0.0,0]])[::-1,:],mode='full')[:-1, (x3.shape[1]+1)//2])
[ -1. 7. 15. 23. 31. 39. 47. 55. 63. 71.]
>>> (x3*np.array([-2,3,1])).sum(1)
array([ -1., 7., 15., 23., 31., 39., 47., 55., 63., 71.])
'''
def garchplot(err, h, title='Garch simulation'):
plt.figure()
plt.subplot(311)
plt.plot(err)
plt.title(title)
plt.ylabel('y')
plt.subplot(312)
plt.plot(err**2)
plt.ylabel('$y^2$')
plt.subplot(313)
plt.plot(h)
plt.ylabel('conditional variance')
if __name__ == '__main__':
#test_misofilter()
#test_gjrgarch()
examples = ['garch']
if 'arma' in examples:
arest = tsa.arima.ARIMA()
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
y1 -= y1.mean() #no mean correction/constant in estimation so far
arma1 = Arma(y1)
arma1.nar = 1
arma1.nma = 1
arma1res = arma1.fit(method='fmin')
print(arma1res.params)
#Warning need new instance otherwise results carry over
arma2 = Arma(y1)
res2 = arma2.fit(method='bfgs')
print(res2.params)
print(res2.model.hessian(res2.params))
print(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))
resls = arest.fit(y1,1,1)
print(resls[0])
print(resls[1])
print('\nparameter estimate')
print('parameter of DGP ar(1), ma(1), sigma_error')
print([-0.8, 0.5, 0.1])
print('mle with fmin')
print(arma1res.params)
print('mle with bfgs')
print(res2.params)
print('cond. least squares uses optim.leastsq ?')
errls = arest.error_estimate
print(resls[0], np.sqrt(np.dot(errls,errls)/errls.shape[0]))
err = arma1.geterrors(res2.params)
print('cond least squares parameter cov')
#print(np.dot(err,err)/err.shape[0] * resls[1])
#errls = arest.error_estimate
print(np.dot(errls,errls)/errls.shape[0] * resls[1])
# print('fmin hessian')
# print(arma1res.model.optimresults['Hopt'][:2,:2])
print('bfgs hessian')
print(res2.model.optimresults['Hopt'][:2,:2])
print('numdifftools inverse hessian')
print(-np.linalg.inv(ndt.Hessian(arma1.loglike, stepMax=1e-2)(res2.params))[:2,:2])
arma3 = Arma(y1**2)
res3 = arma3.fit(method='bfgs')
print(res3.params)
nobs = 1000
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print('seed', seed)
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print('seed', seed)
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.4,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h)
#plt.show()
print('Garch11')
print(optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2]))
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod0.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod0.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print('ggres0.params', ggres0.params)
if 'rpy' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print('R acf', tsa.acf(np.power(x,2))[:15])
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print(arma3res.params)
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print(arma3bres.params)
llf = loglike_GARCH11([0.93, 0.9, 0.2], errgjr4)
print(llf[0])
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
''' this looks relatively good
>>> Arma.initialize = lambda x: x
>>> arma3 = Arma(errgjr4**2)
>>> arma3res = arma3.fit()
Warning: Maximum number of function evaluations has been exceeded.
>>> arma3res.params
array([-0.775, -0.583, -0.001])
>>> arma2.nar
1
>>> arma2.nma
1
unit root ?
>>> arma3 = Arma(hgjr4)
>>> arma3res = arma3.fit()
Optimization terminated successfully.
Current function value: -3641.529780
Iterations: 250
Function evaluations: 458
>>> arma3res.params
array([ -1.000e+00, -3.096e-04, 6.343e-03])
or maybe not great
>>> arma3res = arma3.fit(start_params=[-0.8,0.1,0.5],maxiter=5000)
Warning: Maximum number of function evaluations has been exceeded.
>>> arma3res.params
array([-0.086, 0.186, -0.001])
>>> arma3res = arma3.fit(start_params=[-0.8,0.1,0.5],maxiter=5000,method='bfgs')
Divide-by-zero encountered: rhok assumed large
Optimization terminated successfully.
Current function value: -5988.332952
Iterations: 16
Function evaluations: 245
Gradient evaluations: 49
>>> arma3res.params
array([ -9.995e-01, -9.715e-01, 6.501e-04])
'''
'''
current problems
persistence in errgjr looks too low, small tsa.acf(errgjr4**2)[:15]
as a consequence the ML estimate has also very little persistence,
estimated ar term is much too small
-> need to compare with R or matlab
help.search("garch") : ccgarch, garchSim(fGarch), garch(tseries)
HestonNandiGarchFit(fOptions)
> library('fGarch')
> spec = garchSpec()
> x = garchSim(model = spec@model, n = 500)
> acf(x**2) # has low correlation
but fit has high parameters:
> fit = garchFit(~garch(1, 1), data = x)
with rpy:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4)
Final Estimate:
LLH: -3198.2 norm LLH: -3.1982
mu omega alpha1 beta1
1.870485e-04 9.437557e-05 3.457349e-02 1.000000e-08
second run with ar = [1.0, -0.8] ma = [[1., 0, 0],[0, 1.0,0.0]]
Final Estimate:
LLH: -3979.555 norm LLH: -3.979555
mu omega alpha1 beta1
1.465050e-05 1.641482e-05 1.092600e-01 9.654438e-02
mine:
>>> ggres.params
array([ -2.000e-06, 3.283e-03, 3.769e-01, -1.000e-06])
another run, same ar, ma
Final Estimate:
LLH: -3956.197 norm LLH: -3.956197
mu omega alpha1 beta1
7.487278e-05 1.171238e-06 1.511080e-03 9.440843e-01
every step needs to be compared and tested
something looks wrong with likelihood function, either a silly
mistake or still some conceptional problems
* found the silly mistake, I was normalizing the errors before
plugging into expression for likelihood function
* now gjr garch estimation works and produces results that are very
close to the explicit garch11 estimation
initial conditions for miso_filter need to be cleaned up
lots of clean up to do after the bug hunting
'''
y = np.random.randn(20)
params = [0.93, 0.9, 0.2]
lls, llt, ht = loglike_GARCH11(params, y)
sigma2 = ht
axis=0
nobs = len(ht)
llike = -0.5 * (np.sum(np.log(sigma2),axis)
+ np.sum((y**2)/sigma2, axis)
+ nobs*np.log(2*np.pi))
print(lls, llike)
#print(np.log(stats.norm.pdf(y,scale=np.sqrt(ht))).sum())
'''
>>> optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4)[0], [0.93, 0.9, 0.2])
Optimization terminated successfully.
Current function value: 7312.393886
Iterations: 95
Function evaluations: 175
array([ 3.691, 0.072, 0.932])
>>> ar
[1.0, -0.93000000000000005]
>>> ma
[[1.0, 0, 0], [0, 0.90000000000000002, 0.0]]
'''
np.random.seed(1)
tseries = np.zeros(200) # set first observation
for i in range(1,200): # get 199 more observations based on the given process
error = np.random.randn()
tseries[i] = .9 * tseries[i-1] + .01 * error
tseries = tseries[100:]
armodel = AR(tseries)
#armodel.fit(method='bfgs-b')
#armodel.fit(method='tnc')
#powell should be the most robust, see Hamilton 5.7
armodel.fit(method='powell', penalty=True)
# The below don't work yet
#armodel.fit(method='newton', penalty=True)
#armodel.fit(method='broyden', penalty=True)
print("Unconditional MLE for AR(1) y_t = .9*y_t-1 +.01 * err")
print(armodel.params)
| bsd-3-clause |
harterj/moose | modules/tensor_mechanics/test/tests/drucker_prager/small_deform3.py | 24 | 3894 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(scheme, sqrtj2):
cohesion = 10
friction_degrees = 35
tip_smoother = 8
friction = friction_degrees * np.pi / 180.0
if (scheme == "native"):
aaa = cohesion
bbb = np.tan(friction)
elif (scheme == "outer_tip"):
aaa = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 - np.sin(friction))
bbb = 2 * np.sin(friction) / np.sqrt(3) / (3.0 - np.sin(friction))
elif (scheme == "inner_tip"):
aaa = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 + np.sin(friction))
bbb = 2 * np.sin(friction) / np.sqrt(3) / (3.0 + np.sin(friction))
elif (scheme == "lode_zero"):
aaa = cohesion * np.cos(friction)
bbb = np.sin(friction) / 3.0
elif (scheme == "inner_edge"):
aaa = 3 * cohesion * np.cos(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
bbb = np.sin(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
return (aaa - np.sqrt(tip_smoother * tip_smoother + sqrtj2 * sqrtj2)) / bbb
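# Editorial note, illustrative only: for the "native" scheme at sqrtj2 = 0,
# aaa = 10 and bbb = tan(35 deg) ~ 0.700, so the returned value is roughly
# (10 - 8) / 0.700 ~ 2.86, the smoothed tip of the yield surface on the
# Tr(stress) axis.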
def sigma_mean(stress):
return (stress[0] + stress[3] + stress[5])/3.0
def sigma_bar(stress):
mean = sigma_mean(stress)
return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2)))
def third_inv(stress):
mean = sigma_mean(stress)
return (stress[0] - mean)*(stress[3] - mean)*(stress[5] - mean)
def lode_angle(stress):
bar = sigma_bar(stress)
third = third_inv(stress)
return np.arcsin(-1.5 * np.sqrt(3.0) * third / np.power(bar, 3)) / 3.0
def moose_result(fn):
f = open(fn)
x = []
y = []
for line in f:
if not line.strip():
continue
line = line.strip()
if line.startswith("time") or line.startswith("0"):
continue
line = list(map(float, line.split(",")))
if line[1] < -1E-10:
continue # this is an elastic deformation
trace = 3.0 * sigma_mean(line[3:])
bar = sigma_bar(line[3:])
x.append(trace)
y.append(bar)
f.close()
return (x, y)
plt.figure()
sqrtj2 = np.arange(0, 30, 0.25)
plt.plot(expected("native", sqrtj2), sqrtj2, 'k-', label = 'expected (native)')
mr = moose_result("gold/small_deform3_native.csv")
plt.plot(mr[0], mr[1], 'k^', label = 'MOOSE (native)')
plt.plot(expected("outer_tip", sqrtj2), sqrtj2, 'g-', label = 'expected (outer_tip)')
mr = moose_result("gold/small_deform3_outer_tip.csv")
plt.plot(mr[0], mr[1], 'g^', label = 'MOOSE (outer_tip)')
plt.plot(expected("inner_tip", sqrtj2), sqrtj2, 'b-', label = 'expected (inner_tip)')
mr = moose_result("gold/small_deform3_inner_tip.csv")
plt.plot(mr[0], mr[1], 'b^', label = 'MOOSE (inner_tip)')
plt.plot(expected("lode_zero", sqrtj2), sqrtj2, 'c-', label = 'expected (lode_zero)')
mr = moose_result("gold/small_deform3_lode_zero.csv")
plt.plot(mr[0], mr[1], 'c^', label = 'MOOSE (lode_zero)')
plt.plot(expected("inner_edge", sqrtj2), sqrtj2, 'r-', label = 'expected (inner_edge)')
mr = moose_result("gold/small_deform3_inner_edge.csv")
plt.plot(mr[0], mr[1], 'r^', label = 'MOOSE (inner_edge)')
legend = plt.legend(bbox_to_anchor=(1.16, 0.95))
for label in legend.get_texts():
label.set_fontsize('small')
plt.xlabel("Tr(stress)")
plt.ylabel("sqrt(J2)")
plt.title("Drucker-Prager yield function on meridional plane")
plt.axis([-25, 15, 0, 25])
plt.savefig("small_deform3.png")
sys.exit(0)
| lgpl-2.1 |
a113n/bcbio-nextgen | bcbio/rnaseq/featureCounts.py | 1 | 4334 | import os
import shutil
import bcbio.bam as bam
from bcbio.utils import (file_exists, safe_makedir, append_stem)
from bcbio.pipeline import config_utils
from bcbio.bam import is_paired
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
import bcbio.pipeline.datadict as dd
try:
import pandas as pd
except ImportError:
pd = None
def count(data):
"""
count reads mapping to genes using featureCounts
http://subread.sourceforge.net
"""
in_bam = dd.get_work_bam(data) or dd.get_align_bam(data)
out_dir = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))
if dd.get_aligner(data) == "star":
out_dir = os.path.join(out_dir, "%s_%s" % (dd.get_sample_name(data), dd.get_aligner(data)))
sorted_bam = bam.sort(in_bam, dd.get_config(data), order="queryname", out_dir=safe_makedir(out_dir))
gtf_file = dd.get_gtf_file(data)
work_dir = dd.get_work_dir(data)
out_dir = os.path.join(work_dir, "htseq-count")
safe_makedir(out_dir)
count_file = os.path.join(out_dir, dd.get_sample_name(data)) + ".counts"
summary_file = os.path.join(out_dir, dd.get_sample_name(data)) + ".counts.summary"
if file_exists(count_file) and _is_fixed_count_file(count_file):
return count_file
featureCounts = config_utils.get_program("featureCounts", dd.get_config(data))
paired_flag = _paired_flag(in_bam)
strand_flag = _strand_flag(data)
filtered_bam = bam.filter_primary(sorted_bam, data)
cmd = ("{featureCounts} -a {gtf_file} -o {tx_count_file} -s {strand_flag} "
"{paired_flag} {filtered_bam}")
message = ("Count reads in {tx_count_file} mapping to {gtf_file} using "
"featureCounts")
with file_transaction(data, [count_file, summary_file]) as tx_files:
tx_count_file, tx_summary_file = tx_files
do.run(cmd.format(**locals()), message.format(**locals()))
fixed_count_file = _format_count_file(count_file, data)
fixed_summary_file = _change_sample_name(
summary_file, dd.get_sample_name(data), data=data)
shutil.move(fixed_count_file, count_file)
shutil.move(fixed_summary_file, summary_file)
return count_file
def _change_sample_name(in_file, sample_name, data=None):
"""Fix name in feature counts log file to get the same
name in multiqc report.
"""
out_file = append_stem(in_file, "_fixed")
with file_transaction(data, out_file) as tx_out:
with open(tx_out, "w") as out_handle:
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("Status"):
line = "Status\t%s.bam" % sample_name
out_handle.write("%s\n" % line.strip())
return out_file
def _is_fixed_count_file(count_file):
if os.path.exists(count_file):
with open(count_file) as in_handle:
line = in_handle.readline().split("\t")
return len(line) == 2
def _format_count_file(count_file, data):
"""
this cuts the count file produced from featureCounts down to
a two column file of gene ids and number of reads mapping to
each gene
"""
COUNT_COLUMN = 5
out_file = os.path.splitext(count_file)[0] + ".fixed.counts"
if file_exists(out_file) and _is_fixed_count_file(out_file):
return out_file
df = pd.io.parsers.read_csv(count_file, sep="\t", index_col=0, header=1)
df_sub = df.iloc[:, COUNT_COLUMN]
with file_transaction(data, out_file) as tx_out_file:
df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False)
return out_file
def _strand_flag(data):
"""
0: unstranded 1: stranded 2: reverse stranded
"""
strand_flag = {"unstranded": "0",
"firststrand": "2",
"secondstrand": "1"}
stranded = dd.get_strandedness(data)
    assert stranded in strand_flag, ("%s is not a valid strandedness value. "
                                     "Valid values are 'firststrand', 'secondstrand', "
                                     "and 'unstranded'." % stranded)
return strand_flag[stranded]
def _paired_flag(bam_file):
"""
sets flags to handle paired-end BAM files
"""
if is_paired(bam_file):
return "-p -B -C"
else:
return ""
| mit |
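The _format_count_file() helper above trims the raw featureCounts table down to gene-id/count pairs. A minimal pandas sketch of the same reduction follows; the input path "sample.counts" is hypothetical, and column 5 is the read-count column once Geneid is taken as the index (after Chr, Start, End, Strand and Length).

import pandas as pd

# header=1 skips the featureCounts comment line, as in _format_count_file() above
df = pd.read_csv("sample.counts", sep="\t", index_col=0, header=1)
counts = df.iloc[:, 5]
counts.to_csv("sample.fixed.counts", sep="\t", index_label="id", header=False)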
jorgehog/Deux-kMC | scripts/felix_cav/sequential_analyze.py | 1 | 3360 | import sys
import os
import numpy as np
from os.path import join
from matplotlib.pylab import *
sys.path.append(join(os.getcwd(), ".."))
from parse_h5_output import ParseKMCHDF5
from intercombinatorzor import ICZ
def find_front_pos(heights):
return heights.mean()
#9 14 17 39 43 56 59 61 62 64
def main():
input_file = sys.argv[1]
skiplist = []
if len(sys.argv) > 2:
skiplist = [int(x) for x in sys.argv[2:]]
parser = ParseKMCHDF5(input_file)
def skip(data):
return data.attrs["flux"] != 2.40
every = 1000
thermRatio = 0.25
l = None
n_entries = 0
for data, L, W, run_id in parser:
if skip(data):
continue
if not l:
l = len(data["time"])
n_entries += 1
therm = l*thermRatio
nbins = 20
cmat = np.zeros(shape=(n_entries, nbins))
dy = W/float(nbins)
combinator = ICZ("Time", "ys")
Cmean = 0
cmeancount = 0
entry_count = 0
for data, L, W, run_id in parser:
if skip(data):
continue
conf_height = data.attrs["height"]
stored_heights = data["stored_heights"]
stored_particles = data["stored_particles"]
stored_heights_indices = sorted(stored_heights, key=lambda x: int(x))
time = data["time"][()]
ys_vec = np.zeros(len(time)/every)
        # Shift by 1 to translate from interval start times to end times
t_prev = 0
tot_weight = 0
for hi, heights_id in enumerate(stored_heights_indices):
if hi % every != 0:
continue
heights = stored_heights[heights_id][()].transpose()
ys = find_front_pos(heights)
ys_vec[hi/every] = ys
if hi < len(time) - 1:
t_new = time[hi+1]
if hi >= therm:
if heights_id in stored_particles:
particles = stored_particles[heights_id][()]
else:
particles = []
dt = t_new - t_prev
for x, y, _ in particles:
xl = round(x)
yl = round(y)
dh = conf_height - heights[xl, yl] - 1
cmat[entry_count, int((y+0.5)/dy)] += dt/dh
tot_weight += dt
t_prev = t_new
if hi % 100 == 0:
sys.stdout.flush()
print "\r%d/%d" % (hi, len(stored_heights)),
cmat[entry_count, :] /= tot_weight
sys.stdout.flush()
print
print "\rfin %d / %d" % (entry_count+1, n_entries)
if skiplist:
if entry_count + 1 in skiplist:
ans = "asd"
else:
ans = ""
else:
plot(time[::every], ys_vec)
show()
ans = raw_input("discard? (n)")
if ans == "":
combinator.feed(time[::every], ys_vec)
Cmean += cmat[entry_count, :]
cmeancount += 1
entry_count += 1
print
Cmean /= cmeancount
ti, ys_veci = combinator.intercombine("Time", "ys")
np.save("/tmp/FelixSeqC_t.npy", ti)
np.save("/tmp/FelixSeqC_ys.npy", ys_veci)
np.save("/tmp/FelixSeqC_C.npy", Cmean)
if __name__ == "__main__":
main()
| gpl-3.0 |
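The loop above tracks the cavity front as the mean surface height and accumulates a residence-time-weighted concentration per bin. A toy sketch of both quantities on synthetic data, independent of the HDF5 parser the script uses:

import numpy as np

heights = np.random.randint(0, 5, size=(30, 30))  # synthetic surface heights
front_pos = heights.mean()                        # same rule as find_front_pos()

dts = np.array([0.1, 0.4, 0.2])                   # residence times dt
values = np.array([2.0, 3.0, 5.0])                # per-interval measurements
weighted_mean = (dts * values).sum() / dts.sum()  # analogous to cmat /= tot_weight
print(front_pos, weighted_mean)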
jkarnows/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
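The example above pushes the whole iris matrix through IncrementalPCA.fit_transform with batch_size=10; when the data truly does not fit in memory, the same estimator can be fed explicitly with partial_fit. A minimal sketch, with random data standing in for an out-of-core source:

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(1000, 20)                  # stand-in for data streamed from disk
ipca = IncrementalPCA(n_components=2)
for batch in np.array_split(X, 10):      # in practice, read one chunk at a time
    ipca.partial_fit(batch)
X_new = ipca.transform(X[:5])            # project samples once fitting is done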
moberweger/deep-prior-pp | src/util/handdetector.py | 1 | 39679 | """Provides a basic hand detector in depth images.
HandDetector provides an interface for detecting hands in a depth image, using the center of mass.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import cv2
from scipy import stats, ndimage
from data.transformations import rotatePoint2D, rotatePoints2D, rotatePoints3D
__author__ = "Markus Oberweger <oberweger@icg.tugraz.at>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "oberweger@icg.tugraz.at"
__status__ = "Development"
class HandDetector(object):
"""
Detect hand based on simple heuristic, centered at Center of Mass
"""
RESIZE_BILINEAR = 0
RESIZE_CV2_NN = 1
RESIZE_CV2_LINEAR = 2
def __init__(self, dpt, fx, fy, importer=None, refineNet=None):
"""
Constructor
:param dpt: depth image
        :param fx: camera focal length
        :param fy: camera focal length
"""
self.dpt = dpt
self.maxDepth = min(1500, dpt.max())
self.minDepth = max(10, dpt.min())
# set values out of range to 0
self.dpt[self.dpt > self.maxDepth] = 0.
self.dpt[self.dpt < self.minDepth] = 0.
# camera settings
self.fx = fx
self.fy = fy
# Optional refinement of CoM
self.refineNet = refineNet
self.importer = importer
# depth resize method
self.resizeMethod = self.RESIZE_CV2_NN
@staticmethod
def detectionModeToString(com, refineNet):
"""
Get string for detection method
:param com: center of mass
:param refineNet: CoM refinement
:return: string
"""
if com is False and refineNet is False:
cfg = 'gt'
elif com is True and refineNet is False:
cfg = 'com'
elif com is True and refineNet is True:
cfg = 'comref'
else:
raise NotImplementedError("com {}, refineNet {}".format(com, refineNet))
return cfg
def calculateCoM(self, dpt):
"""
Calculate the center of mass
:param dpt: depth image
:return: (x,y,z) center of mass
"""
dc = dpt.copy()
dc[dc < self.minDepth] = 0
dc[dc > self.maxDepth] = 0
cc = ndimage.measurements.center_of_mass(dc > 0)
num = numpy.count_nonzero(dc)
com = numpy.array((cc[1]*num, cc[0]*num, dc.sum()), numpy.float)
if num == 0:
return numpy.array((0, 0, 0), numpy.float)
else:
return com/num
def checkImage(self, tol):
"""
Check if there is some content in the image
:param tol: tolerance
:return:True if image is contentful, otherwise false
"""
# print numpy.std(self.dpt)
if numpy.std(self.dpt) < tol:
return False
else:
return True
def getNDValue(self):
"""
Get value of not defined depth value distances
:return:value of not defined depth value
"""
if self.dpt[self.dpt < self.minDepth].shape[0] > self.dpt[self.dpt > self.maxDepth].shape[0]:
return stats.mode(self.dpt[self.dpt < self.minDepth])[0][0]
else:
return stats.mode(self.dpt[self.dpt > self.maxDepth])[0][0]
@staticmethod
def bilinearResize(src, dsize, ndValue):
"""
Bilinear resizing with sparing out not defined parts of the depth map
:param src: source depth map
:param dsize: new size of resized depth map
:param ndValue: value of not defined depth
:return:resized depth map
"""
dst = numpy.zeros((dsize[1], dsize[0]), dtype=numpy.float32)
x_ratio = float(src.shape[1] - 1) / dst.shape[1]
y_ratio = float(src.shape[0] - 1) / dst.shape[0]
for row in range(dst.shape[0]):
y = int(row * y_ratio)
y_diff = (row * y_ratio) - y # distance of the nearest pixel(y axis)
y_diff_2 = 1 - y_diff
for col in range(dst.shape[1]):
x = int(col * x_ratio)
x_diff = (col * x_ratio) - x # distance of the nearest pixel(x axis)
x_diff_2 = 1 - x_diff
y2_cross_x2 = y_diff_2 * x_diff_2
y2_cross_x = y_diff_2 * x_diff
y_cross_x2 = y_diff * x_diff_2
y_cross_x = y_diff * x_diff
# mathematically impossible, but just to be sure...
if(x+1 >= src.shape[1]) | (y+1 >= src.shape[0]):
raise UserWarning("Shape mismatch")
# set value to ND if there are more than two values ND
numND = int(src[y, x] == ndValue) + int(src[y, x + 1] == ndValue) + int(src[y + 1, x] == ndValue) + int(
src[y + 1, x + 1] == ndValue)
if numND > 2:
dst[row, col] = ndValue
continue
# print y2_cross_x2, y2_cross_x, y_cross_x2, y_cross_x
# interpolate only over known values, switch to linear interpolation
if src[y, x] == ndValue:
y2_cross_x2 = 0.
y2_cross_x = 1. - y_cross_x - y_cross_x2
if src[y, x + 1] == ndValue:
y2_cross_x = 0.
if y2_cross_x2 != 0.:
y2_cross_x2 = 1. - y_cross_x - y_cross_x2
if src[y + 1, x] == ndValue:
y_cross_x2 = 0.
y_cross_x = 1. - y2_cross_x - y2_cross_x2
if src[y + 1, x + 1] == ndValue:
y_cross_x = 0.
if y_cross_x2 != 0.:
y_cross_x2 = 1. - y2_cross_x - y2_cross_x2
# print src[y, x], src[y, x+1],src[y+1, x],src[y+1, x+1]
# normalize weights
if not ((y2_cross_x2 == 0.) & (y2_cross_x == 0.) & (y_cross_x2 == 0.) & (y_cross_x == 0.)):
sc = 1. / (y_cross_x + y_cross_x2 + y2_cross_x + y2_cross_x2)
y2_cross_x2 *= sc
y2_cross_x *= sc
y_cross_x2 *= sc
y_cross_x *= sc
# print y2_cross_x2, y2_cross_x, y_cross_x2, y_cross_x
if (y2_cross_x2 == 0.) & (y2_cross_x == 0.) & (y_cross_x2 == 0.) & (y_cross_x == 0.):
dst[row, col] = ndValue
else:
dst[row, col] = y2_cross_x2 * src[y, x] + y2_cross_x * src[y, x + 1] + y_cross_x2 * src[
y + 1, x] + y_cross_x * src[y + 1, x + 1]
return dst
def comToBounds(self, com, size):
"""
Calculate boundaries, project to 3D, then add offset and backproject to 2D (ux, uy are canceled)
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:return: xstart, xend, ystart, yend, zstart, zend
"""
if numpy.isclose(com[2], 0.):
print "Warning: CoM ill-defined!"
xstart = self.dpt.shape[0]//4
xend = xstart + self.dpt.shape[0]//2
ystart = self.dpt.shape[1]//4
yend = ystart + self.dpt.shape[1]//2
zstart = self.minDepth
zend = self.maxDepth
else:
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
xstart = int(numpy.floor((com[0] * com[2] / self.fx - size[0] / 2.) / com[2]*self.fx+0.5))
xend = int(numpy.floor((com[0] * com[2] / self.fx + size[0] / 2.) / com[2]*self.fx+0.5))
ystart = int(numpy.floor((com[1] * com[2] / self.fy - size[1] / 2.) / com[2]*self.fy+0.5))
yend = int(numpy.floor((com[1] * com[2] / self.fy + size[1] / 2.) / com[2]*self.fy+0.5))
return xstart, xend, ystart, yend, zstart, zend
def comToTransform(self, com, size, dsize=(128, 128)):
"""
Calculate affine transform from crop
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:return: affine transform
"""
xstart, xend, ystart, yend, _, _ = self.comToBounds(com, size)
trans = numpy.eye(3)
trans[0, 2] = -xstart
trans[1, 2] = -ystart
wb = (xend - xstart)
hb = (yend - ystart)
if wb > hb:
scale = numpy.eye(3) * dsize[0] / float(wb)
sz = (dsize[0], hb * dsize[0] / wb)
else:
scale = numpy.eye(3) * dsize[1] / float(hb)
sz = (wb * dsize[1] / hb, dsize[1])
scale[2, 2] = 1
xstart = int(numpy.floor(dsize[0] / 2. - sz[1] / 2.))
ystart = int(numpy.floor(dsize[1] / 2. - sz[0] / 2.))
off = numpy.eye(3)
off[0, 2] = xstart
off[1, 2] = ystart
return numpy.dot(off, numpy.dot(scale, trans))
def getCrop(self, dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z=True, background=0):
"""
Crop patch from image
:param dpt: depth image to crop from
:param xstart: start x
:param xend: end x
:param ystart: start y
:param yend: end y
:param zstart: start z
:param zend: end z
:param thresh_z: threshold z values
:return: cropped image
"""
if len(dpt.shape) == 2:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1])].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1]))), mode='constant', constant_values=background)
elif len(dpt.shape) == 3:
cropped = dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1]), :].copy()
# add pixels that are out of the image in order to keep aspect ratio
cropped = numpy.pad(cropped, ((abs(ystart)-max(ystart, 0),
abs(yend)-min(yend, dpt.shape[0])),
(abs(xstart)-max(xstart, 0),
abs(xend)-min(xend, dpt.shape[1])),
(0, 0)), mode='constant', constant_values=background)
else:
raise NotImplementedError()
if thresh_z is True:
msk1 = numpy.logical_and(cropped < zstart, cropped != 0)
msk2 = numpy.logical_and(cropped > zend, cropped != 0)
cropped[msk1] = zstart
cropped[msk2] = 0. # backface is at 0, it is set later
return cropped
def getInverseCrop(self, crop, sz, xstart, xend, ystart, yend, zstart, zend, thresh_z=True, background=0):
"""
Crop patch from image
:param crop: cropped depth image
:param xstart: start x
:param xend: end x
:param ystart: start y
:param yend: end y
:param zstart: start z
:param zend: end z
:param thresh_z: threshold z values
:return: depth image with crop put on position
"""
dpt = numpy.ones(sz, dtype=crop.dtype)*background
if (xend < 0 and xstart < 0) or (yend < 0 and ystart < 0):
return dpt
if (xend > dpt.shape[1] and xstart > dpt.shape[1]) or (yend > dpt.shape[0] and ystart > dpt.shape[0]):
return dpt
if xend == xstart or yend == ystart:
return dpt
cropped = self.resizeCrop(crop, (xend-xstart, yend-ystart))
if len(dpt.shape) == 2:
dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1])] = cropped[max(-ystart, 0):cropped.shape[0]-max(yend-dpt.shape[0], 0), max(-xstart, 0):cropped.shape[1]-max(xend-dpt.shape[1], 0)]
elif len(dpt.shape) == 3:
dpt[max(ystart, 0):min(yend, dpt.shape[0]), max(xstart, 0):min(xend, dpt.shape[1]), :] = cropped[max(-ystart, 0):cropped.shape[0]-max(yend-dpt.shape[0], 0), max(-xstart, 0):cropped.shape[1]-max(xend-dpt.shape[1], 0)]
else:
raise NotImplementedError()
if thresh_z is True:
msk1 = numpy.logical_and(dpt < zstart, dpt != 0)
msk2 = numpy.logical_and(dpt > zend, dpt != 0)
dpt[msk1] = zstart
dpt[msk2] = 0. # backface is at 0, it is set later
return dpt
def resizeCrop(self, crop, sz):
"""
Resize cropped image
:param crop: crop
:param sz: size
:return: resized image
"""
if self.resizeMethod == self.RESIZE_CV2_NN:
rz = cv2.resize(crop, sz, interpolation=cv2.INTER_NEAREST)
elif self.resizeMethod == self.RESIZE_BILINEAR:
rz = self.bilinearResize(crop, sz, self.getNDValue())
elif self.resizeMethod == self.RESIZE_CV2_LINEAR:
rz = cv2.resize(crop, sz, interpolation=cv2.INTER_LINEAR)
else:
raise NotImplementedError("Unknown resize method!")
return rz
def applyCrop3D(self, dpt, com, size, dsize, thresh_z=True, background=None):
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(dpt, xstart, xend, ystart, yend, zstart, zend, thresh_z, background)
wb = (xend - xstart)
hb = (yend - ystart)
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
# depth resize
rz = self.resizeCrop(cropped, sz)
if background is None:
background = self.getNDValue() # use background as filler
ret = numpy.ones(dsize, numpy.float32) * background
xstart = int(numpy.floor(dsize[0] / 2. - rz.shape[1] / 2.))
xend = int(xstart + rz.shape[1])
ystart = int(numpy.floor(dsize[1] / 2. - rz.shape[0] / 2.))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
return ret
def cropArea3D(self, com=None, size=(250, 250, 250), dsize=(128, 128), docom=False):
"""
Crop area of hand in 3D volumina, scales inverse to the distance of hand to camera
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param size: (x,y,z) extent of the source crop volume in mm
:param dsize: (x,y) extent of the destination size
:return: cropped hand image, transformation matrix for joints, CoM in image coordinates
"""
# print com, self.importer.jointImgTo3D(com)
# import matplotlib.pyplot as plt
# import matplotlib
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.imshow(self.dpt, cmap=matplotlib.cm.jet)
if len(size) != 3 or len(dsize) != 2:
raise ValueError("Size must be 3D and dsize 2D bounding box")
if com is None:
com = self.calculateCoM(self.dpt)
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='.')
#############
# for simulating COM within cube
if docom is True:
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
if numpy.isclose(com[2], 0):
com[2] = 300
com[0] += xstart
com[1] += ystart
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='x')
##############
if docom is True and self.refineNet is not None and self.importer is not None:
rz = self.resizeCrop(cropped, dsize)
newCom3D = self.refineCoM(rz, size, com) + self.importer.jointImgTo3D(com)
com = self.importer.joint3DToImg(newCom3D)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# ax.plot(com[0],com[1],marker='o')
# plt.show(block=True)
#############
wb = (xend - xstart)
hb = (yend - ystart)
if wb > hb:
sz = (dsize[0], hb * dsize[0] / wb)
else:
sz = (wb * dsize[1] / hb, dsize[1])
# print com, sz, cropped.shape, xstart, xend, ystart, yend, hb, wb, zstart, zend
trans = numpy.eye(3)
trans[0, 2] = -xstart
trans[1, 2] = -ystart
if cropped.shape[0] > cropped.shape[1]:
scale = numpy.eye(3) * sz[1] / float(cropped.shape[0])
else:
scale = numpy.eye(3) * sz[0] / float(cropped.shape[1])
scale[2, 2] = 1
# depth resize
rz = self.resizeCrop(cropped, sz)
# pylab.imshow(rz); pylab.gray();t=transformPoint2D(com,scale*trans);pylab.scatter(t[0],t[1]); pylab.show()
ret = numpy.ones(dsize, numpy.float32) * self.getNDValue() # use background as filler
xstart = int(numpy.floor(dsize[0] / 2. - rz.shape[1] / 2.))
xend = int(xstart + rz.shape[1])
ystart = int(numpy.floor(dsize[1] / 2. - rz.shape[0] / 2.))
yend = int(ystart + rz.shape[0])
ret[ystart:yend, xstart:xend] = rz
# print rz.shape, xstart, ystart
off = numpy.eye(3)
off[0, 2] = xstart
off[1, 2] = ystart
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(131)
# ax.imshow(cropped, cmap='jet')
# ax = fig.add_subplot(132)
# ax.imshow(rz, cmap='jet')
# ax = fig.add_subplot(133)
# ax.imshow(ret, cmap='jet')
# plt.show(block=False)
# print trans,scale,off,numpy.dot(off, numpy.dot(scale, trans))
return ret, numpy.dot(off, numpy.dot(scale, trans)), com
def checkPose(self, joints):
"""
Check if pose is anatomically possible
@see Serre: Kinematic model of the hand using computer vision
:param joints: joint locations R^16x3
:return: true if pose is possible
"""
# check dip, pip of fingers
return True
def track(self, com, size=(250, 250, 250), dsize=(128, 128), doHandSize=True):
"""
        Track the hand starting from a previous center of mass estimate
:param size: bounding box size
:return: center of mass of hand
"""
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop patch from source
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
# predict movement of CoM
if self.refineNet is not None and self.importer is not None:
rz = self.resizeCrop(cropped, dsize)
newCom3D = self.refineCoM(rz, size, com) + self.importer.jointImgTo3D(com)
com = self.importer.joint3DToImg(newCom3D)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
else:
raise RuntimeError("Need refineNet for this")
if doHandSize is True:
# refined contour for size estimation
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
part_ref = self.dpt.copy()
part_ref[part_ref < zstart] = 0
part_ref[part_ref > zend] = 0
part_ref[part_ref != 0] = 10 # set to something
ret, thresh_ref = cv2.threshold(part_ref, 1, 255, cv2.THRESH_BINARY)
contours_ref, _ = cv2.findContours(thresh_ref.astype(dtype=numpy.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# find the largest contour
areas = [cv2.contourArea(cc) for cc in contours_ref]
c_max = numpy.argmax(areas)
# final result
return com, self.estimateHandsize(contours_ref[c_max], com, size)
else:
return com, size
def refineCoMIterative(self, com, num_iter, size=(250, 250, 250)):
"""
Refine com iteratively
:param com: center of mass, in image coordinates (x,y,z), z in mm
:param num_iter: number of iterations
:param size: (x,y,z) extent of the source crop volume in mm
:return: refined com
"""
for k in xrange(num_iter):
# calculate boundaries
xstart, xend, ystart, yend, zstart, zend = self.comToBounds(com, size)
# crop
cropped = self.getCrop(self.dpt, xstart, xend, ystart, yend, zstart, zend)
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
com[0] += max(xstart, 0)
com[1] += max(ystart, 0)
return com
def detect(self, size=(250, 250, 250), doHandSize=True):
"""
Detect the hand as closest object to camera
:param size: bounding box size
:return: center of mass of hand
"""
steps = 20
dz = (self.maxDepth - self.minDepth)/float(steps)
for i in range(steps):
part = self.dpt.copy()
part[part < i*dz + self.minDepth] = 0
part[part > (i+1)*dz + self.minDepth] = 0
part[part != 0] = 10 # set to something
ret, thresh = cv2.threshold(part, 1, 255, cv2.THRESH_BINARY)
thresh = thresh.astype(dtype=numpy.uint8)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in range(len(contours)):
if cv2.contourArea(contours[c]) > 200:
# centroid
M = cv2.moments(contours[c])
cx = int(numpy.rint(M['m10']/M['m00']))
cy = int(numpy.rint(M['m01']/M['m00']))
# crop
xstart = int(max(cx-100, 0))
xend = int(min(cx+100, self.dpt.shape[1]-1))
ystart = int(max(cy-100, 0))
yend = int(min(cy+100, self.dpt.shape[0]-1))
cropped = self.dpt[ystart:yend, xstart:xend].copy()
cropped[cropped < i*dz + self.minDepth] = 0.
cropped[cropped > (i+1)*dz + self.minDepth] = 0.
com = self.calculateCoM(cropped)
if numpy.allclose(com, 0.):
com[2] = cropped[cropped.shape[0]//2, cropped.shape[1]//2]
com[0] += xstart
com[1] += ystart
# refine iteratively
com = self.refineCoMIterative(com, 5, size)
zstart = com[2] - size[2] / 2.
zend = com[2] + size[2] / 2.
if doHandSize is True:
# refined contour for size estimation
part_ref = self.dpt.copy()
part_ref[part_ref < zstart] = 0
part_ref[part_ref > zend] = 0
part_ref[part_ref != 0] = 10 # set to something
ret, thresh_ref = cv2.threshold(part_ref, 1, 255, cv2.THRESH_BINARY)
contours_ref, _ = cv2.findContours(thresh_ref.astype(dtype=numpy.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# find the largest contour
areas = [cv2.contourArea(cc) for cc in contours_ref]
c_max = numpy.argmax(areas)
# final result
return com, self.estimateHandsize(contours_ref[c_max], com, size)
else:
return com, size
# no appropriate hand detected
return numpy.array((0, 0, 0), numpy.float), size
def refineCoM(self, cropped, size, com):
"""
Refines the detection result of the hand
:return: center of hand
"""
imgD = numpy.asarray(cropped.copy(), 'float32')
imgD[imgD == 0] = com[2] + (size[2] / 2.)
imgD[imgD >= com[2] + (size[2] / 2.)] = com[2] + (size[2] / 2.)
imgD[imgD <= com[2] - (size[2] / 2.)] = com[2] - (size[2] / 2.)
imgD -= com[2]
imgD /= (size[2] / 2.)
test_data = numpy.zeros((1, 1, cropped.shape[0], cropped.shape[1]), dtype='float32')
test_data[0, 0] = imgD
# test_data2 = numpy.zeros((test_data.shape[0], test_data.shape[1], test_data.shape[2]//2, test_data.shape[3]//2), dtype='float32')
# test_data4 = numpy.zeros((test_data2.shape[0], test_data2.shape[1], test_data2.shape[2]//2, test_data2.shape[3]//2), dtype='float32')
# for j in range(test_data.shape[0]):
# for i in range(test_data.shape[1]):
# test_data2[j, i, :, :] = cv2.resize(test_data[j, i, :, :], (test_data2.shape[3], test_data2.shape[2]))
# test_data4[j, i, :, :] = cv2.resize(test_data2[j, i, :, :], (test_data4.shape[3], test_data4.shape[2]))
dsize = (int(test_data.shape[2]//2), int(test_data.shape[3]//2))
xstart = int(test_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(test_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
test_data2 = test_data[:, :, ystart:yend, xstart:xend]
dsize = (int(test_data.shape[2]//4), int(test_data.shape[3]//4))
xstart = int(test_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(test_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
test_data4 = test_data[:, :, ystart:yend, xstart:xend]
if self.refineNet.cfgParams.numInputs == 1:
jts = self.refineNet.computeOutput(test_data)
elif self.refineNet.cfgParams.numInputs == 3:
jts = self.refineNet.computeOutput([test_data, test_data2, test_data4])
else:
raise NotImplementedError("Number of inputs is {}".format(self.refineNet.cfgParams.numInputs))
return jts[0]*(size[2]/2.)
def moveCoM(self, dpt, cube, com, off, joints3D, M, pad_value=0):
"""
Adjust already cropped image such that a moving CoM normalization is simulated
:param dpt: cropped depth image with different CoM
:param cube: metric cube of size (sx,sy,sz)
:param com: original center of mass, in image coordinates (x,y,z)
:param off: offset to center of mass (dx,dy,dz) in 3D coordinates
:param joints3D: 3D joint coordinates, cropped to old CoM
:param pad_value: value of padding
:return: adjusted image, new 3D joint coordinates, new center of mass in image coordinates
"""
# if offset is 0, nothing to do
if numpy.allclose(off, 0.):
return dpt, joints3D, com, M
# add offset to com
new_com = self.importer.joint3DToImg(self.importer.jointImgTo3D(com) + off)
# check for 1/0.
if not (numpy.allclose(com[2], 0.) or numpy.allclose(new_com[2], 0.)):
# scale to original size
Mnew = self.comToTransform(new_com, cube, dpt.shape)
new_dpt = self.recropHand(dpt, Mnew, numpy.linalg.inv(M), dpt.shape, background_value=pad_value,
nv_val=32000., thresh_z=True, com=new_com, size=cube)
else:
Mnew = M
new_dpt = dpt
# adjust joint positions to new CoM
new_joints3D = joints3D + self.importer.jointImgTo3D(com) - self.importer.jointImgTo3D(new_com)
return new_dpt, new_joints3D, new_com, Mnew
def rotateHand(self, dpt, cube, com, rot, joints3D, pad_value=0):
"""
Rotate hand virtually in the image plane by a given angle
:param dpt: cropped depth image with different CoM
:param cube: metric cube of size (sx,sy,sz)
:param com: original center of mass, in image coordinates (x,y,z)
:param rot: rotation angle in deg
:param joints3D: original joint coordinates, in 3D coordinates (x,y,z)
:param pad_value: value of padding
        :return: adjusted image, new 3D joint coordinates, rotation angle in deg
"""
# if rot is 0, nothing to do
if numpy.allclose(rot, 0.):
return dpt, joints3D, rot
rot = numpy.mod(rot, 360)
M = cv2.getRotationMatrix2D((dpt.shape[1]//2, dpt.shape[0]//2), -rot, 1)
if self.resizeMethod == self.RESIZE_CV2_NN:
flags = cv2.INTER_NEAREST
elif self.resizeMethod == self.RESIZE_CV2_LINEAR:
flags = cv2.INTER_LINEAR
else:
raise NotImplementedError
new_dpt = cv2.warpAffine(dpt, M, (dpt.shape[1], dpt.shape[0]), flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=pad_value)
com3D = self.importer.jointImgTo3D(com)
joint_2D = self.importer.joints3DToImg(joints3D + com3D)
data_2D = numpy.zeros_like(joint_2D)
for k in xrange(data_2D.shape[0]):
data_2D[k] = rotatePoint2D(joint_2D[k], com[0:2], rot)
new_joints3D = (self.importer.jointsImgTo3D(data_2D) - com3D)
return new_dpt, new_joints3D, rot
def scaleHand(self, dpt, cube, com, sc, joints3D, M, pad_value=0):
"""
Virtually scale the hand by applying different cube
:param dpt: cropped depth image with different CoM
:param cube: metric cube of size (sx,sy,sz)
:param com: original center of mass, in image coordinates (x,y,z)
:param sc: scale factor for cube
:param joints3D: 3D joint coordinates, cropped to old CoM
:param pad_value: value of padding
:return: adjusted image, new 3D joint coordinates, new center of mass in image coordinates
"""
# if scale is 1, nothing to do
if numpy.allclose(sc, 1.):
return dpt, joints3D, cube, M
new_cube = [s*sc for s in cube]
# check for 1/0.
if not numpy.allclose(com[2], 0.):
# scale to original size
Mnew = self.comToTransform(com, new_cube, dpt.shape)
new_dpt = self.recropHand(dpt, Mnew, numpy.linalg.inv(M), dpt.shape, background_value=pad_value,
nv_val=32000., thresh_z=True, com=com, size=cube)
else:
Mnew = M
new_dpt = dpt
new_joints3D = joints3D
return new_dpt, new_joints3D, new_cube, Mnew
def recropHand(self, crop, M, Mnew, target_size, background_value=0., nv_val=0., thresh_z=True, com=None,
size=(250, 250, 250)):
if self.resizeMethod == self.RESIZE_CV2_NN:
flags = cv2.INTER_NEAREST
elif self.resizeMethod == self.RESIZE_CV2_LINEAR:
flags = cv2.INTER_LINEAR
else:
raise NotImplementedError
warped = cv2.warpPerspective(crop, numpy.dot(M, Mnew), target_size, flags=flags,
borderMode=cv2.BORDER_CONSTANT, borderValue=float(background_value))
warped[numpy.isclose(warped, nv_val)] = background_value
if thresh_z is True:
assert com is not None
_, _, _, _, zstart, zend = self.comToBounds(com, size)
msk1 = numpy.logical_and(warped < zstart, warped != 0)
msk2 = numpy.logical_and(warped > zend, warped != 0)
warped[msk1] = zstart
warped[msk2] = 0. # backface is at 0, it is set later
return warped
@staticmethod
def sampleRandomPoses(importer, rng, base_poses, base_com, base_cube, num_poses, aug_modes,
retall=False, rot3D=False, sigma_com=None, sigma_sc=None, rot_range=None):
"""
Sample random poses such that we can estimate the subspace more robustly
:param importer: importer
:param rng: RandomState
:param base_poses: set of base 3D poses
:param base_com: corresponding 3D crop locations
:param base_cube: corresponding crop cubes
:param num_poses: number of poses to sample
:param aug_modes: augmentation modes (comb, com, rot, sc, none)
:param retall: return all random parameters
:param rot3D: augment rotation in 3D, which is only possible with poses not images
:return: random poses
"""
if sigma_com is None:
sigma_com = 5.
if sigma_sc is None:
sigma_sc = 0.02
if rot_range is None:
rot_range = 180.
all_modes = ['none', 'rot', 'sc', 'com', 'rot+com', 'com+rot',
'rot+com+sc', 'rot+sc+com', 'sc+rot+com', 'sc+com+rot', 'com+sc+rot', 'com+rot+sc']
assert all([aug_modes[i] in all_modes for i in xrange(len(aug_modes))])
new_poses = numpy.zeros((int(num_poses), base_poses.shape[1], base_poses.shape[2]), dtype=base_poses.dtype)
new_com = numpy.zeros((int(num_poses), 3), dtype=base_poses.dtype)
new_cube = numpy.zeros((int(num_poses), 3), dtype=base_poses.dtype)
modes = rng.randint(0, len(aug_modes), int(num_poses))
ridxs = rng.randint(0, base_poses.shape[0], int(num_poses))
off = rng.randn(int(num_poses), 3) * sigma_com
sc = numpy.fabs(rng.randn(int(num_poses)) * sigma_sc + 1.)
rot = rng.uniform(-rot_range, rot_range, size=(int(num_poses), 3))
if aug_modes == ['none']:
if retall is True:
return base_poses / (base_cube[:, 2]/2.)[:, None, None], base_com, base_cube
else:
return base_poses / (base_cube[:, 2]/2.)[:, None, None]
for i in xrange(int(num_poses)):
mode = modes[i]
ridx = ridxs[i]
cube = base_cube[ridx]
com3D = base_com[ridx]
pose = base_poses[ridx]
if aug_modes[mode] == 'com':
# augment com
new_com[i] = com3D + off[i]
new_cube[i] = cube
new_poses[i] = (pose + com3D - new_com[i]) / (new_cube[i][2]/2.)
elif aug_modes[mode] == 'rot':
# augment rotation
new_com[i] = com3D
new_cube[i] = cube
if rot3D is False:
joint_2D = importer.joints3DToImg(pose + new_com[i])
data_2D = rotatePoints2D(joint_2D, importer.joint3DToImg(com3D)[0:2], rot[i, 0])
new_poses[i] = (importer.jointsImgTo3D(data_2D) - new_com[i]) / (new_cube[i][2]/2.)
else:
new_poses[i] = (rotatePoints3D(pose + new_com[i], new_com[i], rot[i, 0], rot[i, 1], rot[i, 2]) - new_com[i]) / (new_cube[i][2]/2.)
elif aug_modes[mode] == 'sc':
# augment cube
new_com[i] = com3D
new_cube[i] = cube*sc[i]
new_poses[i] = pose / (new_cube[i][2]/2.)
elif aug_modes[mode] == 'none':
# no augmentation
new_com[i] = com3D
new_cube[i] = cube
new_poses[i] = pose / (new_cube[i][2]/2.)
elif aug_modes[mode] == 'rot+com' or aug_modes[mode] == 'com+rot':
# augment com+rot
new_com[i] = com3D + off[i]
new_cube[i] = cube
pose = (pose + com3D - new_com[i])
if rot3D is False:
joint_2D = importer.joints3DToImg(pose + com3D)
data_2D = rotatePoints2D(joint_2D, importer.joint3DToImg(new_com[i])[0:2], rot[i, 0])
new_poses[i] = (importer.jointsImgTo3D(data_2D) - com3D) / (new_cube[i][2] / 2.)
else:
new_poses[i] = (rotatePoints3D(pose + new_com[i], new_com[i], rot[i, 0], rot[i, 1], rot[i, 2]) - new_com[i]) / (new_cube[i][2] / 2.)
            elif aug_modes[mode] in ('rot+com+sc', 'rot+sc+com', 'sc+rot+com', 'sc+com+rot', 'com+sc+rot', 'com+rot+sc'):
# augment com+scale+rot
new_com[i] = com3D + off[i]
new_cube[i] = cube
pose = (pose + com3D - new_com[i])
pose = pose * sc[i]
if rot3D is False:
joint_2D = importer.joints3DToImg(pose + com3D)
data_2D = rotatePoints2D(joint_2D, importer.joint3DToImg(new_com[i])[0:2], rot[i, 0])
new_poses[i] = (importer.jointsImgTo3D(data_2D) - com3D) / (new_cube[i][2] / 2.)
else:
new_poses[i] = (rotatePoints3D(pose + new_com[i], new_com[i], rot[i, 0], rot[i, 1], rot[i, 2]) - new_com[i]) / (new_cube[i][2] / 2.)
else:
raise NotImplementedError()
if retall is True:
return new_poses, new_com, new_cube, rot
else:
return new_poses
def estimateHandsize(self, contours, com, cube=(250, 250, 250), tol=0.):
"""
Estimate hand size from contours
:param contours: contours of hand
:param com: center of mass
:param cube: default cube
:param tol: tolerance to be added to all sides
:return: metric cube for cropping (x, y, z)
"""
x, y, w, h = cv2.boundingRect(contours)
# drawing = numpy.zeros((480, 640), dtype=float)
# cv2.drawContours(drawing, [contours], 0, (255, 0, 244), 1, 8)
# cv2.rectangle(drawing, (x, y), (x+w, y+h), (244, 0, 233), 2, 8, 0)
# cv2.imshow("contour", drawing)
# convert to cube
xstart = (com[0] - w / 2.) * com[2] / self.fx
xend = (com[0] + w / 2.) * com[2] / self.fx
ystart = (com[1] - h / 2.) * com[2] / self.fy
yend = (com[1] + h / 2.) * com[2] / self.fy
szx = xend - xstart
szy = yend - ystart
sz = (szx + szy) / 2.
cube = (sz + tol, sz + tol, sz + tol)
return cube
| gpl-3.0 |
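HandDetector.calculateCoM() above combines the 2-D centroid of the non-zero depth mask with the mean depth, and comToBounds() turns that CoM into a metric crop box by scaling through the focal length. The following standalone sketch reproduces both steps on a synthetic depth map; the intrinsics and blob geometry are made-up values, not taken from DeepPrior.

import numpy as np
from scipy import ndimage

fx = fy = 588.0                          # hypothetical focal lengths in pixels
dpt = np.zeros((480, 640), dtype=np.float32)
dpt[200:280, 280:360] = 400.0            # fake "hand" blob at 400 mm depth

mask = dpt > 0
cy, cx = ndimage.center_of_mass(mask)
com = np.array([cx, cy, dpt[mask].mean()])   # (x, y, z), z in mm, as in calculateCoM

size = (250.0, 250.0, 250.0)                 # metric crop cube in mm
half_px = size[0] / 2.0 * fx / com[2]        # mm -> pixels at the hand's depth
xstart, xend = int(com[0] - half_px), int(com[0] + half_px)
zstart, zend = com[2] - size[2] / 2.0, com[2] + size[2] / 2.0
print(com, (xstart, xend), (zstart, zend))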
quheng/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limit impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
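# Editorial sketch, not part of scikit-learn: the stability issue exercised by
# test_logistic_sigmoid above. A naive np.log(1 / (1 + np.exp(-x))) overflows
# exp() for large negative x; the usual stable form branches on the sign of x.
def _stable_log_logistic(x):
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x > 0
    out[pos] = -np.log1p(np.exp(-x[pos]))              # x > 0: exp(-x) is tiny
    out[~pos] = x[~pos] - np.log1p(np.exp(x[~pos]))    # x <= 0: exp(x) is tiny
    return out
# _stable_log_logistic(np.array([-100., 100.])) is approximately [-100., 0.],
# matching the values asserted for log_logistic on extreme_x above.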
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
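test_incremental_variance_update_formulas and test_incremental_variance_ddof above exercise the Youngs-and-Cramer style batch update behind _batch_mean_variance_update. A minimal self-contained sketch of that merge step (not scikit-learn's implementation), using population variances throughout:

import numpy as np

def combine_mean_var(mean_a, var_a, n_a, B):
    """Merge a running (mean, var, count) with a new batch B, column-wise."""
    n_b = B.shape[0]
    mean_b, var_b = B.mean(axis=0), B.var(axis=0)
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * n_b / n
    # combine sums of squared deviations, then convert back to a variance
    m2 = var_a * n_a + var_b * n_b + delta ** 2 * n_a * n_b / n
    return mean, m2 / n, n

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
mean, var, n = combine_mean_var(X[:40].mean(0), X[:40].var(0), 40, X[40:])
assert np.allclose(mean, X.mean(0)) and np.allclose(var, X.var(0))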
dch312/numpy | numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
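# Illustrative sketch (hedged): a small helper, not part of the original API, showing how
# poly() turns a root sequence into monic coefficients; the helper name and root values
# are arbitrary examples.
def _poly_example():
    # (x - 2)(x + 3)(x - 1) = x**3 - 7*x + 6, so the expected result is
    # array([ 1.,  0., -7.,  6.])
    return poly([2.0, -3.0, 1.0])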
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
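# Illustrative sketch (hedged): the companion-matrix construction described in the Notes
# of roots(), written out for one concrete polynomial; the helper name is arbitrary and
# the snippet is not used elsewhere in the module.
def _companion_example():
    p = array([3.2, 2.0, 1.0], dtype=float)
    A = diag(NX.ones((len(p) - 2,), p.dtype), -1)   # ones on the first subdiagonal
    A[0, :] = -p[1:] / p[0]                         # first row from scaled coefficients
    # eigvals(A) matches roots(p): approximately -0.3125 +/- 0.46351241j
    return eigvals(A)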
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
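# Illustrative sketch (hedged): differentiating an antiderivative recovers the original
# coefficients when the integration constants are zero; the helper name is arbitrary.
def _polyder_roundtrip():
    p = array([1.0, 1.0, 1.0])
    return polyder(polyint(p))   # array([ 1.,  1.,  1.]), the original coefficients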
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate.
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
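# Illustrative sketch (hedged): using the cov=True branch of polyfit() above to obtain
# per-coefficient variance estimates; the helper name and the synthetic data are
# illustrative only.
def _polyfit_cov_example():
    x = NX.arange(6.0)
    y = 1.0 + 2.0 * x + array([0.1, -0.1, 0.05, -0.05, 0.02, -0.02])  # slightly noisy line
    c, V = polyfit(x, y, 1, cov=True)
    # the diagonal of V holds the variance estimates for the slope and the intercept
    return c, V.diagonal()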
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
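# Illustrative sketch (hedged): Horner's scheme from the Notes above, spelled out for a
# single small case; the helper name is arbitrary.
def _horner_example(x=5.0):
    # evaluates p(x) = 3*x**2 + 0*x + 1 as ((3)*x + 0)*x + 1 = 76.0 for x = 5,
    # matching polyval([3, 0, 1], 5)
    acc = 0.0
    for c in [3.0, 0.0, 1.0]:
        acc = acc * x + c
    return acc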
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
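# Illustrative sketch (hedged): checking the division identity u = q*v + r with the
# numbers from the polydiv() docstring; the helper name is arbitrary.
def _polydiv_check():
    u = array([3.0, 5.0, 2.0])
    v = array([2.0, 1.0])
    q, r = polydiv(u, v)
    return polyadd(polymul(q, v), r)   # reconstructs array([ 3.,  5.,  2.])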
_poly_mat = re.compile(r"[*][*]([0-9]*)")
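# Note (hedged): _raise_power below rewrites substrings such as "x**2" produced by
# poly1d.__str__ into a two-line layout where each exponent is raised onto the line
# above its variable, wrapping at roughly `wrap` characters per line.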
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
tejasckulkarni/hydrology | Plots/MCMC/distribution_plot.py | 1 | 10969 | __author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import checkdam.checkdam as cd
from datetime import timedelta
import scipy.stats as stats
import matplotlib
matplotlib.rc('font', **{'family': 'sans-serif', 'serif': ['Computer Modern Roman']})
matplotlib.rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=36)
params = {'axes.labelsize': 28, # fontsize for x and y labels (was 10)
'axes.titlesize': 30,
'text.fontsize': 30, # was 10
'legend.fontsize': 30, # was 10
'xtick.labelsize': 28,
'ytick.labelsize': 28,
'text.usetex': True,
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
"""
Weather
Read from csv and create datetime index, re arrange columns, drop unwanted columns
"""
weather_ksndmc = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/KSNDMC/Tubgere_weather_01May14_10Feb15.csv'
weather_ksndmc_df = pd.read_csv(weather_ksndmc, sep=',') # read from csv
weather_ksndmc_df.drop(['Sl no', 'HOBLI'], inplace=True, axis=1) # drop columns
weather_date_format = "%d-%b-%y %H:%M:%S+05:30"
weather_ksndmc_df['date_time'] = pd.to_datetime(weather_ksndmc_df['DATE'] + " " + weather_ksndmc_df['TIME'],
format=weather_date_format)
weather_ksndmc_df.set_index(weather_ksndmc_df['date_time'], inplace=True) # create datetime index
weather_ksndmc_df.sort_index(inplace=True) # sort
cols = weather_ksndmc_df.columns.tolist() # rearrange columns
cols = cols[-1:] + cols[:-1]
weather_ksndmc_df = weather_ksndmc_df[cols]
weather_ksndmc_df.drop(['date_time', 'DATE', 'TIME'], inplace=True, axis=1) # drop columns
minute = weather_ksndmc_df.index.minute
weather_ksndmc_df = weather_ksndmc_df[
((minute == 0) | (minute == 15) | (minute == 30) | (minute == 45) | (minute == 60))]
# drop duplicates
weather_ksndmc_df['index'] = weather_ksndmc_df.index
weather_ksndmc_df.drop_duplicates(subset='index', take_last=True, inplace=True)
del weather_ksndmc_df['index']
weather_ksndmc_df = weather_ksndmc_df.sort()
start_time = min(weather_ksndmc_df.index)
end_time = max(weather_ksndmc_df.index)
new_index = pd.date_range(start=start_time, end=end_time, freq='15min')
weather_ksndmc_df = weather_ksndmc_df.reindex(new_index, fill_value=5)
weather_ksndmc_df["WIND_SPEED"] = np.where(weather_ksndmc_df["WIND_SPEED"] > 3.0, None,
weather_ksndmc_df['WIND_SPEED'])
max_limit = max(weather_ksndmc_df.index) - timedelta(days=1)
min_limit = min(weather_ksndmc_df.index) + timedelta(days=1)
for index in weather_ksndmc_df.index:
if weather_ksndmc_df['WIND_SPEED'][index] is None and (index > min_limit) and (index < max_limit):
previous_day_value = weather_ksndmc_df['WIND_SPEED'][index - timedelta(days=1)]
next_day_value = weather_ksndmc_df['WIND_SPEED'][index + timedelta(days=1)]
if (previous_day_value != None) and (next_day_value != None):
weather_ksndmc_df["WIND_SPEED"][index] = 0.5 * (previous_day_value + next_day_value)
"""
First period missing data
"""
prior_missing_data = weather_ksndmc_df['2014-08-25':'2014-08-28 17:15:00']
after_missing_data = weather_ksndmc_df['2014-09-03 14:30:00': '2014-09-06']
full_data = weather_ksndmc_df['2014-08-25':'2014-09-06']
hour_p = prior_missing_data.index.hour
day_selector_p = ((06 <= hour_p) & (hour_p <= 18))
night_selector_p = ((05 >= hour_p) | (hour_p >= 19))
prior_missing_data_day = prior_missing_data[day_selector_p]
prior_missing_data_night = prior_missing_data[night_selector_p]
hour_a = after_missing_data.index.hour
day_selector_a = ((06 <= hour_a) & (hour_a <= 18))
night_selector_a = ((05 >= hour_a) | (hour_a >= 19))
after_missing_data_day = after_missing_data[day_selector_a]
after_missing_data_night = after_missing_data[night_selector_a]
day_df = pd.concat((prior_missing_data_day, after_missing_data_day))
day_wind_speed = day_df['WIND_SPEED'].values
day_mean = np.mean(day_wind_speed)
day_sigma = np.std(day_wind_speed)
day_variance = day_sigma ** 2
night_df = pd.concat((prior_missing_data_night, after_missing_data_night))
night_wind_speed = night_df['WIND_SPEED'].values
night_mean = np.mean(night_wind_speed)
night_sigma = np.std(night_wind_speed)
night_variance = night_sigma ** 2
# Plot
print("Day Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f \n" % (day_mean, day_sigma, day_variance) + "Night Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f" % (night_mean, night_sigma, night_variance))
fig = plt.figure()
plt.plot(sorted(day_wind_speed), stats.norm.pdf(sorted(day_wind_speed), day_mean, day_sigma), 'g-o', label='Day')
night_fit = stats.norm.pdf(sorted(night_wind_speed), night_mean, night_sigma)
plt.plot(sorted(night_wind_speed), night_fit, '-ro', label="Night")
# plt.text(0.25, 1.1, "Day Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f \n" % (day_mean, day_sigma, day_variance) + "Night Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f" % (night_mean, night_sigma, night_variance), fontsize=28)
plt.title("PDF of wind speed for time period 2014-08-25 : 2014-09-06")
plt.legend().draggable()
plt.show()
raise SystemExit(0)
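# Note: the SystemExit above stops the script at this point, so the gap analyses below
# (January, late July/early August and mid-July) only run if that line is removed or
# commented out.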
"""
Missing period from Jan 06 to Jan 12
"""
prior_missing_data = weather_ksndmc_df['2014-12-25':'2015-01-06 21:45:00']
after_missing_data = weather_ksndmc_df['2015-01-12 13:00:00': '2015-01-27']
full_data_jan = weather_ksndmc_df['2014-12-25':'2015-01-27']
hour_p = prior_missing_data.index.hour
day_selector_p = ((06 <= hour_p) & (hour_p <= 18))
night_selector_p = ((05 >= hour_p) | (hour_p >= 19))
prior_missing_data_day = prior_missing_data[day_selector_p]
prior_missing_data_night = prior_missing_data[night_selector_p]
hour_a = after_missing_data.index.hour
day_selector_a = ((06 <= hour_a) & (hour_a <= 18))
night_selector_a = ((05 >= hour_a) | (hour_a >= 19))
after_missing_data_day = after_missing_data[day_selector_a]
after_missing_data_night = after_missing_data[night_selector_a]
day_df = pd.concat((prior_missing_data_day, after_missing_data_day))
# day_df = day_df[day_df['WIND_SPEED'] > 5.0]
day_wind_speed = day_df['WIND_SPEED'].values
day_mean = np.mean(day_wind_speed)
day_sigma = np.std(day_wind_speed, ddof=1)
day_variance = day_sigma ** 2
night_df = pd.concat((prior_missing_data_night, after_missing_data_night))
night_wind_speed = night_df['WIND_SPEED'].values
night_mean = np.mean(night_wind_speed)
night_sigma = np.std(night_wind_speed)
night_variance = night_sigma ** 2
# Plot
fig = plt.figure()
plt.plot(sorted(day_wind_speed), stats.norm.pdf(sorted(day_wind_speed), day_mean, day_sigma), 'g-o', label='Day')
night_fit = stats.norm.pdf(sorted(night_wind_speed), night_mean, night_sigma)
plt.plot(sorted(night_wind_speed), night_fit, '-ro', label="Night")
plt.text(1.04, 0.7, "Day Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f \n" % (day_mean, day_sigma, day_variance) + "Night Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f" % (night_mean, night_sigma, night_variance), fontsize=11)
plt.title("PDF of wind speed for time period 2014-12-25 : 2015-01-27")
plt.legend().draggable()
plt.show()
"""
Missing data July 31 to Aug 04
"""
prior_missing_data = weather_ksndmc_df['2014-07-27':'2014-07-31 17:00:00']
after_missing_data = weather_ksndmc_df['2014-08-04 13:00:00': '2014-08-08']
full_data_jul = weather_ksndmc_df['2014-07-27':'2014-08-08']
hour_p = prior_missing_data.index.hour
day_selector_p = ((06 <= hour_p) & (hour_p <= 18))
night_selector_p = ((05 >= hour_p) | (hour_p >= 19))
prior_missing_data_day = prior_missing_data[day_selector_p]
prior_missing_data_night = prior_missing_data[night_selector_p]
hour_a = after_missing_data.index.hour
day_selector_a = ((06 <= hour_a) & (hour_a <= 18))
night_selector_a = ((05 >= hour_a) | (hour_a >= 19))
after_missing_data_day = after_missing_data[day_selector_a]
after_missing_data_night = after_missing_data[night_selector_a]
day_df = pd.concat((prior_missing_data_day, after_missing_data_day))
# day_df = day_df[day_df['WIND_SPEED'] > 5.0]
day_wind_speed = day_df['WIND_SPEED'].values
day_mean = np.mean(day_wind_speed)
day_sigma = np.std(day_wind_speed, ddof=1)
day_variance = day_sigma ** 2
night_df = pd.concat((prior_missing_data_night, after_missing_data_night))
night_wind_speed = night_df['WIND_SPEED'].values
night_mean = np.mean(night_wind_speed)
night_sigma = np.std(night_wind_speed)
night_variance = night_sigma ** 2
# Plot
fig = plt.figure()
plt.plot(sorted(day_wind_speed), stats.norm.pdf(sorted(day_wind_speed), day_mean, day_sigma), 'g-o', label='Day')
night_fit = stats.norm.pdf(sorted(night_wind_speed), night_mean, night_sigma)
plt.plot(sorted(night_wind_speed), night_fit, '-ro', label="Night")
plt.text(1.4, 0.9, "Day Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f \n" % (day_mean, day_sigma, day_variance) + "Night Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f" % (night_mean, night_sigma, night_variance), fontsize=11)
plt.title("PDF of wind speed for time period 2014-07-27:2014-08-08")
plt.legend().draggable()
plt.show()
"""
Missing data from July 16 to July 17
"""
prior_missing_data = weather_ksndmc_df['2014-07-09':'2014-07-16 12:15:00']
after_missing_data = weather_ksndmc_df['2014-07-17 20:45:00': '2014-07-24']
full_data_jul_16 = weather_ksndmc_df['2014-07-03':'2014-07-30']
hour_p = prior_missing_data.index.hour
day_selector_p = ((06 <= hour_p) & (hour_p <= 18))
night_selector_p = ((05 >= hour_p) | (hour_p >= 19))
prior_missing_data_day = prior_missing_data[day_selector_p]
prior_missing_data_night = prior_missing_data[night_selector_p]
hour_a = after_missing_data.index.hour
day_selector_a = ((06 <= hour_a) & (hour_a <= 18))
night_selector_a = ((05 >= hour_a) | (hour_a >= 19))
after_missing_data_day = after_missing_data[day_selector_a]
after_missing_data_night = after_missing_data[night_selector_a]
day_df = pd.concat((prior_missing_data_day, after_missing_data_day))
# day_df = day_df[day_df['WIND_SPEED'] > 5.0]
day_wind_speed = day_df['WIND_SPEED'].values
day_mean = np.mean(day_wind_speed)
day_sigma = np.std(day_wind_speed, ddof=1)
day_variance = day_sigma ** 2
night_df = pd.concat((prior_missing_data_night, after_missing_data_night))
night_wind_speed = night_df['WIND_SPEED'].values
night_mean = np.mean(night_wind_speed)
night_sigma = np.std(night_wind_speed)
night_variance = night_sigma ** 2
# Plot
fig = plt.figure()
plt.plot(sorted(day_wind_speed), stats.norm.pdf(sorted(day_wind_speed), day_mean, day_sigma), 'g-o', label='Day')
night_fit = stats.norm.pdf(sorted(night_wind_speed), night_mean, night_sigma)
plt.plot(sorted(night_wind_speed), night_fit, '-ro', label="Night")
plt.text(1.4, 0.9, "Day Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f \n" % (day_mean, day_sigma, day_variance) + "Night Mean = %0.2f, Sigma = %0.2f, and Variance = %0.2f" % (night_mean, night_sigma, night_variance), fontsize=11)
plt.title("PDF of wind speed for time period 2014-07-03 : 2014-07-30")
plt.legend().draggable()
plt.show() | gpl-3.0 |
ahmadia/bokeh | examples/plotting/file/unemployment.py | 46 | 1846 | from collections import OrderedDict
import numpy as np
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.models import HoverTool
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
source = ColumnDataSource(
data=dict(month=month, year=year, color=color, rate=rate)
)
output_file('unemployment.html')
TOOLS = "resize,hover,save,pan,box_zoom,wheel_zoom"
p = figure(title="US Unemployment (1948 - 2013)",
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
toolbar_location="left", tools=TOOLS)
p.rect("year", "month", 1, 1, source=source,
color="color", line_color=None)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('date', '@month @year'),
('rate', '@rate'),
])
show(p) # show the plot
| bsd-3-clause |
OpenDrift/opendrift | opendrift/models/oceandrift.py | 1 | 26233 | # This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
import sys
from datetime import timedelta
import numpy as np
from scipy.interpolate import interp1d
import logging; logger = logging.getLogger(__name__)
from opendrift.models.basemodel import OpenDriftSimulation
from opendrift.elements import LagrangianArray
from opendrift.models.physics_methods import verticaldiffusivity_Large1994, verticaldiffusivity_Sundby1983, gls_tke
# Defining the oil element properties
class Lagrangian3DArray(LagrangianArray):
"""Extending LagrangianArray for elements moving in 3 dimensions
The particle may be buoyant and/or subject to vertical mixing.
Buoyant behaviour is described by the terminal velocity.
"""
variables = LagrangianArray.add_variables([
('wind_drift_factor', {'dtype': np.float32,
'units': '1',
'description': 'Elements at surface are moved with this '
'fraction of the wind vector, in addition to currents '
'and Stokes drift',
'default': 0.02}),
('terminal_velocity', {'dtype': np.float32,
'units': 'm/s',
'description': 'Terminal rise/sinking velocity (buoyancy) '
'in the ocean column',
'default': 0.})])
class OceanDrift(OpenDriftSimulation):
"""Open source buoyant particle trajectory model based on OpenDrift.
Developed at MET Norway
Generic module for particles that move in 3 dimensions
and may be subject to vertical turbulent mixing
with the possibility for positive or negative buoyancy.
Particles could be e.g. oil droplets, plankton, nutrients or sediments.
The model may be subclassed for more specific behaviour.
"""
ElementType = Lagrangian3DArray
max_speed = 1 # m/s
required_variables = {
'x_sea_water_velocity': {'fallback': 0},
'y_sea_water_velocity': {'fallback': 0},
'x_wind': {'fallback': 0},
'y_wind': {'fallback': 0},
'upward_sea_water_velocity': {'fallback': 0},
'ocean_vertical_diffusivity': {'fallback': 0,
'profiles': True},
'sea_surface_wave_significant_height': {'fallback': 0},
'sea_surface_wave_stokes_drift_x_velocity': {'fallback': 0},
'sea_surface_wave_stokes_drift_y_velocity': {'fallback': 0},
'sea_surface_wave_period_at_variance_spectral_density_maximum':
{'fallback': 0},
'sea_surface_wave_mean_period_from_variance_spectral_density_second_frequency_moment': {'fallback': 0},
'surface_downward_x_stress': {'fallback': 0},
'surface_downward_y_stress': {'fallback': 0},
'turbulent_kinetic_energy': {'fallback': 0},
'turbulent_generic_length_scale': {'fallback': 0},
'sea_floor_depth_below_sea_level': {'fallback': 10000},
'land_binary_mask': {'fallback': None},
}
# The depth range (in m) which profiles shall cover
required_profiles_z_range = [-20, 0]
def __init__(self, *args, **kwargs):
# Calling general constructor of parent class
super(OceanDrift, self).__init__(*args, **kwargs)
self._add_config({
'drift:vertical_advection': {'type': 'bool', 'default': True, 'description':
'Advect elements with vertical component of ocean current.',
'level': self.CONFIG_LEVEL_BASIC},
'drift:vertical_mixing': {'type': 'bool', 'default': False, 'level': self.CONFIG_LEVEL_BASIC,
'description': 'Activate vertical mixing scheme with inner loop'},
'vertical_mixing:timestep': {'type': 'float', 'min': 0.1, 'max': 3600, 'default': 60,
'level': self.CONFIG_LEVEL_ADVANCED, 'units': 'seconds', 'description':
'Time step used for inner loop of vertical mixing.'},
'vertical_mixing:diffusivitymodel': {'type': 'enum', 'default': 'environment',
'enum': ['environment', 'stepfunction', 'windspeed_Sundby1983',
'windspeed_Large1994', 'gls_tke','constant'], 'level': self.CONFIG_LEVEL_ADVANCED,
'units': 'seconds', 'description': 'Algorithm/source used for profile of vertical diffusivity. Environment means that diffusivity is acquired from readers or environment constants/fallback.'},
'vertical_mixing:TSprofiles': {'type': 'bool', 'default': False, 'level':
self.CONFIG_LEVEL_ADVANCED,
'description': 'Update T and S profiles within inner loop of vertical mixing. This takes more time, but may be slightly more accurate.'},
'drift:wind_drift_depth': {'type': 'float', 'default': 0.1,
'min': 0, 'max': 10, 'units': 'meters',
'description': 'The direct wind drift (windage) is linearly decreasing from the surface value (wind_drift_factor) until 0 at this depth.',
'level': self.CONFIG_LEVEL_ADVANCED},
'drift:stokes_drift': {'type': 'bool', 'default': True,
'description': 'Advection elements with Stokes drift (wave orbital motion).',
'level': self.CONFIG_LEVEL_ADVANCED},
'drift:use_tabularised_stokes_drift': {'type': 'bool', 'default': False,
'description': 'If True, Stokes drift is estimated from wind based on look-up-tables for given fetch (drift:tabularised_stokes_drift_fetch).',
'level': self.CONFIG_LEVEL_ADVANCED},
'drift:tabularised_stokes_drift_fetch': {'type': 'enum', 'enum': ['5000', '25000', '50000'], 'default': '25000',
'level': self.CONFIG_LEVEL_ADVANCED, 'description':
'The fetch length when using tabularised Stokes drift.'},
'general:seafloor_action': {'type': 'enum', 'default': 'lift_to_seafloor',
'enum': ['none', 'lift_to_seafloor', 'deactivate', 'previous'],
'description': '"deactivate": elements are deactivated; "lift_to_seafloor": elements are lifted to seafloor level; "previous": elements are moved back to previous position; "none"; seafloor is ignored.',
'level': self.CONFIG_LEVEL_ADVANCED},
'drift:truncate_ocean_model_below_m': {'type': 'float', 'default': None,
'min': 0, 'max': 10000, 'units': 'm',
'description': 'Ocean model data are only read down to at most this depth, and extrapolated below. May be specified to read less data to improve performance.',
'level': self.CONFIG_LEVEL_ADVANCED},
'seed:z': {'type': 'float', 'default': 0,
'min': -10000, 'max': 0, 'units': 'm',
'description': 'Depth below sea level where elements are released. This depth is neglected if seafloor seeding is selected.',
'level': self.CONFIG_LEVEL_ESSENTIAL},
'seed:seafloor': {'type': 'bool', 'default': False,
'description': 'Elements are seeded at seafloor, and seeding depth (z) is neglected.',
'level': self.CONFIG_LEVEL_ESSENTIAL},
})
def update(self):
"""Update positions and properties of elements."""
# Simply move particles with ambient current
self.advect_ocean_current()
# Advect particles due to surface wind drag,
# according to element property wind_drift_factor
self.advect_wind()
# Stokes drift
self.stokes_drift()
# Turbulent Mixing
if self.get_config('drift:vertical_mixing') is True:
self.update_terminal_velocity()
self.vertical_mixing()
else: # Buoyancy
self.vertical_buoyancy()
# Vertical advection
self.vertical_advection()
def disable_vertical_motion(self):
"""Deactivate any vertical processes/advection"""
conf = {
'drift:vertical_advection': False,
'drift:vertical_mixing': False}
for co, va in conf.items():
logger.info('Setting config: %s -> %s' % (co, va))
self.set_config(co, va)
def update_terminal_velocity(self, Tprofiles=None, Sprofiles=None,
z_index=None):
"""Calculate terminal velocity due to bouyancy from own properties
and environmental variables. Sub-modules should overload
this method for particle-specific behaviour
"""
pass
def prepare_vertical_mixing(self):
pass # To be implemented by subclasses as needed
def vertical_advection(self):
"""Move particles vertically according to vertical ocean current
Vertical advection by ocean currents is normally small
compared to the terminal velocity
"""
if self.get_config('drift:vertical_advection') is False:
logger.debug('Vertical advection deactivated')
return
in_ocean = np.where(self.elements.z<0)[0]
if len(in_ocean) > 0:
w = self.environment.upward_sea_water_velocity[in_ocean]
self.elements.z[in_ocean] = np.minimum(0,
self.elements.z[in_ocean] + self.elements.moving[in_ocean] * w * self.time_step.total_seconds())
else:
logger.debug('No vertical advection for elements at surface')
def vertical_buoyancy(self):
"""Move particles vertically according to their buoyancy"""
in_ocean = np.where(self.elements.z<0)[0]
if len(in_ocean) > 0:
self.elements.z[in_ocean] = np.minimum(0,
self.elements.z[in_ocean] + self.elements.terminal_velocity[in_ocean] * self.time_step.total_seconds())
# check for minimum height/maximum depth for each particle
Zmin = -1.*self.environment.sea_floor_depth_below_sea_level
# Let particles stick to bottom
bottom = np.where(self.elements.z < Zmin)
if len(bottom[0]) > 0:
logger.debug('%s elements reached seafloor, set to bottom' % len(bottom[0]))
self.interact_with_seafloor()
self.bottom_interaction(Zmin)
def surface_stick(self):
'''To be overloaded by subclasses, e.g. downward mixing of oil'''
# keep particle just below the surface
surface = np.where(self.elements.z >= 0)
if len(surface[0]) > 0:
self.elements.z[surface] = -0.01
def bottom_interaction(self, Zmin=None):
'''To be overloaded by subclasses, e.g. radionuclides in sediments'''
pass
def surface_wave_mixing(self, time_step_seconds):
'''To be overloaded by subclasses, e.g. downward mixing of oil'''
pass
def get_diffusivity_profile(self, model):
depths = self.environment_profiles['z']
wind, depth = np.meshgrid(self.wind_speed(), depths)
if model == 'windspeed_Large1994':
return verticaldiffusivity_Large1994(wind, depth)
elif model == 'windspeed_Sundby1983':
return verticaldiffusivity_Sundby1983(wind, depth)
elif model == 'gls_tke':
if not hasattr(self, 'gls_parameters'):
logger.info('Searching readers for GLS parameters...')
for reader_name, reader in self.readers.items():
if hasattr(reader, 'gls_parameters'):
self.gls_parameters = reader.gls_parameters
logger.info('Found gls-parameters in ' + reader_name)
break # Success
if not hasattr(self, 'gls_parameters'):
logger.info('Did not find gls-parameters in any readers.')
self.gls_parameters = None
windstress = np.sqrt(self.environment.surface_downward_x_stress**2 +
self.environment.surface_downward_y_stress**2)
return gls_tke(windstress, depth, self.sea_water_density(),
self.environment.turbulent_kinetic_energy,
self.environment.turbulent_generic_length_scale,
self.gls_parameters)
else:
raise ValueError('Unknown diffusivity model: ' + model)
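# Note (hedged): for the wind-speed based models handled above, the profile is built by
# broadcasting one wind speed per element against the profile depths (the meshgrid call
# at the top of get_diffusivity_profile), so the returned array has one diffusivity value
# per (depth, element) pair.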
def vertical_mixing(self, store_depths=False):
"""Mix particles vertically according to eddy diffusivity and buoyancy
Buoyancy is expressed as terminal velocity, which is the
steady-state vertical velocity due to positive or negative
buoyant behaviour. It is usually a function of particle density,
diameter, and shape.
        Vertical particle displacement due to turbulent mixing is
        calculated using a random walk scheme (Visser et al. 1996)
"""
if self.get_config('drift:vertical_mixing') is False:
logger.debug('Turbulent mixing deactivated')
return
self.timer_start('main loop:updating elements:vertical mixing')
dt_mix = self.get_config('vertical_mixing:timestep')
# minimum height/maximum depth for each particle
Zmin = -1.*self.environment.sea_floor_depth_below_sea_level
        # Any model-specific preparations
self.prepare_vertical_mixing()
# get profile of eddy diffusivity
# get vertical eddy diffusivity from environment or specific model
diffusivity_model = self.get_config('vertical_mixing:diffusivitymodel')
if diffusivity_model == 'environment':
if not hasattr(self, 'fallback_values'):
self.set_fallback_values()
            if 'ocean_vertical_diffusivity' in self.environment_profiles and not (
                    self.environment_profiles['ocean_vertical_diffusivity'].min() ==
                    self.fallback_values['ocean_vertical_diffusivity'] and
                    self.environment_profiles['ocean_vertical_diffusivity'].max() ==
                    self.fallback_values['ocean_vertical_diffusivity']):
Kprofiles = self.environment_profiles[
'ocean_vertical_diffusivity']
logger.debug('Using diffusivity from ocean model')
else:
logger.debug('Using diffusivity from Large1994 since model diffusivities not available')
# Using higher vertical resolution when analytical
self.environment_profiles['z'] = -np.arange(0, 50)
Kprofiles = self.get_diffusivity_profile('windspeed_Large1994')
elif diffusivity_model == 'constant':
            logger.debug('Using constant diffusivity specified by fallback_values'
                         '[ocean_vertical_diffusivity] = %s m2.s-1'
                         % (self.fallback_values['ocean_vertical_diffusivity']))
Kprofiles = self.fallback_values['ocean_vertical_diffusivity']*np.ones(
self.environment_profiles['ocean_vertical_diffusivity'].shape) # keep constant value for ocean_vertical_diffusivity
else:
logger.debug('Using functional expression for diffusivity')
# Using higher vertical resolution when analytical
if self.environment_profiles is None:
self.environment_profiles = {}
self.environment_profiles['z'] = -np.arange(0, 50)
# Note: although analytical functions, z is discretised
Kprofiles = self.get_diffusivity_profile(diffusivity_model)
logger.debug('Diffusivities are in range %s to %s' %
(Kprofiles.min(), Kprofiles.max()))
# get profiles of salinity and temperature
# (to save interpolation time in the inner loop)
if (self.get_config('vertical_mixing:TSprofiles') is True
and 'sea_water_salinity' in self.required_variables):
Sprofiles = self.environment_profiles['sea_water_salinity']
Tprofiles = \
self.environment_profiles['sea_water_temperature']
if ('sea_water_salinity' in self.fallback_values and
Sprofiles.min() == Sprofiles.max()):
                logger.debug('Salinity and temperature are fallback '
'values, skipping TSprofile')
Sprofiles = None
Tprofiles = None
else:
logger.debug('Using TSprofiles for vertical mixing')
else:
logger.debug('TSprofiles deactivated for vertical mixing')
Sprofiles = None
Tprofiles = None
# prepare vertical interpolation coordinates
z_i = range(self.environment_profiles['z'].shape[0])
if len(z_i) == 1:
z_index = 0
else:
z_index = interp1d(-self.environment_profiles['z'],
z_i, bounds_error=False,
fill_value=(0,len(z_i)-1)) # Extrapolation
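        # z_index maps a positive depth (-z) to a fractional index into the
        # profile arrays; depths outside the profile are clamped to the first/last level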
# Internal loop for fast time step of vertical mixing model.
# Random walk needs faster time step than horizontal advection.
logger.debug('Vertical mixing module:' +
self.get_config('vertical_mixing:diffusivitymodel'))
ntimes_mix = np.abs(int(self.time_step.total_seconds()/dt_mix))
logger.debug('Turbulent diffusion with random walk '
'scheme using ' + str(ntimes_mix) +
' fast time steps of dt=' + str(dt_mix) + 's')
if store_depths is not False:
depths = np.zeros((ntimes_mix, self.num_elements_active()))
depths[0, :] = self.elements.z
# Calculating dK/dz for all profiles before the loop
gradK = -np.gradient(Kprofiles, self.environment_profiles['z'], axis=0)
gradK[np.abs(gradK)<1e-10] = 0
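        # Zero out negligible gradients so that numerical noise in the profiles
        # does not introduce a spurious vertical drift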
for i in range(0, ntimes_mix):
#remember which particles belong to the exact surface
surface = self.elements.z == 0
# Update the terminal velocity of particles
self.update_terminal_velocity(Tprofiles=Tprofiles, Sprofiles=Sprofiles, z_index=z_index)
w = self.elements.terminal_velocity
# Diffusivity and its gradient at z
zi = np.round(z_index(-self.elements.z)).astype(np.uint8)
Kz = Kprofiles[zi, range(Kprofiles.shape[1])]
dKdz = gradK[zi, range(Kprofiles.shape[1])]
# Visser et al. 1996 random walk mixing
# requires an inner loop time step dt such that
# dt << (d2K/dz2)^-1, e.g. typically dt << 15min
R = 2*np.random.random(self.num_elements_active()) - 1
r = 1.0/3
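            # R is uniform on [-1, 1] with variance r = 1/3, so the random step
            # R*sqrt(2*Kz*dt_mix/r) has standard deviation sqrt(2*Kz*dt_mix),
            # as required by the Visser random walk scheme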
# New position = old position - up_K_flux + random walk
self.elements.z = self.elements.z - self.elements.moving*(
dKdz*dt_mix - R*np.sqrt((Kz*dt_mix*2/r)))
# Reflect from surface
reflect = np.where(self.elements.z >= 0)
if len(reflect[0]) > 0:
self.elements.z[reflect] = -self.elements.z[reflect]
# Reflect elements going below seafloor
bottom = np.where(self.elements.z < Zmin)
if len(bottom[0]) > 0:
logger.debug('%s elements penetrated seafloor, lifting up' % len(bottom[0]))
self.elements.z[bottom] = 2*Zmin[bottom] - self.elements.z[bottom]
# Advect due to buoyancy
self.elements.z = self.elements.z + w*dt_mix*self.elements.moving
# Put the particles that belonged to the surface slick
# (if present) back to the surface
self.elements.z[surface] = 0.
# Formation of slick and wave mixing for surfaced particles
# if implemented for this class
self.surface_stick()
self.surface_wave_mixing(dt_mix)
# Let particles stick to bottom
bottom = np.where(self.elements.z < Zmin)
if len(bottom[0]) > 0:
logger.debug('%s elements reached seafloor, set to bottom' % len(bottom[0]))
self.interact_with_seafloor()
self.bottom_interaction(Zmin)
if store_depths is not False:
depths[i, :] = self.elements.z
self.timer_end('main loop:updating elements:vertical mixing')
if store_depths is not False:
return depths
else:
return None
def animate_vertical_distribution(self, depths=None, maxdepth=None, bins=50, filename=None):
"""Function to animate vertical distribution of particles"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, (axk, axn) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 3]})
if depths is not None: # Debug mode, output from one cycle has been provided
z = depths
time_step = self.get_config('vertical_mixing:timestep')
times = [self.time + i*timedelta(seconds=time_step) for i in range(z.shape[0])]
else:
z = self.get_property('z')[0]
z = np.ma.filled(z, np.nan)
K = self.get_property('ocean_vertical_diffusivity')[0]
time_step = self.time_step.total_seconds()
times = self.get_time_array()[0]
if maxdepth is None:
maxdepth = np.nanmin(z)
if maxdepth > 0:
maxdepth = -maxdepth # negative z
if depths is not None:
axk.plot(np.nanmean(self.environment_profiles['ocean_vertical_diffusivity'], 1), self.environment_profiles['z'])
xmax = self.environment_profiles['ocean_vertical_diffusivity'].max()
else:
axk.plot(K, z, 'k.')
xmax = np.nanmax(K)
axk.set_ylim([maxdepth, 0])
axk.set_xlim([0, xmax*1.1])
axk.set_ylabel('Depth [m]')
axk.set_xlabel('Vertical diffusivity [$m^2/s$]')
hist_series = np.zeros((bins, len(times)))
bin_series = np.zeros((bins+1, len(times)))
for i in range(len(times)):
hist_series[:,i], bin_series[:,i] = np.histogram(z[i,:][np.isfinite(z[i,:])], bins=bins)
maxnum = hist_series.max()
def update_histogram(i):
axn.clear()
axn.barh(bin_series[0:-1,i], hist_series[:,i], height=-maxdepth/bins, align='edge')
axn.set_ylim([maxdepth, 0])
axn.set_xlim([0, maxnum])
axn.set_title('%s UTC' % times[i])
axn.set_xlabel('Number of particles')
#axn.set_ylabel('Depth [m]')
        anim = animation.FuncAnimation(fig, update_histogram, len(times))
        if filename is not None or 'sphinx_gallery' in sys.modules:
            self._save_animation(anim, filename, fps=10)
else:
plt.show()
def plot_vertical_distribution(self):
"""Function to plot vertical distribution of particles"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
from pylab import axes, draw
from matplotlib import dates
fig = plt.figure()
mainplot = fig.add_axes([.15, .3, .8, .5])
sliderax = fig.add_axes([.15, .08, .75, .05])
data = self.history['z'].T[1, :]
tslider = Slider(sliderax, 'Timestep', 0, self.steps_output-1,
valinit=self.steps_output-1, valfmt='%0.0f')
try:
dz = self.get_config('vertical_mixing:verticalresolution')
        except Exception:
dz = 1.
maxrange = -100
def update(val):
tindex = int(tslider.val)
mainplot.cla()
mainplot.grid()
mainplot.hist(self.history['z'].T[tindex, :], bins=int(-maxrange/dz),
range=[maxrange, 0], orientation='horizontal')
mainplot.set_ylim([maxrange, 0])
mainplot.set_xlabel('number of particles')
mainplot.set_ylabel('depth [m]')
x_wind = self.history['x_wind'].T[tindex, :]
y_wind = self.history['y_wind'].T[tindex, :]
windspeed = np.mean(np.sqrt(x_wind**2 + y_wind**2))
mainplot.set_title(str(self.get_time_array()[0][tindex]) +
#' Percent at surface: %.1f %' % percent_at_surface)
' Mean windspeed: %.1f m/s' % windspeed)
draw()
update(0) # Plot initial distribution
tslider.on_changed(update)
plt.show()
def plotter_vertical_distribution_time(self, ax=None, mask=None,
dz=1., maxrange=-100, bins=None, step=1):
"""Function to plot vertical distribution of particles.
Use mask to plot any selection of particles.
"""
from pylab import axes, draw
from matplotlib import dates, pyplot
if ax is None:
fig = pyplot.figure()
ax = fig.gca()
show = True
else:
show = False
if mask is None: # create a mask that is True for all particles
mask = self.history['z'].T[0] == self.history['z'].T[0]
if bins is None:
bins=int(-maxrange/dz)
ax.hist(self.history['z'].T[step,mask], bins=bins,
range=[maxrange, 0], orientation='horizontal')
ax.set_ylim([maxrange, 0])
ax.grid()
#ax.set_xlim([0, mask.sum()*.15])
ax.set_xlabel('Number of particles')
ax.set_ylabel('Depth [m]')
x_wind = self.history['x_wind'].T[step, :]
        y_wind = self.history['y_wind'].T[step, :]
windspeed = np.mean(np.sqrt(x_wind**2 + y_wind**2))
ax.set_title(str(self.get_time_array()[0][step]) +
' Mean windspeed: %.1f m/s' % windspeed)
if show is True:
pyplot.show()
| gpl-2.0 |
WindfallLabs/dslw | dslw/addons/_gpd.py | 1 | 8015 | # -*- coding: utf-8 -*-
"""
addons/_gpd.py -- geopandas addons for dslw (not implemented)
Copyright (c) 2016 Garin Wally
MIT License; see LICENSE
"""
from __future__ import unicode_literals
import re
from os.path import commonprefix
import sys
import apsw
from pandas import read_sql
from geopandas import GeoSeries, GeoDataFrame
from shapely.wkt import loads as wkt_loads
#from _util import assert_sql
# =============================================================================
# GLOBALS
__all__ = ['as_GeoDataFrame', 'from_GeoDataFrame']
# =============================================================================
# FUNCTIONS
def as_GeoDataFrame(sql, con, geom_col, allow_null_geom=False, crs=None,
index_col=None, coerce_float=True, params=None):
"""Reads an SQLite query as a geopandas.GeoDataFrame object.
Args:
sql (str): SQL query that will become a GeoDataFrame.
con: DB connection object or SQLAlchemy engine
geom_col (str): column name to convert to shapely geometries.
crs (int): (optional) CRS to use for the returned GeoDataFrame
Returns:
A `geopandas.GeoDataFrame` object.
See the documentation for pandas.read_sql for further explanation
of the following parameters:
index_col, coerce_float, params
NOTE: this cannot handle NULL geometries.
"""
# TODO: change geom_col to optional param, allow "FIND" option to auto-detect geom col
# TODO: if geom_col is None, just use pandas.read_sql()
#assert_sql(sql)
#assert_sql(geom_col) # TODO: allow None for non-spatial GDFs
df = read_sql(sql, con, index_col=index_col,
coerce_float=coerce_float, params=params)
# Ensure geometry column was specified correctly
if geom_col not in df:
raise ValueError("Geometry column specified does not exist: "
"'{0}'".format(geom_col))
# TODO: OR set default geom_col with conn.get_geom_col_name(table)
# Create internal cursor
_c = con.cursor()
# Get table name
from_table = sql[sql.lower().index("from"):]
    table = from_table.split()[1]  # token immediately after FROM
# Get crs if None
if not crs:
srid_q = "SELECT SRID({geom}) FROM {table}".format(geom=geom_col,
table=table)
crs = _c.execute(srid_q).fetchone()[0]
# Collect just the geometry column as a separate DataFrame
as_text = "ST_AsText({0})".format(geom_col)
geom_sql = "SELECT {0} {1}".format(as_text, from_table)
# Ensure there are no Null geometries. Maybe it's my bad data...
# prevents 'Windows Error: exception: access violation reading 0x0000000'
'''
_c.execute("SELECT IsValidReason({}) FROM {}".format(geom_col, table)) # TODO: remove, causes error with spatialite400x.dll (arc)
assert (u'Invalid: NULL Geometry',) not in set(_c.fetchall()), \
"Cannot open with geopandas: Null geometries found."
'''
wkt_geoms = read_sql(geom_sql, con)
    # Convert to shapely geometries (wkt_loads throws an error if x is None)
shapely_geoms = wkt_geoms[as_text].apply(lambda x: wkt_loads(x.encode()))
# Combine the geometry and the rest of the data
df[geom_col] = GeoSeries(shapely_geoms)
_c.close()
return GeoDataFrame(df, crs=crs, geometry=geom_col)
def from_GeoDataFrame(dataframe, con, table_name,
new_geom='Shape', if_exists='fail', parse_types=True):
"""Loads a GeoDataFrame into a SpatiaLite Database.
Args:
dataframe (geopandas.GeoDataFrame): data frame to convert.
con (dslw.SpatialDB): connection to database.
table_name (str): name of new db table.
new_geom (str): new geometry column name. ArcGIS requires 'Shape'.
if_exists (str): method to handle existing table_name.
Options:
'fail': allows exception to be raised.
'replace': drops table if it exists.
"""
# Assert "safe" inputs
#assert_sql(table_name)
# Replace / drop table if exists
if if_exists == 'replace':
_c = con.cursor()
_c.execute("BEGIN")
_c.execute("DROP TABLE IF EXISTS {table}".format(table=table_name))
_c.execute("COMMIT")
_c.execute("VACUUM")
_c.close()
del _c
# Copy input GeoDataFrame (gdf)
gdf = dataframe.copy()
# Rename 'Shape' field if exists
if "Shape" in gdf.columns:
gdf.rename(columns={"Shape": "old_shape"}, inplace=True)
gdf.set_geometry("old_shape", inplace=True)
# Shape Type
geom_types = gdf.geom_type.unique()
geom_type = commonprefix([g[::-1] for g in geom_types])[::-1]
if geom_type == '':
raise ValueError("Geometry column cannot contains mutiple "
"geometry types when writing to SpatiaLite.")
# Dimension (2D, 3D, 2D with Measure, 3D with Measure)
dim = "XY"
if geom_type[-1] == "Z":
dim += "Z"
elif geom_type[-1] == "M":
dim += "M"
elif geom_type.endswith("ZM"):
dim += "ZM"
# Column names
# NOTE: ArcGIS FeatureClassToFeatureclass uses the Shape field.
geom_col = gdf._geometry_column_name
create_fields = [col for col in gdf.columns]
create_values = "NULL, " + ", ".join(["?"]*len(create_fields))
# NOTE: the 'NULL' above is replaced by the autoincrement primary key
# Column types # TODO: WIP
# NOTE: although this section is optionally run (due to incorrect type
# assignments), compliance with ArcGIS requires that field types are
# explicitly assigned.
if parse_types:
type_sym = re.compile("[^a-zA-Z]|type")
SQLite_types = {'None': 'NULL', 'float': 'REAL', 'int': 'INTEGER'}
name_types = []
for col in create_fields:
col_type = str(type(gdf[col].iloc[0]))
col_type = col_type.split(".")[-1]
col_type = type_sym.sub("", col_type)
try:
name_types.append(" ".join([col, SQLite_types[col_type]]))
except KeyError:
name_types.append(" ".join([col, "TEXT"]))
create_fields = name_types
# SRID (must be "EPSG:<number>")
if type(gdf.crs) == dict:
srid = int(gdf.crs["init"].split(":")[-1])
elif type(gdf.crs) == str and "EPSG:" in gdf.crs:
srid = int(gdf.crs.split(":")[-1])
else:
raise ValueError("Currently, only EPSG SRIDs are allowed")
# Convert gdf geometry column from WKT/Shapely to str
gdf[geom_col] = gdf.apply(lambda x: str(x[geom_col]), axis=1)
# CREATE table
# NOTE: 'INTEGER PRIMARY KEY' defaults to autoincrement; OID is used for
# compliance with ArcGIS.
_c = con.cursor()
_c.execute("BEGIN")
create_sql = "CREATE TABLE {table}".format(table=table_name)
create_sql += " (OID INTEGER PRIMARY KEY, {})".format(
', '.join(create_fields))
_c.execute(create_sql)
_c.execute("COMMIT")
# INSERT data
_c.execute("BEGIN")
insert_sql = "INSERT INTO {table} VALUES ({values})".format(
table=table_name, values=create_values)
for ind, row in gdf.iterrows():
_c.execute(insert_sql, row)
_c.execute("COMMIT")
# Add new geometry column
_c.execute("BEGIN")
add_sql = ("SELECT AddGeometryColumn('{table}', '{geom}', {srid},"
"'{geom_type}', '{dim}', 1)")
add_sql = add_sql.format(table=table_name, geom=new_geom, srid=srid,
geom_type=geom_type, dim=dim)
_c.execute(add_sql)
_c.execute("COMMIT")
# Fill new geometry column
_c.execute("BEGIN")
update_sql = "UPDATE {table} SET {geom} = GeomFromText({old_geom}, {srid})"
_c.execute(update_sql.format(table=table_name, geom=new_geom,
old_geom=geom_col, srid=srid))
_c.execute("COMMIT")
# TODO: make temp table then drop listed columns (WKT, OBJECTID, etc)
_c.execute("VACUUM")
_c.close()
return
| mit |
ChanChiChoi/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
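        # The multiplicative updates keep W and H non-negative; convergence is
        # checked every 10 iterations via the largest elementwise update factor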
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
talevy/hetnet | oldsim/simcode/draw.py | 1 | 1414 | from numpy import sqrt, linspace
import numpy as np
import scipy as sp
from scipy.integrate import odeint
from pylab import plot, axis, show, savefig
import matplotlib.pyplot as plt
from math import pow
import random
from igraph import *
import sys
import glob
from urchinsim import *
def plotdiff(x, y, filepath):
''' run through set of files and average them '''
plt.figure()
labels = ['A > B', 'B > A', 'H > A', 'A > H', 'H > B', 'B > H']
for i in xrange(len(y)):
plt.plot(x,y[i], label=(labels[i]))
plt.xlabel('number of edges removed')
plt.ylabel('# genes differentially expressed')
plt.title('Model')
plt.legend(loc=0)
savefig(filepath)
def sanitize(diff):
return map(int, diff.strip().split(','))
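# Each input line is expected to be a comma-separated list of integer counts
# (genes differentially expressed for each number of removed edges)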
def get_diff(filename):
f = open(filename, 'r')
diffs = f.readlines()
f.close()
diffs = map(sanitize, diffs)
return diffs
def main():
if len(sys.argv) > 1:
name = sys.argv[1]
else:
print "add filename after"
exit()
files = glob.glob('%s[0-9]*.txt' % name)
diffsum = None
for f in files:
diffs = np.array(get_diff(f))
        if diffsum is not None:
diffsum += diffs
else:
diffsum = diffs
diffavg = diffsum/float(len(files))
x = [3*i for i in xrange(len(diffavg[0]))]
plotdiff(x, diffavg,'urchin-diff.png')
if __name__=='__main__':
main()
| bsd-2-clause |
PatrickChrist/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
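# X now holds 100 noisy points along roughly two periods of a sine curve in the plane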
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
Laurae2/LightGBM | tests/python_package_test/test_engine.py | 1 | 23263 | # coding: utf-8
# pylint: skip-file
import copy
import math
import os
import unittest
import lightgbm as lgb
import random
import numpy as np
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
load_iris, load_svmlight_file)
from sklearn.metrics import log_loss, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, TimeSeriesSplit
try:
import pandas as pd
IS_PANDAS_INSTALLED = True
except ImportError:
IS_PANDAS_INSTALLED = False
try:
import cPickle as pickle
except ImportError:
import pickle
def multi_logloss(y_true, y_pred):
return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])
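# multi_logloss is the mean negative log-probability assigned to the true class,
# i.e. the same quantity as LightGBM's 'multi_logloss' metric on softmax outputs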
class TestEngine(unittest.TestCase):
def test_binary(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1,
'num_iteration': 50 # test num_iteration in dict here
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = log_loss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.15)
self.assertEqual(len(evals_result['valid_0']['binary_logloss']), 50)
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
def test_rf(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'boosting_type': 'rf',
'objective': 'binary',
'bagging_freq': 1,
'bagging_fraction': 0.5,
'feature_fraction': 0.5,
'num_leaves': 50,
'metric': 'binary_logloss',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = log_loss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.25)
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
def test_regression(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'metric': 'l2',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = mean_squared_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 16)
self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)
def test_missing_value_handle(self):
X_train = np.zeros((1000, 1))
y_train = np.zeros(1000)
trues = random.sample(range(1000), 200)
for idx in trues:
X_train[idx, 0] = np.nan
y_train[idx] = 1
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'metric': 'l2',
'verbose': -1,
'boost_from_average': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
ret = mean_squared_error(y_train, gbm.predict(X_train))
self.assertLess(ret, 0.005)
self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)
def test_missing_value_handle_na(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [1, 1, 1, 1, 0, 0, 0, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'zero_as_missing': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_missing_value_handle_zero(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'zero_as_missing': True
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_missing_value_handle_none(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'use_missing': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
self.assertAlmostEqual(pred[0], pred[1], places=5)
self.assertAlmostEqual(pred[-1], pred[0], places=5)
def test_categorical_handle(self):
x = [0, 1, 2, 3, 4, 5, 6, 7]
y = [0, 1, 0, 1, 0, 1, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'min_data_per_group': 1,
'cat_smooth': 1,
'cat_l2': 0,
'max_cat_to_onehot': 1,
'zero_as_missing': True,
'categorical_column': 0
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_categorical_handle2(self):
x = [0, np.nan, 0, np.nan, 0, np.nan]
y = [0, 1, 0, 1, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'regression',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'min_data_per_group': 1,
'cat_smooth': 1,
'cat_l2': 0,
'max_cat_to_onehot': 1,
'zero_as_missing': False,
'categorical_column': 0
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
np.testing.assert_almost_equal(pred, y)
def test_multiclass(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 10,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.2)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_multiclass_prediction_early_stopping(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 10,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
ret = multi_logloss(y_test, gbm.predict(X_test, pred_parameter=pred_parameter))
self.assertLess(ret, 0.8)
self.assertGreater(ret, 0.5) # loss will be higher than when evaluating the full model
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 5.5}
ret = multi_logloss(y_test, gbm.predict(X_test, pred_parameter=pred_parameter))
self.assertLess(ret, 0.2)
def test_early_stopping(self):
X, y = load_breast_cancer(True)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
valid_set_name = 'valid_set'
# no early stopping
gbm = lgb.train(params, lgb_train,
num_boost_round=10,
valid_sets=lgb_eval,
valid_names=valid_set_name,
verbose_eval=False,
early_stopping_rounds=5)
self.assertEqual(gbm.best_iteration, 10)
self.assertIn(valid_set_name, gbm.best_score)
self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
# early stopping occurs
gbm = lgb.train(params, lgb_train,
valid_sets=lgb_eval,
valid_names=valid_set_name,
verbose_eval=False,
early_stopping_rounds=5)
self.assertLessEqual(gbm.best_iteration, 100)
self.assertIn(valid_set_name, gbm.best_score)
self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
def test_continue_train(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'regression',
'metric': 'l1',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
model_name = 'model.txt'
init_gbm.save_model(model_name)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=30,
valid_sets=lgb_eval,
verbose_eval=False,
# test custom eval metrics
feval=(lambda p, d: ('mae', mean_absolute_error(p, d.get_label()), False)),
evals_result=evals_result,
init_model='model.txt')
ret = mean_absolute_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 3.5)
self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)
for l1, mae in zip(evals_result['valid_0']['l1'], evals_result['valid_0']['mae']):
self.assertAlmostEqual(l1, mae, places=5)
os.remove(model_name)
def test_continue_train_multiclass(self):
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 3,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=30,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result,
init_model=init_gbm)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 1.5)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_cv(self):
X, y = load_boston(True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train)
# shuffle = False, override metric in params
params_with_metric = {'metric': 'l2', 'verbose': -1}
lgb.cv(params_with_metric, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=False,
metrics='l1', verbose_eval=False)
# shuffle = True, callbacks
lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=True,
metrics='l1', verbose_eval=False,
callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
# self defined folds
tss = TimeSeriesSplit(3)
folds = tss.split(X_train)
lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds, stratified=False, verbose_eval=False)
# lambdarank
X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train'))
q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train.query'))
params_lambdarank = {'objective': 'lambdarank', 'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, stratified=False, metrics='l2', verbose_eval=False)
def test_feature_name(self):
X, y = load_boston(True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train)
feature_names = ['f_' + str(i) for i in range(13)]
gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)
self.assertListEqual(feature_names, gbm.feature_name())
# test feature_names with whitespaces
feature_names_with_space = ['f ' + str(i) for i in range(13)]
gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space)
self.assertListEqual(feature_names, gbm.feature_name())
def test_save_load_copy_pickle(self):
def test_template(init_model=None, return_model=False):
X, y = load_boston(True)
params = {
'objective': 'regression',
'metric': 'l2',
'verbose': -1
}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train)
gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))
gbm = test_template(return_model=True)
ret_origin = test_template(init_model=gbm)
other_ret = []
gbm.save_model('lgb.model')
other_ret.append(test_template(init_model='lgb.model'))
gbm_load = lgb.Booster(model_file='lgb.model')
other_ret.append(test_template(init_model=gbm_load))
other_ret.append(test_template(init_model=copy.copy(gbm)))
other_ret.append(test_template(init_model=copy.deepcopy(gbm)))
with open('lgb.pkl', 'wb') as f:
pickle.dump(gbm, f)
with open('lgb.pkl', 'rb') as f:
gbm_pickle = pickle.load(f)
other_ret.append(test_template(init_model=gbm_pickle))
gbm_pickles = pickle.loads(pickle.dumps(gbm))
other_ret.append(test_template(init_model=gbm_pickles))
for ret in other_ret:
self.assertAlmostEqual(ret_origin, ret, places=5)
@unittest.skipIf(not IS_PANDAS_INSTALLED, 'pandas not installed')
def test_pandas_categorical(self):
X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
"B": np.random.permutation([1, 2, 3] * 100), # int
"C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float
"D": np.random.permutation([True, False] * 150)}) # bool
y = np.random.permutation([0, 1] * 150)
X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),
"B": np.random.permutation([1, 3] * 30),
"C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": np.random.permutation([True, False] * 30)})
for col in ["A", "B", "C", "D"]:
X[col] = X[col].astype('category')
X_test[col] = X_test[col].astype('category')
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
lgb_train = lgb.Dataset(X, y)
gbm0 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False)
pred0 = list(gbm0.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm1 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=[0])
pred1 = list(gbm1.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm2 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=['A'])
pred2 = list(gbm2.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm3 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=['A', 'B', 'C', 'D'])
pred3 = list(gbm3.predict(X_test))
gbm3.save_model('categorical.model')
gbm4 = lgb.Booster(model_file='categorical.model')
pred4 = list(gbm4.predict(X_test))
np.testing.assert_almost_equal(pred0, pred1)
np.testing.assert_almost_equal(pred0, pred2)
np.testing.assert_almost_equal(pred0, pred3)
np.testing.assert_almost_equal(pred0, pred4)
def test_reference_chain(self):
X = np.random.normal(size=(100, 2))
y = np.random.normal(size=100)
tmp_dat = lgb.Dataset(X, y)
# take subsets and train
tmp_dat_train = tmp_dat.subset(np.arange(80))
tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18))
params = {'objective': 'regression_l2', 'metric': 'rmse'}
gbm = lgb.train(params, tmp_dat_train, num_boost_round=20, valid_sets=[tmp_dat_train, tmp_dat_val])
def test_contribs(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1,
'num_iteration': 50 # test num_iteration in dict here
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
self.assertLess(np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1)), 1e-4)
| mit |
commaai/panda | tests/safety/test_tesla.py | 1 | 6109 | #!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
import panda.tests.safety.common as common
from panda.tests.safety.common import CANPackerPanda
ANGLE_DELTA_BP = [0., 5., 15.]
ANGLE_DELTA_V = [5., .8, .15] # windup limit
ANGLE_DELTA_VU = [5., 3.5, 0.4] # unwind limit
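# Allowed change in commanded steering angle per message, interpolated over speed:
# the limit tightens as speed increases, and unwinding towards centre
# (ANGLE_DELTA_VU) is permitted at a higher rate than winding up (ANGLE_DELTA_V)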
class CONTROL_LEVER_STATE:
DN_1ST = 32
UP_1ST = 16
DN_2ND = 8
UP_2ND = 4
RWD = 2
FWD = 1
IDLE = 0
def sign(a):
return 1 if a > 0 else -1
class TestTeslaSafety(common.PandaSafetyTest):
TX_MSGS = [[0x488, 0], [0x45, 0], [0x45, 2]]
STANDSTILL_THRESHOLD = 0
GAS_PRESSED_THRESHOLD = 3
RELAY_MALFUNCTION_ADDR = 0x488
RELAY_MALFUNCTION_BUS = 0
FWD_BLACKLISTED_ADDRS = {2: [0x488]}
FWD_BUS_LOOKUP = {0: 2, 2: 0}
def setUp(self):
self.packer = CANPackerPanda("tesla_can")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_TESLA, 0)
self.safety.init_tests()
def _angle_meas_msg(self, angle):
values = {"EPAS_internalSAS": angle}
return self.packer.make_can_msg_panda("EPAS_sysStatus", 0, values)
def _set_prev_angle(self, t):
t = int(t * 10)
self.safety.set_desired_angle_last(t)
def _angle_meas_msg_array(self, angle):
for _ in range(6):
self._rx(self._angle_meas_msg(angle))
def _pcm_status_msg(self, enable):
values = {"DI_cruiseState": 2 if enable else 0}
return self.packer.make_can_msg_panda("DI_state", 0, values)
def _lkas_control_msg(self, angle, enabled):
values = {"DAS_steeringAngleRequest": angle, "DAS_steeringControlType": 1 if enabled else 0}
return self.packer.make_can_msg_panda("DAS_steeringControl", 0, values)
def _speed_msg(self, speed):
values = {"ESP_vehicleSpeed": speed * 3.6}
return self.packer.make_can_msg_panda("ESP_B", 0, values)
def _brake_msg(self, brake):
values = {"driverBrakeStatus": 2 if brake else 1}
return self.packer.make_can_msg_panda("BrakeMessage", 0, values)
def _gas_msg(self, gas):
values = {"DI_pedalPos": gas}
return self.packer.make_can_msg_panda("DI_torque1", 0, values)
def _control_lever_cmd(self, command):
values = {"SpdCtrlLvr_Stat": command}
return self.packer.make_can_msg_panda("STW_ACTN_RQ", 0, values)
def _autopilot_status_msg(self, status):
values = {"autopilotStatus": status}
return self.packer.make_can_msg_panda("AutopilotStatus", 2, values)
def test_angle_cmd_when_enabled(self):
# when controls are allowed, angle cmd rate limit is enforced
speeds = [0., 1., 5., 10., 15., 50.]
angles = [-300, -100, -10, 0, 10, 100, 300]
for a in angles:
for s in speeds:
max_delta_up = np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_V)
max_delta_down = np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_VU)
# first test against false positives
self._angle_meas_msg_array(a)
self._rx(self._speed_msg(s))
self._set_prev_angle(a)
self.safety.set_controls_allowed(1)
# Stay within limits
# Up
self.assertEqual(True, self._tx(self._lkas_control_msg(a + sign(a) * max_delta_up, 1)))
self.assertTrue(self.safety.get_controls_allowed())
# Don't change
self.assertEqual(True, self._tx(self._lkas_control_msg(a, 1)))
self.assertTrue(self.safety.get_controls_allowed())
# Down
self.assertEqual(True, self._tx(self._lkas_control_msg(a - sign(a) * max_delta_down, 1)))
self.assertTrue(self.safety.get_controls_allowed())
# Inject too high rates
# Up
self.assertEqual(False, self._tx(self._lkas_control_msg(a + sign(a) * (max_delta_up + 1.1), 1)))
self.assertFalse(self.safety.get_controls_allowed())
# Don't change
self.safety.set_controls_allowed(1)
self._set_prev_angle(a)
self.assertTrue(self.safety.get_controls_allowed())
self.assertEqual(True, self._tx(self._lkas_control_msg(a, 1)))
self.assertTrue(self.safety.get_controls_allowed())
# Down
self.assertEqual(False, self._tx(self._lkas_control_msg(a - sign(a) * (max_delta_down + 1.1), 1)))
self.assertFalse(self.safety.get_controls_allowed())
# Check desired steer should be the same as steer angle when controls are off
self.safety.set_controls_allowed(0)
self.assertEqual(True, self._tx(self._lkas_control_msg(a, 0)))
def test_angle_cmd_when_disabled(self):
self.safety.set_controls_allowed(0)
self._set_prev_angle(0)
self.assertFalse(self._tx(self._lkas_control_msg(0, 1)))
self.assertFalse(self.safety.get_controls_allowed())
def test_acc_buttons(self):
self.safety.set_controls_allowed(1)
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.FWD))
self.assertTrue(self.safety.get_controls_allowed())
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.RWD))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.UP_1ST))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.UP_2ND))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.DN_1ST))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.DN_2ND))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self._tx(self._control_lever_cmd(CONTROL_LEVER_STATE.IDLE))
self.assertTrue(self.safety.get_controls_allowed())
def test_autopilot_passthrough(self):
for ap_status in range(16):
self.safety.set_controls_allowed(1)
self._rx(self._autopilot_status_msg(ap_status))
self.assertEqual(self.safety.get_controls_allowed(), (ap_status not in [3, 4, 5]))
if __name__ == "__main__":
unittest.main()
| mit |
nchaparr/Sam_Output_Anls | Cont_Stat.py | 1 | 4094 | from netCDF4 import Dataset
import glob,os.path
import numpy as np
from scipy.interpolate import UnivariateSpline
import matplotlib
import matplotlib.pyplot as plt
#import site
#site.addsitedir('/tera/phil/nchaparr/python')
import nchap_fun as nc
from nchap_class import *
"""
For contour plotting statistics output from an ensemble of runs
Editing it for plotting scaled TKE
"""
#TODO: may be obsolete
def Get_Var_Arrays(var, fignum):
"""Pulls stats output from a ensemble of cases, gets ensemble averages and does contour plots
on height, time grid
Arguments:
    var -- variable name as it appears in the stats file, e.g. 'TKE'
    fignum -- integer for figure
"""
#create list of filenames
ncfile_list = ["/newtera/tera/phil/nchaparr/sam_grex_ensemble/sam_case"+ str(i+2) + "/OUT_STAT/NCHAPP1_testing_doscamiopdata.nc" for i in range(1)]
#create lists for variable arrays from each case
vars_list = []
height_list = []
press_list = []
time_list = []
for i in range(len(ncfile_list)): #loop over list of nc files
thefile = ncfile_list[i]
print thefile
ncdata = Dataset(thefile,'r')
Vars = ncdata.variables[var][...]
#print Vars.shape
press = ncdata.variables['PRES'][...]
height = ncdata.variables['z'][...]
top = np.where(abs(height - 2000) < 50)[0][0]
Vars = Vars[:, 0:top]
height = height[0:top]
time = ncdata.variables['time'][...]
ncdata.close()
vars_list.append(Vars)
height_list.append(height)
time_list.append(time)
press_list.append(press)
#get ensemble averages
ens_vars = nc.Ensemble1_Average(vars_list)
ens_press = nc.Ensemble1_Average(press_list)
ens_press = np.transpose(ens_press)
#TODO: verify this is in time order!
#print 'ENSEMBLE AVERAGED', ens_vars.shape
time = (time_list[0]-169)*24
height = height_list[0] #TODO: time, height don't need to be averaged
#set up plot
theAx = nc.Do_Plot(fignum, 'Scaled ' + var + ' vs Scaled Height', 'Height/h', var+'/w*2', 111)
#print ens_vars.shape, height.shape
have_ens_vars = []
print len(time)
for i in range(len(time)):
if np.mod(i+1, 6)==0:
#print i, time[i], 1.0*(i+1)/10, "plotting"
#points = For_Plots("Mar52014")
#rinovals = points.rinovals()
#print len(rinovals[:,2])
#AvProfVars = points.AvProfVars()
#invrinos: [rino, invrino, wstar, S, tau, mltheta, deltatheta, pi3, pi4]
#wstar= rinovals[1.0*((i+1))*(6.0/6.0)-1, 2]
#h= AvProfVars[1.0*((i+1))*(6.0/6.0)-1, 1]
#h_index = np.where(height==h)[0]
print time[i]
have_ens_vars.append(1.0*np.sum(ens_vars[i]))
#print have_ens_vars[i]
theAx.plot(1.0*ens_vars[i], height, label=str(int(time[i])+1) + 'hrs')
#height, time = np.meshgrid(height, time)
#maxlev = np.max(ens_vars)
#minlev = np.min(ens_vars)
#step = (maxlev- minlev)/20
#levels = [i for i in np.arange(minlev, maxlev, step)]
#CS = plt.contourf(time, height, ens_vars, levels, cmap=plt.cm.bone)
#cbar = plt.colorbar(CS)
#cbar.ax.set_ylabel('colorbar')
#print 'plotting'
#theAx.plot(time, have_ens_vars, label=var)
plt.ylim(0, 2000)
plt.legend(loc = 'upper right', prop={'size':8})
plt.show()
#theAx = nc.Do_Plot(1, 'Layer Averaged, Scaled TKE Terms vs Time', 'TKE Term/w*3', 'Time (hrs)',111)
var_list = [ 'TKE', 'THETAV']
#BUOYA', 'BUOYAS', 'DISSIP', 'DISSIPS','DISSIPS', 'BUOYAS', TKE', 'TKES','TKE', 'TKES', 'WVADV', 'WUADV', 'WUPRES', 'WVPRES', 'WUSHEAR', 'WVSHEAR', 'W2ADV', 'W2PRES', 'W2BUOY', 'WVBUOY', 'WUBUOY', 'W2REDIS', 'W2DIFF'
for i in range(len(var_list)):
Get_Var_Arrays(var_list[i], i)
#plt.ylim()
#plt.ylim(-.000035, .000035)
#plt.legend(loc='lower right')
#plt.show()
| mit |
alkamid/wiktionary | statPlot.py | 1 | 1816 | import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
from matplotlib.ticker import MultipleLocator
# we should have this plot in Polish and English
terms = { 'pages': {'en': 'pages', 'pl': 'strony'},
'entries': {'en': 'entries', 'pl': 'hasła'},
'ylabel': {'en': 'number of pages and entries in thousands', 'pl': 'liczba stron i haseł w tys.'},
'title': {'en': 'Polish Wiktionary growth', 'pl': 'Rozwój Wikisłownika'}
}
def monthly_stat_plot(filename='stat-data.csv', lang='pl'):
data = np.genfromtxt(filename, dtype={'names': ('date', 'pages', 'sections'),
'formats': ('U7', 'i4', 'i4')}, delimiter=',')
# convert strings to datetime
dates = np.array([dt.datetime.strptime(d, '%m-%Y') for d in data['date']])
# we are missing the data on sections
mask = np.equal(data['sections'], -1)
# set up the plot
fig, ax = plt.subplots()
ax2 = ax.twinx()
# set nice colors from ColorBrewer2
colors = ['#67a9cf', '#ef8a62']
# plot section count
ax.plot(dates[~mask], data['sections'][~mask], lw=2.5, color=colors[1], label=terms['entries'][lang])
# plot page count and fill the area below
ax.plot(dates, data['pages'], lw=2.5, color=colors[0], label=terms['pages'][lang])
ax.fill_between(dates, data['pages'], alpha=0.5, color=colors[0])
# y axis label and plot title
ax.set_ylabel(terms['ylabel'][lang])
plt.suptitle(terms['title'][lang], fontsize=18)
ax2.set_ylim(ax.get_ylim())
ax2.set_yticks([data['sections'][-1], data['pages'][-1]])
ax.grid(True)
legend = ax.legend(loc='upper left')
plt.savefig('Wzrost_Wikislownika.svg')
if __name__ == '__main__':
monthly_stat_plot()
plt.show()
| mit |
jmcorgan/gnuradio | gr-digital/examples/berawgn.py | 32 | 4886 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
BER simulation for QPSK signals, compare to theoretical values.
Change the N_BITS value to simulate more bits per Eb/N0 value,
thus allowing to check for lower BER values.
Lower values will work faster, higher values will use a lot of RAM.
Also, this app isn't highly optimized--the flow graph is completely
reinstantiated for every Eb/N0 value.
Of course, expect the maximum value for BER to be one order of
magnitude below what you chose for N_BITS.
"""
import math
import numpy
from gnuradio import gr, digital
from gnuradio import analog
from gnuradio import blocks
import sys
try:
from scipy.special import erfc
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
# Best to choose powers of 10
N_BITS = 1e7
RAND_SEED = 42
def berawgn(EbN0):
""" Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
return 0.5 * erfc(math.sqrt(10**(float(EbN0)/10)))
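# Gray-coded QPSK has the same bit error rate per bit as BPSK at a given Eb/N0,
# so the BPSK expression above is a valid theoretical reference for this QPSK simulation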
class BitErrors(gr.hier_block2):
""" Two inputs: true and received bits. We compare them and
add up the number of incorrect bits. Because integrate_ff()
can only add up a certain number of values, the output is
not a scalar, but a sequence of values, the sum of which is
the BER. """
def __init__(self, bits_per_byte):
gr.hier_block2.__init__(self, "BitErrors",
gr.io_signature(2, 2, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_int))
# Bit comparison
comp = blocks.xor_bb()
intdump_decim = 100000
if N_BITS < intdump_decim:
intdump_decim = int(N_BITS)
self.connect(self,
comp,
blocks.unpack_k_bits_bb(bits_per_byte),
blocks.uchar_to_float(),
blocks.integrate_ff(intdump_decim),
blocks.multiply_const_ff(1.0/N_BITS),
self)
self.connect((self, 1), (comp, 1))
class BERAWGNSimu(gr.top_block):
" This contains the simulation flow graph "
def __init__(self, EbN0):
gr.top_block.__init__(self)
self.const = digital.qpsk_constellation()
# Source is N_BITS bits, non-repeated
data = map(int, numpy.random.randint(0, self.const.arity(), N_BITS/self.const.bits_per_symbol()))
src = blocks.vector_source_b(data, False)
mod = digital.chunks_to_symbols_bc((self.const.points()), 1)
add = blocks.add_vcc()
noise = analog.noise_source_c(analog.GR_GAUSSIAN,
self.EbN0_to_noise_voltage(EbN0),
RAND_SEED)
demod = digital.constellation_decoder_cb(self.const.base())
ber = BitErrors(self.const.bits_per_symbol())
self.sink = blocks.vector_sink_f()
self.connect(src, mod, add, demod, ber, self.sink)
self.connect(noise, (add, 1))
self.connect(src, (ber, 1))
def EbN0_to_noise_voltage(self, EbN0):
""" Converts Eb/N0 to a complex noise voltage (assuming unit symbol power) """
return 1.0 / math.sqrt(self.const.bits_per_symbol() * 10**(float(EbN0)/10))
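# Sketch of the conversion used in EbN0_to_noise_voltage() above, assuming
# unit symbol energy Es = 1: with k = bits_per_symbol, Es/N0 = k * Eb/N0, so
#     N0 = 1 / (k * 10**(EbN0_dB / 10))
# and the amplitude passed to the complex noise source is sqrt(N0), which is
# exactly the expression returned there.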
def simulate_ber(EbN0):
""" All the work's done here: create flow graph, run, read out BER """
print "Eb/N0 = %d dB" % EbN0
fg = BERAWGNSimu(EbN0)
fg.run()
return numpy.sum(fg.sink.data())
if __name__ == "__main__":
EbN0_min = 0
EbN0_max = 15
EbN0_range = range(EbN0_min, EbN0_max+1)
ber_theory = [berawgn(x) for x in EbN0_range]
print "Simulating..."
ber_simu = [simulate_ber(x) for x in EbN0_range]
f = pylab.figure()
s = f.add_subplot(1,1,1)
s.semilogy(EbN0_range, ber_theory, 'g-.', label="Theoretical")
s.semilogy(EbN0_range, ber_simu, 'b-o', label="Simulated")
s.set_title('BER Simulation')
s.set_xlabel('Eb/N0 (dB)')
s.set_ylabel('BER')
s.legend()
s.grid()
pylab.show()
| gpl-3.0 |
tawsifkhan/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility functions
===========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
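# For example, _is_integral_float(np.array([1., 2.])) is True, while
# _is_integral_float(np.array([1.5])) is False, and integer-dtype arrays give
# False because their dtype kind is not 'f'.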
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
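# Rough usage sketch for estimators (the ``self._initialize`` helper below is
# hypothetical):
#
#     def partial_fit(self, X, y, classes=None):
#         if _check_partial_fit_first_call(self, classes):
#             # first call: ``self.classes_`` has just been set
#             self._initialize(n_classes=len(self.classes_))
#         ...  # incremental update using X and y
#         return self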
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
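# Worked example for the dense case: with y = np.array([[1, 0], [2, 0], [2, 1]]),
# class_distribution(y) should give classes [array([1, 2]), array([0, 1])],
# n_classes [2, 2] and class_prior approximately
# [array([1/3, 2/3]), array([2/3, 1/3])].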
| bsd-3-clause |
bootphon/abx_numpy | examples/2D_normal_example.py | 1 | 1680 | """
This example applies the abx evaluation to 2D data sampled from Gaussian distributions (diagonal covariance)
"""
from __future__ import print_function
import abx_numpy
import numpy as np
def sample_data(parameters):
data = []
n_samples = []
for klass in parameters:
sample = np.empty((klass['N'], 2))
for i in range(2):
sample[:, i] = np.random.normal(klass['mean'][i],
klass['std'][i],
klass['N'])
data.append(sample)
n_samples.append(klass['N'])
classes = np.repeat(np.arange(len(parameters)), repeats=n_samples)
data = np.concatenate(data, axis=0)
return classes, data
def plot_data(parameters, data):
import matplotlib.pyplot as plt
assert len(parameters) <= 3, 'Cannot plot more than 3 classes'
i = 0
colors = ['r', 'g', 'b']
for n_klass, klass in enumerate(parameters):
plt.plot(*data[i:i+klass['N']].T, marker='o',
color=colors[n_klass], ls='',
label='class {}'.format(n_klass+1))
i += klass['N']
plt.legend(numpoints=1)
plt.title('Normally distributed data points (diagonal covariance)')
plt.show()
def evaluate():
parameters = [
{'mean': [1, 1], 'std': [0.5, 1], 'N': 100},
{'mean': [1, 3], 'std': [1, 1], 'N': 150},
{'mean': [3, 2], 'std': [0.5, 0.5], 'N': 200}
]
classes, data = sample_data(parameters)
plot_data(parameters, data)
results = abx_numpy.abx(classes, data, lambda x, y: np.linalg.norm(x - y))
print(results)
if __name__ == '__main__':
evaluate()
| gpl-3.0 |
endlessm/chromium-browser | third_party/llvm/libc/AOR_v20.02/math/tools/plot.py | 1 | 1404 | #!/usr/bin/python
# ULP error plot tool.
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import numpy as np
import matplotlib.pyplot as plt
import sys
import re
# example usage:
# build/bin/ulp -e .0001 log 0.5 2.0 2345678 | math/tools/plot.py
def fhex(s):
return float.fromhex(s)
def parse(f):
xs = []
gs = []
ys = []
es = []
# Has to match the format used in ulp.c
r = re.compile(r'[^ (]+\(([^ )]*)\) got ([^ ]+) want ([^ ]+) [^ ]+ ulp err ([^ ]+)')
for line in f:
m = r.match(line)
if m:
x = fhex(m.group(1))
g = fhex(m.group(2))
y = fhex(m.group(3))
e = float(m.group(4))
xs.append(x)
gs.append(g)
ys.append(y)
es.append(e)
elif line.startswith('PASS') or line.startswith('FAIL'):
# Print the summary line
print(line)
return xs, gs, ys, es
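# The regular expression above implies input lines of the rough shape
#     func(0x1.23p+0) got 0x1.9p-1 want 0x1.9p-1 <info> ulp err 0.25
# where the argument, the computed value and the reference value are hex
# floats (parsed with fhex) and the trailing number is the ULP error.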
def plot(xs, gs, ys, es):
if len(xs) < 2:
print('not enough samples')
return
a = min(xs)
b = max(xs)
fig, (ax0,ax1) = plt.subplots(nrows=2)
es = np.abs(es) # ignore the sign
emax = max(es)
ax0.text(a+(b-a)*0.7, emax*0.8, '%s\n%g'%(emax.hex(),emax))
ax0.plot(xs,es,'r.')
ax0.grid()
ax1.plot(xs,ys,'r.',label='want')
ax1.plot(xs,gs,'b.',label='got')
ax1.grid()
ax1.legend()
plt.show()
xs, gs, ys, es = parse(sys.stdin)
plot(xs, gs, ys, es)
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/optimize/nonlin.py | 34 | 46681 | r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import callable, exec_, xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getargspec_no_self as _getargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
if norm is None:
self.norm = maxnorm
else:
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
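# Small illustrative use of asjacobian with a constant Jacobian given as a
# dense array (the comments show what the calls should return):
#     jac = asjacobian(np.array([[2., 0.], [0., 4.]]))
#     jac.solve(np.array([2., 4.]))   # -> array([1., 1.])
#     jac.matvec(np.array([1., 1.]))  # -> array([2., 4.])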
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
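# Consistency sketch for the low-rank representation above: the implicit form
# and its dense counterpart should agree, e.g.
#     m = LowRankMatrix(2.0, 3, float)
#     m.append(np.ones(3), np.arange(3.))
#     v = np.array([1., 0., 1.])
#     np.allclose(m.matvec(v), np.dot(np.array(m), v))   # expected: True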
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by searching for a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only MxM matrix
    inversions and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
        Note that you can also use inverse Jacobians as (adaptive)
preconditioners. For example,
>>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
>>> from scipy.optimize.nonlin import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
doi:10.1016/j.jcp.2003.08.010
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
doi:10.1137/S0895479803422014
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
    It inspects the keyword arguments of ``jac.__init__``, and allows the
    same arguments to be used in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`.
"""
args, varargs, varkw, defaults = _getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
| mit |
jseabold/scipy | scipy/interpolate/fitpack.py | 16 | 46294 | #!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
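# For example, _intc_overflow(3) simply returns intc(3), while a value larger
# than iinfo(intc).max (2**31 - 1 on most platforms) raises OverflowError.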
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default ``nest = m + 2*k``;
``nest = m + k + 1`` is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", Computer Graphics and Image Processing,
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
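Examples
--------
A minimal sketch of typical usage: fit a smooth parametric spline through
points sampled on a circle and evaluate it on a finer parameter grid.
>>> import numpy as np
>>> from scipy.interpolate import splprep, splev
>>> theta = np.linspace(0, 2*np.pi, 40)
>>> x, y = np.cos(theta), np.sin(theta)
>>> tck, u = splprep([x, y], s=0)
>>> xi, yi = splev(np.linspace(0, 1, 200), tck)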
"""
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError('1 <= k = %d <= 5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of `k` should be avoided especially with small `s` values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
set of data (t will be stored and used internally).
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
Notes
-----
See splev for evaluation of the spline and its derivatives. Uses the
FORTRAN routine curfit from FITPACK.
The user is responsible for assuring that the values of *x* are unique.
Otherwise, *splrep* will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def _ntlist(l): # return non-trivial list
return l
# if len(l)>1: return l
# return l[0]
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
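Examples
--------
A short sketch: evaluate a fitted 1-D spline and its first derivative on a
finer grid.
>>> import numpy as np
>>> from scipy.interpolate import splrep, splev
>>> x = np.linspace(0, 10, 30)
>>> tck = splrep(x, np.sin(x))
>>> xnew = np.linspace(0, 10, 200)
>>> ynew = splev(xnew, tck)          # spline values
>>> dynew = splev(xnew, tck, der=1)  # first derivative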
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
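Examples
--------
A small sketch: integrate a spline fitted to ``sin(x)`` over one half
period (the result should be close to 2).
>>> import numpy as np
>>> from scipy.interpolate import splrep, splint
>>> x = np.linspace(0, np.pi, 50)
>>> tck = splrep(x, np.sin(x))
>>> area = splint(0, np.pi, tck)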
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return _ntlist(list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c)))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
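Examples
--------
An illustrative sketch: the roots of a cubic spline fitted to ``sin(x)``
lie near the multiples of pi inside the data interval.
>>> import numpy as np
>>> from scipy.interpolate import splrep, sproot
>>> x = np.linspace(0, 10, 70)
>>> tck = splrep(x, np.sin(x))
>>> roots = sproot(tck)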
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return _ntlist(list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c)))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
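Examples
--------
A brief sketch: all derivatives (value, first, ..., k-th) of a cubic
spline at a single point.
>>> import numpy as np
>>> from scipy.interpolate import splrep, spalde
>>> x = np.linspace(0, 10, 30)
>>> tck = splrep(x, np.sin(x))
>>> derivs = spalde(2.5, tck)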
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return _ntlist(list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c)))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
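Examples
--------
A minimal sketch: fit an interpolating surface to samples of ``z = x*y``
on a regular grid (the grid arrays are ravelled into rank-1 arrays).
>>> import numpy as np
>>> from scipy.interpolate import bisplrep
>>> x, y = np.mgrid[0:1:20j, 0:1:20j]
>>> z = x * y
>>> tck = bisplrep(x.ravel(), y.ravel(), z.ravel(), s=0)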
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_surfit_cache['tx'] = tx
_surfit_cache['ty'] = ty
_surfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
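Examples
--------
A minimal sketch: evaluate a fitted surface on a new rectangular grid.
>>> import numpy as np
>>> from scipy.interpolate import bisplrep, bisplev
>>> x, y = np.mgrid[0:1:20j, 0:1:20j]
>>> tck = bisplrep(x.ravel(), y.ravel(), (x * y).ravel(), s=0)
>>> znew = bisplev(np.linspace(0, 1, 50), np.linspace(0, 1, 50), tck)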
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
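Examples
--------
A sketch, assuming `tck` describes a surface previously fitted with
`bisplrep` over the unit square:
>>> volume = dblint(0, 1, 0, 1, tck)  # doctest: +SKIP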
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of
:math:`\cos(x) = \sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
dt = t[k+1:-1] - t[1:-k-1]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, [0]*k]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt) / (k + 1)
c = np.r_[0, c, [c[-1]]*(k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| bsd-3-clause |
freeman-lab/dask | dask/dataframe/core.py | 1 | 41050 | from __future__ import division
from itertools import count
from math import ceil, sqrt
from functools import wraps
import bisect
import os
from toolz import (merge, partial, accumulate, unique, first, dissoc, valmap,
partition)
import toolz
from operator import getitem, setitem
from datetime import datetime
import pandas as pd
import numpy as np
import operator
import gzip
import bz2
import bcolz
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import async
from .. import threaded
from ..compatibility import unicode, apply
from ..utils import repr_long_list, IndexCallable, pseudorandom
from .utils import shard_df_on_index
from ..context import _globals
def _concat(args):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if len(args) == 1:
return args[0]
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
result = pd.concat(map(pd.Series, args))
result = type(args[0])(result.values)
result.name = args[0].name
return result
return args
def compute(*args, **kwargs):
""" Compute multiple dataframes at once """
if len(args) == 1 and isinstance(args[0], (tuple, list)):
args = args[0]
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return list(map(_concat, results))
tokens = ('-%d' % i for i in count(1))
class Scalar(object):
""" A Dask-thing to represent a scalar
TODO: Clean up this abstraction
"""
def __init__(self, dsk, _name):
self.dask = dsk
self._name = _name
self.divisions = [None, None]
@property
def _args(self):
return (self.dask, self._name)
def _keys(self):
return [(self._name, 0)]
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
from .optimize import optimize
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
class _Frame(object):
""" Superclass for DataFrame and Series """
@property
def npartitions(self):
return len(self.divisions) - 1
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
from .optimize import optimize
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
def index(self):
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name, None, self.divisions)
@property
def known_divisions(self):
return len(self.divisions) > 0 and self.divisions[0] is not None
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + next(tokens)
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
dsk2 = dict((key, (getitem, cache, (tuple, list(key))))
for key in self._keys())
return type(self)(dsk2, name, self.column_info, self.divisions)
@wraps(pd.DataFrame.drop_duplicates)
def drop_duplicates(self):
chunk = lambda s: s.drop_duplicates()
return aca(self, chunk=chunk, aggregate=chunk, columns=self.columns)
def __len__(self):
return reduction(self, len, np.sum).compute()
def map_partitions(self, func, columns=None):
""" Apply Python function on each DataFrame block
Provide columns of the output if they are not the same as the input.
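A sketch of typical usage (assumes ``df`` is an existing dask DataFrame
with a numeric column ``x``; the column names are illustrative only):
>>> df2 = df.map_partitions(lambda block: block.assign(y=2 * block.x),
...                         columns=['x', 'y'])  # doctest: +SKIP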
"""
if columns is None:
columns = self.column_info
name = 'map_partitions' + next(tokens)
dsk = dict(((name, i), (func, (self._name, i)))
for i in range(self.npartitions))
return type(self)(merge(dsk, self.dask), name,
columns, self.divisions)
def random_split(self, p, seed=None):
""" Pseudorandomly split dataframe into different pieces row-wise
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent seed
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], seed=123) # doctest: +SKIP
"""
seeds = np.random.RandomState(seed).randint(0, np.iinfo(np.int32).max,
self.npartitions)
dsk_full = dict(((self._name + '-split-full', i),
(pd_split, (self._name, i), p, seed))
for i, seed in enumerate(seeds))
dsks = [dict(((self._name + '-split-%d' % i, j),
(getitem, (self._name + '-split-full', j), i))
for j in range(self.npartitions))
for i in range(len(p))]
return [type(self)(merge(self.dask, dsk_full, dsk),
self._name + '-split-%d' % i,
self.column_info,
self.divisions)
for i, dsk in enumerate(dsks)]
def head(self, n=10, compute=True):
""" First n rows of the dataset
Caveat: this only checks the first n rows of the first partition.
"""
name = 'head' + next(tokens)
dsk = {(name, 0): (head, (self._name, 0), n)}
result = type(self)(merge(self.dask, dsk), name,
self.column_info, self.divisions[:2])
if compute:
result = result.compute()
return result
def _loc(self, ind):
""" Helper function for the .loc accessor """
if isinstance(ind, Series):
return self._loc_series(ind)
elif isinstance(ind, slice):
return self._loc_slice(ind)
else:
return self._loc_element(ind)
def _loc_series(self, ind):
name = 'loc-series' + next(tokens)
if not self.divisions == ind.divisions:
raise ValueError("Partitions of dataframe and index not the same")
return map_partitions(lambda df, ind: df.loc[ind],
self.columns, self, ind)
def _loc_element(self, ind):
name = 'loc-element' + next(tokens)
part = _partition_of_index_value(self.divisions, ind)
dsk = {(name, 0): (lambda df: df.loc[ind], (self._name, part))}
return type(self)(merge(self.dask, dsk), name,
self.column_info, [ind, ind])
def _loc_slice(self, ind):
name = 'loc-slice' + next(tokens)
assert ind.step in (None, 1)
if ind.start:
start = _partition_of_index_value(self.divisions, ind.start)
else:
start = 0
if ind.stop is not None:
stop = _partition_of_index_value(self.divisions, ind.stop)
else:
stop = self.npartitions - 1
istart = _coerce_loc_index(self.divisions, ind.start)
istop = _coerce_loc_index(self.divisions, ind.stop)
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
divisions = [istart, istop]
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
divisions = ((max(istart, self.divisions[start])
if ind.start is not None
else self.divisions[0],) +
self.divisions[start+1:stop+1] +
(min(istop, self.divisions[stop+1])
if ind.stop is not None
else self.divisions[-1],))
assert len(divisions) == len(dsk) + 1
return type(self)(merge(self.dask, dsk),
name, self.column_info,
divisions)
@property
def loc(self):
return IndexCallable(self._loc)
@property
def iloc(self):
raise AttributeError("Dask Dataframe does not support iloc")
def repartition(self, divisions):
""" Repartition dataframe along new divisions
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
"""
return repartition(self, divisions)
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
@wraps(pd.Series.fillna)
def fillna(self, value):
func = getattr(self._partition_type, 'fillna')
return map_partitions(func, self.column_info, self, value)
def sample(self, frac):
""" Random sample of items
This only implements the ``frac`` option from pandas.
See Also:
pd.DataFrame.sample
"""
func = getattr(self._partition_type, 'sample')
return map_partitions(func, self.column_info, self, None, frac)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
def __init__(self, dsk, _name, name, divisions):
self.dask = dsk
self._name = _name
self.name = name
self.divisions = tuple(divisions)
self.dt = DatetimeAccessor(self)
self.str = StringAccessor(self)
@property
def _args(self):
return (self.dask, self._name, self.name, self.divisions)
@property
def dtype(self):
return self.head().dtype
@property
def column_info(self):
return self.name
@property
def columns(self):
return (self.name,)
def __repr__(self):
return ("dd.Series<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
def quantiles(self, q):
""" Approximate quantiles of column
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
return quantiles(self, q)
def __getitem__(self, key):
name = 'getitem' + next(tokens)
if isinstance(key, Series) and self.divisions == key.divisions:
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self.name, self.divisions)
raise NotImplementedError()
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.inv, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(pd.Series.sum)
def sum(self):
return reduction(self, pd.Series.sum, np.sum)
@wraps(pd.Series.max)
def max(self):
return reduction(self, pd.Series.max, np.max)
@wraps(pd.Series.min)
def min(self):
return reduction(self, pd.Series.min, np.min)
@wraps(pd.Series.count)
def count(self):
return reduction(self, pd.Series.count, np.sum)
@wraps(pd.Series.nunique)
def nunique(self):
return self.drop_duplicates().count()
@wraps(pd.Series.mean)
def mean(self):
def chunk(ser):
return (ser.sum(), ser.count())
def agg(seq):
sums, counts = list(zip(*seq))
return 1.0 * sum(sums) / sum(counts)
return reduction(self, chunk, agg)
@wraps(pd.Series.var)
def var(self, ddof=1):
def chunk(ser):
return (ser.sum(), (ser**2).sum(), ser.count())
def agg(seq):
x, x2, n = list(zip(*seq))
x = float(sum(x))
x2 = float(sum(x2))
n = sum(n)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
return reduction(self, chunk, agg)
@wraps(pd.Series.std)
def std(self, ddof=1):
name = 'std' + next(tokens)
df = self.var(ddof=ddof)
dsk = {(name, 0): (sqrt, (df._name, 0))}
return Scalar(merge(df.dask, dsk), name)
@wraps(pd.Series.value_counts)
def value_counts(self):
chunk = lambda s: s.value_counts()
agg = lambda s: s.groupby(level=0).sum()
return aca(self, chunk=chunk, aggregate=agg, columns=self.columns)
@wraps(pd.Series.isin)
def isin(self, other):
return elemwise(pd.Series.isin, self, other)
@wraps(pd.Series.map)
def map(self, arg, na_action=None):
return elemwise(pd.Series.map, self, arg, na_action, name=self.name)
@wraps(pd.Series.astype)
def astype(self, dtype):
return map_partitions(pd.Series.astype, self.name, self, dtype)
@wraps(pd.Series.dropna)
def dropna(self):
return map_partitions(pd.Series.dropna, self.name, self)
@wraps(pd.Series.between)
def between(self, left, right, inclusive=True):
return map_partitions(pd.Series.between, self.name, self, left, right,
inclusive)
@wraps(pd.Series.clip)
def clip(self, lower=None, upper=None):
return map_partitions(pd.Series.clip, self.name, self, lower, upper)
@wraps(pd.Series.notnull)
def notnull(self):
return map_partitions(pd.Series.notnull, self.name, self)
class Index(Series):
pass
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
This is a work in progress. It is buggy and far from complete.
Please do not use it yet.
Parameters
----------
dask: dict
The dask graph to compute this Dataframe
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
columns: list of strings
Column names. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
def __init__(self, dask, name, columns, divisions):
self.dask = dask
self._name = name
self.columns = tuple(columns)
self.divisions = tuple(divisions)
@property
def _args(self):
return (self.dask, self._name, self.columns, self.divisions)
def __getitem__(self, key):
if isinstance(key, (str, unicode)):
name = self._name + '.' + key
if key in self.columns:
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return Series(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, list):
name = '%s[%s]' % (self._name, str(key))
if all(k in self.columns for k in key):
dsk = dict(((name, i), (operator.getitem,
(self._name, i),
(list, key)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'slice-with-series' + next(tokens)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, key.dask, dsk), name,
self.columns, self.divisions)
raise NotImplementedError()
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as e:
try:
return self[key]
except NotImplementedError:
raise e
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.columns)))
def __repr__(self):
return ("dd.DataFrame<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
@property
def dtypes(self):
return get(self.dask, self._keys()[0]).dtypes
def set_index(self, other, **kwargs):
return set_index(self, other, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See also:
set_index
"""
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
return self.columns
def groupby(self, key, **kwargs):
return GroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
return categorize(self, columns, **kwargs)
@wraps(pd.DataFrame.assign)
def assign(self, **kwargs):
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df = pd.DataFrame(columns=self.columns)
df2 = df.assign(**dict((k, []) for k in kwargs))
return elemwise(_assign, self, *pairs, columns=list(df2.columns))
def _assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[slice(start, stop)]
if not include_right_boundary:
# result = df[df.index != stop]
result = result.iloc[:result.index.get_slice_bound(stop, 'left',
result.index.inferred_type)]
return result
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o)
return o
def head(x, n):
""" First n elements of dask.Dataframe or dask.Series """
return x.head(n)
def consistent_name(names):
""" New name for series in elementwise operation
If all truthy names are the same, choose that one, otherwise, choose None
"""
allnames = set()
for name in names:
if name is None:
continue
if isinstance(name, (tuple, list)):
allnames.update(name)
else:
allnames.add(name)
if len(allnames) == 1:
return first(allnames)
else:
return None
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
columns = kwargs.get('columns', None)
name = kwargs.get('name', None)
_name = 'elemwise' + next(tokens)
dfs = [arg for arg in args if isinstance(arg, _Frame)]
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, _Frame)]
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
assert all(df.divisions == dfs[0].divisions for df in dfs)
assert all(df.npartitions == dfs[0].npartitions for df in dfs)
dsk = dict(((_name, i), (op2,) + frs)
for i, frs in enumerate(zip(*[df._keys() for df in dfs])))
if columns is not None:
return DataFrame(merge(dsk, *[df.dask for df in dfs]),
_name, columns, dfs[0].divisions)
else:
column_name = name or consistent_name(n for df in dfs
for n in df.columns)
return Series(merge(dsk, *[df.dask for df in dfs]),
_name, column_name, dfs[0].divisions)
def remove_empties(seq):
""" Remove items of length 0
>>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
[1, 2, 4, 5]
>>> remove_empties([('empty', np.nan)])
[nan]
>>> remove_empties([])
[]
"""
if not seq:
return seq
seq2 = [x for x in seq
if not (isinstance(x, tuple) and x and x[0] == 'empty')]
if seq2:
return seq2
else:
return [seq[0][1]]
def empty_safe(func, arg):
"""
>>> empty_safe(sum, [1, 2, 3])
6
>>> empty_safe(sum, [])
('empty', 0)
"""
if len(arg) == 0:
return ('empty', func(arg))
else:
return func(arg)
def reduction(x, chunk, aggregate):
""" General version of reductions
>>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP
"""
a = 'reduction-chunk' + next(tokens)
dsk = dict(((a, i), (empty_safe, chunk, (x._name, i)))
for i in range(x.npartitions))
b = 'reduction-aggregation' + next(tokens)
dsk2 = {(b, 0): (aggregate, (remove_empties,
[(a,i) for i in range(x.npartitions)]))}
return Scalar(merge(x.dask, dsk, dsk2), b)
def concat(dfs):
""" Concatenate dataframes along rows
Currently only supports unknown divisions
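A sketch, assuming ``df1`` and ``df2`` are dask DataFrames with the same
columns and unknown divisions:
>>> df_all = concat([df1, df2])  # doctest: +SKIP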
"""
if any(df.known_divisions for df in dfs):
# For this to work we need to add a final division for "maximum element"
raise NotImplementedError("Concat can't currently handle dataframes"
" with known divisions")
name = 'concat' + next(tokens)
dsk = dict()
i = 0
for df in dfs:
for key in df._keys():
dsk[(name, i)] = key
i += 1
divisions = [None] * (i + 1)
return DataFrame(merge(dsk, *[df.dask for df in dfs]), name,
dfs[0].columns, divisions)
class GroupBy(object):
def __init__(self, df, index=None, **kwargs):
self.df = df
self.index = index
self.kwargs = kwargs
if isinstance(index, list):
assert all(i in df.columns for i in index)
elif isinstance(index, Series):
assert index.divisions == df.divisions
else:
assert index in df.columns
def apply(self, func, columns=None):
if (isinstance(self.index, Series) and
self.index._name == self.df.index._name):
df = self.df
return df.map_partitions(lambda df: df.groupby(level=0).apply(func),
columns=columns)
else:
# df = set_index(self.df, self.index, **self.kwargs)
df = shuffle(self.df, self.index, **self.kwargs)
return map_partitions(lambda df, ind: df.groupby(ind).apply(func),
columns or self.df.columns,
self.df, self.index)
def __getitem__(self, key):
if key in self.df.columns:
return SeriesGroupBy(self.df, self.index, key)
else:
raise KeyError()
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.df.columns)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
try:
return self[key]
except KeyError:
raise AttributeError()
class SeriesGroupBy(object):
def __init__(self, df, index, key, **kwargs):
self.df = df
self.index = index
self.key = key
self.kwargs = kwargs
def apply(self, func, columns=None):
# df = set_index(self.df, self.index, **self.kwargs)
if self.index._name == self.df.index._name:
df = self.df
return df.map_partitions(
lambda df: df.groupby(level=0)[self.key].apply(func),
columns=columns)
else:
df = shuffle(self.df, self.index, **self.kwargs)
return map_partitions(
lambda df, index: df.groupby(index).apply(func),
columns or self.df.columns,
self.df, self.index)
def sum(self):
chunk = lambda df, index: df.groupby(index)[self.key].sum()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def min(self):
chunk = lambda df, index: df.groupby(index)[self.key].min()
agg = lambda df: df.groupby(level=0).min()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def max(self):
chunk = lambda df, index: df.groupby(index)[self.key].max()
agg = lambda df: df.groupby(level=0).max()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def count(self):
chunk = lambda df, index: df.groupby(index)[self.key].count()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def mean(self):
def chunk(df, index):
g = df.groupby(index)
return g.agg({self.key: ['sum', 'count']})
def agg(df):
g = df.groupby(level=0)
x = g.agg({(self.key, 'sum'): 'sum',
(self.key, 'count'): 'sum'})
result = x[self.key]['sum'] / x[self.key]['count']
result.name = self.key
return result
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def nunique(self):
def chunk(df, index):
# we call set_index here to force a possibly duplicate index
# for our reduce step
return (df.groupby(index)
.apply(pd.DataFrame.drop_duplicates, subset=self.key)
.set_index(index))
def agg(df):
return df.groupby(level=0)[self.key].nunique()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
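# Added illustration (not part of the original dask source): why the mean
# method above is built from per-partition sums and counts rather than from
# per-partition means -- averaging the means gives the wrong answer when the
# partitions have different sizes. Toy numbers, plain pandas only.
def _partitioned_mean_example():
    import pandas as pd
    part1 = pd.Series([1.0, 2.0, 3.0])           # three values in one partition
    part2 = pd.Series([10.0])                    # one value in another
    naive = (part1.mean() + part2.mean()) / 2    # 6.0 -- wrong
    correct = (part1.sum() + part2.sum()) / (len(part1) + len(part2))  # 4.0
    return naive, correct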
def apply_concat_apply(args, chunk=None, aggregate=None, columns=None):
""" Apply a function to blocks, the concat, then apply again
Parameters
----------
args: dask.DataFrames
All Dataframes should be partitioned and indexed equivalently
chunk: function [block-per-arg] -> block
Function to operate on each block of data
aggregate: function concatenated-block -> block
Function to operate on the concatenated result of chunk
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args
if isinstance(arg, _Frame))
a = 'apply-concat-apply--first' + next(tokens)
dsk = dict(((a, i), (apply, chunk, (list, [(x._name, i)
if isinstance(x, _Frame)
else x for x in args])))
for i in range(args[0].npartitions))
b = 'apply-concat-apply--second' + next(tokens)
dsk2 = {(b, 0): (aggregate,
(pd.concat,
(list, [(a, i) for i in range(args[0].npartitions)])))}
return type(args[0])(
merge(dsk, dsk2, *[a.dask for a in args
if isinstance(a, _Frame)]),
b, columns, [None, None])
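# Added illustration (not part of the original dask source): the chunk ->
# concat -> aggregate pattern used by apply_concat_apply, mimicked with plain
# pandas on two hand-made "partitions" to show why it reproduces a global
# reduction. All names and values here are made up.
def _aca_sum_example():
    import pandas as pd
    full = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6]})
    parts = [full.iloc[:3], full.iloc[3:]]            # pretend partitions
    chunk = lambda df: df.sum()                       # applied to each block
    aggregate = lambda s: s.groupby(level=0).sum()    # applied to the concatenation
    result = aggregate(pd.concat([chunk(p) for p in parts]))
    assert result['x'] == full['x'].sum()
    return result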
def map_partitions(func, columns, *args):
""" Apply Python function on each DataFrame block
Provide columns of the output if they are not the same as the input.
"""
assert all(not isinstance(arg, _Frame) or
arg.divisions == args[0].divisions
for arg in args)
name = 'map-partitions' + next(tokens)
dsk = dict(((name, i), (apply, func,
(tuple, [(arg._name, i)
if isinstance(arg, _Frame)
else arg
for arg in args])))
for i in range(args[0].npartitions))
return type(args[0])(merge(dsk, *[arg.dask for arg in args
if isinstance(arg, _Frame)]),
name, columns, args[0].divisions)
aca = apply_concat_apply
def categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals,
ordered=False, name=col)
return df
def categorize(df, columns=None, **kwargs):
"""
Convert columns of dataframe to category dtype
This aids performance, both in-memory and in spilling to disk
"""
if columns is None:
dtypes = df.dtypes
columns = [name for name, dt in zip(dtypes.index, dtypes.values)
if dt == 'O']
if not isinstance(columns, (list, tuple)):
columns = [columns]
distincts = [df[col].drop_duplicates() for col in columns]
values = compute(distincts, **kwargs)
func = partial(categorize_block, categories=dict(zip(columns, values)))
return df.map_partitions(func, columns=df.columns)
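# Added illustration (assumes a pandas version with astype('category') and
# Series.memory_usage(deep=True); the column values are made up): converting a
# repetitive object column to a categorical usually shrinks memory, which is
# the performance benefit the docstring above refers to.
def _categorize_example():
    import pandas as pd
    s = pd.Series(['NYC', 'LA', 'NYC', 'SF'] * 1000)
    before = s.memory_usage(deep=True)
    after = s.astype('category').memory_usage(deep=True)
    return before, after    # `after` should be far smaller than `before`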
def quantiles(df, q, **kwargs):
""" Approximate quantiles of column
Parameters
----------
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
assert len(df.columns) == 1
if not len(q):
return da.zeros((0,), chunks=((0,),))
from dask.array.percentile import _percentile, merge_percentiles
name = 'quantiles-1' + next(tokens)
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), q))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2' + next(tokens)
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3' + next(tokens)
merge_dsk = {(name3, 0): (merge_percentiles, q, [q] * df.npartitions,
sorted(val_dsk),
sorted(len_dsk))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return da.Array(dsk, name3, chunks=((len(q),),))
def get(dsk, keys, get=None, **kwargs):
""" Get function with optimizations specialized to dask.Dataframe """
from .optimize import optimize
dsk2 = optimize(dsk, keys, **kwargs)
get = get or _globals['get'] or threaded.get
    return get(dsk2, keys, **kwargs)  # defaults to the threaded scheduler
def pd_split(df, p, seed=0):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], seed=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, seed)
return [df.iloc[index == i] for i in range(len(p))]
def repartition_divisions(a, b, name, out1, out2):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a: tuple
old divisions
b: tuple
new divisions
name: str
name of old dataframe
out1: str
name of temporary splits
out2: str
name of new dataframe
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function _loc at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
assert a[0] == b[0]
assert a[-1] == b[-1]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1
k = 0
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k = k + 1
tup = d[(out1, k - 1)]
d[(out1, k - 1)] = tup[:-1] + (True,)
c.append(a[-1])
i, j = 0, 1
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
d[(out2, j - 1)] = (pd.concat, (list, tmp))
j += 1
return d
def repartition(df, divisions):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
if isinstance(df, _Frame):
tmp = 'repartition-split' + next(tokens)
out = 'repartition-merge' + next(tokens)
dsk = repartition_divisions(df.divisions, divisions, df._name, tmp, out)
return type(df)(merge(df.dask, dsk), out, df.column_info, divisions)
elif isinstance(df, pd.core.generic.NDFrame):
name = 'repartition-dataframe' + next(tokens)
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
if isinstance(df, pd.DataFrame):
return DataFrame(dsk, name, df.columns, divisions)
if isinstance(df, pd.Series):
return Series(dsk, name, df.name, divisions)
class DatetimeAccessor(object):
""" Datetime functions
Examples
--------
>>> df.mydatetime.dt.microsecond # doctest: +SKIP
"""
def __init__(self, series):
self._series = series
def __dir__(self):
return sorted(set(dir(type(self)) + dir(pd.Series.dt)))
def _property_map(self, key):
return self._series.map_partitions(lambda s: getattr(s.dt, key))
def _function_map(self, key, *args):
func = lambda s: getattr(s.dt, key)(*args)
return self._series.map_partitions(func, *args)
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(pd.Series.dt):
if isinstance(getattr(pd.Series.dt, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise
class StringAccessor(object):
""" String functions
Examples
--------
>>> df.name.lower() # doctest: +SKIP
"""
def __init__(self, series):
self._series = series
def __dir__(self):
return sorted(set(dir(type(self)) + dir(pd.Series.str)))
def _property_map(self, key):
return self._series.map_partitions(lambda s: getattr(s.str, key))
def _function_map(self, key, *args):
func = lambda s: getattr(s.str, key)(*args)
return self._series.map_partitions(func, *args)
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(pd.Series.str):
if isinstance(getattr(pd.Series.str, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise
from .shuffle import set_index, set_partition, shuffle
| bsd-3-clause |
RPGroup-PBoC/gist_pboc_2017 | code/project_pt2_measurement.py | 1 | 6926 | # Import the necessary modules.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Image processing utilities
import skimage.io
import skimage.filters
import skimage.segmentation
import skimage.measure
# In this script, we will learn some more principles regarding image processing
# and segmentation. We'll examine how to extract properties from segmented
# objects and show off our fancy segmentation mask by making an overlay.
# Before we begin, let's type what we did in the first step of the project.
image = skimage.io.imread('data/lacI_titration/O2_delta_phase_pos_16.tif')
# Do our normalization, segmentation, and labeling.
im_float = (image - image.min()) / (image.max() - image.min())
im_blur = skimage.filters.gaussian(im_float, sigma=50.0)
im_sub = im_float - im_blur
im_thresh = im_sub < -0.2
im_lab, num_cells = skimage.measure.label(im_thresh, return_num=True)
plt.figure()
plt.imshow(im_lab, cmap=plt.cm.spectral)
plt.show()
# Looking at the above image, we see that there are far more segmented objects
# than there are actual cells. This is because we are segmenting some of the
# pixels in the background of the images. We imagined that we could get rid of
# these pixels by selecting objects which meet a set of area bounds. Before we
# apply any bounds, let's just look at the areas of all of the cells in our
# image.
# Set the physical distance of the pixels in our camera
ip_dist = 0.160 # in units of um/pixel
# Set up a list where we will store the values of the comptued cell areas.
area = []
# We can get the properties of each region in our image by using
# skimage.measure.regionprops. This returns properties such as area, label,
# mean intensity, eccentricity, image moments, etc.
props = skimage.measure.regionprops(im_lab)
# We'll now iterate through each property and extract the area.
for prop in props:
area.append(prop.area * ip_dist**2)
# Let's take a look at the distribution of cell areas.
plt.figure()
plt.hist(area, bins=75)
plt.xlabel('object area (sq. micron)')
plt.ylabel('counts')
plt.show()
# Yikes, it seems like we have a bunch of garbage. What would some good bounds
# be? Our rule-of-thumb is that E. coli is about 2 microns long by one micron
# wide. If we approximate our cell as a rectangle, this gets us to an area of
# 2 sq micron. Of course, not all of our cells are ideal. We can be a little
# more lenient and say that our smallest cell would probably be 0.5 sq micron
# with our largest being about 6 sq. micron. We see in our histogram that we
# have some distribution between about 1.5 - 3.5 square micron. There is
# another distribution that is much smaller than our bounds. Let's filter out
# those objects and see what we segment.
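# (Added aside, not part of the original tutorial.) The same bounds expressed
# in pixel counts, for intuition: one pixel covers ip_dist**2 = 0.0256 sq.
# micron, so 0.5 sq. micron is roughly 20 pixels and 6 sq. micron is roughly
# 234 pixels.
print('area bounds in pixels: %.0f to %.0f' % (0.5 / ip_dist**2, 6 / ip_dist**2))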
approved_obj = np.zeros_like(im_lab)
for prop in props:
obj_area = prop.area * ip_dist**2
if (obj_area > 0.5) & (obj_area < 6):
# This is an approved cell, let's store the label.
approved_obj += (im_lab == prop.label)
plt.figure()
plt.imshow(approved_obj, cmap=plt.cm.spectral)
plt.show()
# That looks pretty good! Let's make sure this makes sense by looking at the
# new histogram. This means we will have to relabel the image.
im_relab, num_cells = skimage.measure.label(approved_obj, return_num=True)
print("We've identified " + str(num_cells) + " cells!")
cell_props = skimage.measure.regionprops(im_relab)
cell_areas = []
for prop in cell_props:
cell_areas.append(prop.area * ip_dist**2)
plt.figure()
plt.hist(cell_areas, bins=10)
plt.xlabel('cell area (sq. micron)')
plt.ylabel('counts')
plt.show()
# That looks great! We've even selected the right number of cells. We have one
# last issue, though. There seems to be a cell on the edge that is not
# completely in the image. Since we are ultimately interested in extracting
# quantitative information about the fluorescence, we want to remove any cells
# that are not completely in the field of view. To do this, we can use the
# skimage.segmentation.clear_border command which will delete all cells that
# are touching the border. This only works on a binary image, meaning that we
# will have to relabel the image after the cell has been removed.
im_border = skimage.segmentation.clear_border(approved_obj)
im_border_lab = skimage.measure.label(im_border)
plt.figure()
plt.imshow(im_border_lab, cmap=plt.cm.spectral_r)
plt.show()
# Very nice! We now have a segmentation mask that passes our tests. We've done
# all of our segmentation in phase contrast, which is not the channel that has
# our fluorescence information. Let's load up that image and take a look.
im_fluo = skimage.io.imread('data/lacI_titration/O2_delta_yfp_pos_16.tif')
plt.figure()
plt.imshow(im_fluo, cmap=plt.cm.Greys_r)
plt.show()
# This is a much different image. The cells are now bright against dark
# background. Since this is the channel we are interested in, why didn't we
# segment this image? Well, we are interested in the quantitative information.
# If we segmented through simple thresholding (like we've been doing), we would
# preferentially segment the brightest cells while ignoring the cells that may
# have very little to no signal. We can use the mask we generated from
# segmenting in phase contrast and apply it on this image to extract the
# fluorescence information.
# Extract the fluorescence properties.
cell_fl_props = skimage.measure.regionprops(im_border_lab,
intensity_image=im_fluo)
# Let's look at the distribution of mean cell intensities.
cell_ints = []
for prop in cell_fl_props:
cell_ints.append(prop.mean_intensity)
plt.figure()
plt.hist(cell_ints, bins=10)
plt.xlabel('fluorescence pixel intensity (a. u.)')
plt.ylabel('counts')
plt.show()
# We can see that the fluorescence intensity is pretty bright and distributed
# between 2000 and 6000 counts for 23 cells in our segmentation mask. We can
# very easily compute the mean intensity of the cells in this image using
# the list of cell intensities we just generated.
mean_intensity = np.mean(cell_ints)
print('The mean cell intensity is ' + str(mean_intensity) + ' counts.')
# Let's do one more thing for fun. It's often useful to generate an overlay
# of your segmentation mask to show others that you are only looking at cell
# mass in your measurements. We'll do this by generating an image where the
# segmented cells are colored in blue over our original image.
# Make a copy of our float phase image.
phase_copy = np.copy(im_float)
# Color the segmented parts.
phase_copy[im_border > 0] = 1.0
# Make an RGB image with the blue channel phase_copy.
merge = np.dstack((im_float, im_float, phase_copy))
# Show it!
plt.figure()
plt.imshow(merge)
plt.show()
# Looks great! We've done a lot of work in this tutorial. In the next script,
# you will get the chance to write everything we've done here as functions so
# it is easy to apply it to a large stack of images.
| mit |
lthurlow/Network-Grapher | proj/external/numpy-1.7.0/doc/source/conf.py | 7 | 10752 | # -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary']
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
#master_doc = 'index'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print version, release
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'scipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Manual (DRAFT)" % (project, version)
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = 'scipyshiny_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'indexcontent.html',
}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'numpy'
# Pngmath should try to align formulas properly
pngmath_use_preview = True
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
| mit |
anguyen8/cnn-vis | cnn_vis.org.py | 1 | 25133 | import sys
import argparse, os, tempfile
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imresize, imsave, imread
from scipy.ndimage.filters import gaussian_filter
import os
os.environ['GLOG_minloglevel'] = '2' # suppress Caffe verbose prints
import settings
import site
site.addsitedir(settings.caffe_root)
pycaffe_root = settings.caffe_root # substitute your path here
sys.path.insert(0, pycaffe_root)
import caffe
def tv_norm(x, beta=2.0, verbose=False, operator='naive'):
"""
Compute the total variation norm and its gradient.
The total variation norm is the sum of the image gradient
raised to the power of beta, summed over the image.
We approximate the image gradient using finite differences.
We use the total variation norm as a regularizer to encourage
smoother images.
Inputs:
- x: numpy array of shape (1, C, H, W)
Returns a tuple of:
- loss: Scalar giving the value of the norm
- dx: numpy array of shape (1, C, H, W) giving gradient of the loss
with respect to the input x.
"""
assert x.shape[0] == 1
if operator == 'naive':
x_diff = x[:, :, :-1, :-1] - x[:, :, :-1, 1:]
y_diff = x[:, :, :-1, :-1] - x[:, :, 1:, :-1]
elif operator == 'sobel':
x_diff = x[:, :, :-2, 2:] + 2 * x[:, :, 1:-1, 2:] + x[:, :, 2:, 2:]
x_diff -= x[:, :, :-2, :-2] + 2 * x[:, :, 1:-1, :-2] + x[:, :, 2:, :-2]
y_diff = x[:, :, 2:, :-2] + 2 * x[:, :, 2:, 1:-1] + x[:, :, 2:, 2:]
y_diff -= x[:, :, :-2, :-2] + 2 * x[:, :, :-2, 1:-1] + x[:, :, :-2, 2:]
elif operator == 'sobel_squish':
x_diff = x[:, :, :-2, 1:-1] + 2 * x[:, :, 1:-1, 1:-1] + x[:, :, 2:, 1:-1]
x_diff -= x[:, :, :-2, :-2] + 2 * x[:, :, 1:-1, :-2] + x[:, :, 2:, :-2]
y_diff = x[:, :, 1:-1, :-2] + 2 * x[:, :, 1:-1, 1:-1] + x[:, :, 1:-1, 2:]
y_diff -= x[:, :, :-2, :-2] + 2 * x[:, :, :-2, 1:-1] + x[:, :, :-2, 2:]
else:
assert False, 'Unrecognized operator %s' % operator
grad_norm2 = x_diff ** 2.0 + y_diff ** 2.0
grad_norm2[grad_norm2 < 1e-3] = 1e-3
grad_norm_beta = grad_norm2 ** (beta / 2.0)
loss = np.sum(grad_norm_beta)
dgrad_norm2 = (beta / 2.0) * grad_norm2 ** (beta / 2.0 - 1.0)
dx_diff = 2.0 * x_diff * dgrad_norm2
dy_diff = 2.0 * y_diff * dgrad_norm2
dx = np.zeros_like(x)
if operator == 'naive':
dx[:, :, :-1, :-1] += dx_diff + dy_diff
dx[:, :, :-1, 1:] -= dx_diff
dx[:, :, 1:, :-1] -= dy_diff
elif operator == 'sobel':
dx[:, :, :-2, :-2] += -dx_diff - dy_diff
dx[:, :, :-2, 1:-1] += -2 * dy_diff
dx[:, :, :-2, 2:] += dx_diff - dy_diff
dx[:, :, 1:-1, :-2] += -2 * dx_diff
dx[:, :, 1:-1, 2:] += 2 * dx_diff
dx[:, :, 2:, :-2] += dy_diff - dx_diff
dx[:, :, 2:, 1:-1] += 2 * dy_diff
dx[:, :, 2:, 2:] += dx_diff + dy_diff
elif operator == 'sobel_squish':
dx[:, :, :-2, :-2] += -dx_diff - dy_diff
dx[:, :, :-2, 1:-1] += dx_diff -2 * dy_diff
dx[:, :, :-2, 2:] += -dy_diff
dx[:, :, 1:-1, :-2] += -2 * dx_diff + dy_diff
dx[:, :, 1:-1, 1:-1] += 2 * dx_diff + 2 * dy_diff
dx[:, :, 1:-1, 2:] += dy_diff
dx[:, :, 2:, :-2] += -dx_diff
dx[:, :, 2:, 1:-1] += dx_diff
def helper(name, x):
num_nan = np.isnan(x).sum()
num_inf = np.isinf(x).sum()
num_zero = (x == 0).sum()
print '%s: NaNs: %d infs: %d zeros: %d' % (name, num_nan, num_inf, num_zero)
if verbose:
print '-' * 40
print 'tv_norm debug output'
helper('x', x)
helper('x_diff', x_diff)
helper('y_diff', y_diff)
helper('grad_norm2', grad_norm2)
helper('grad_norm_beta', grad_norm_beta)
helper('dgrad_norm2', dgrad_norm2)
helper('dx_diff', dx_diff)
helper('dy_diff', dy_diff)
helper('dx', dx)
print
return loss, dx
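# Added sanity sketch (not part of the original script; the function name is
# ours): compare the analytic gradient from tv_norm against a centered finite
# difference at one pixel. The two should agree closely whenever the
# small-gradient clipping above is inactive.
def check_tv_norm_grad(eps=1e-5):
  np.random.seed(0)
  x = np.random.randn(1, 3, 8, 8)
  _, dx = tv_norm(x, beta=2.0)
  idx = (0, 1, 3, 4)                     # an arbitrary pixel
  xp, xm = x.copy(), x.copy()
  xp[idx] += eps
  xm[idx] -= eps
  lp, _ = tv_norm(xp, beta=2.0)
  lm, _ = tv_norm(xm, beta=2.0)
  return (lp - lm) / (2 * eps), dx[idx]  # numerical vs. analytic derivative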
def p_norm(x, p=6.0, scale=10.0):
"""
Compute the p-norm for an image and its gradient.
The p-norm is defined as
|x|_p = (\sum_i |x_i|^p)^(1/p)
  so strictly speaking this function actually computes the pth power of the
p-norm.
We use it as a regularizer to prevent individual pixels from getting too big.
We don't actually want to drive pixels toward zero; we are more interested in
making sure they stay within a reasonable range. This suggests that we divide
the pixels by a scaling factor and use a high value of p; as suggested by
[1] p=6 tends to work well.
Inputs:
- x: numpy array of any shape
- p: Power for p-norm
- scale: Scale for p-norm.
Returns a tuple of:
- loss: Value of the p-norm
"""
loss = (np.abs(x / scale) ** p).sum()
grad = p / scale * np.sign(x / scale) * np.abs(x / scale) ** (p - 1)
return loss, grad
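# Added worked example (not in the original script): with p=6 and scale=1, a
# pixel value of 2 is penalized 2**6 = 64 times more heavily than a value of
# 1, which is why this regularizer mostly constrains the largest pixels.
def p_norm_ratio_example():
  loss_one, _ = p_norm(np.array([1.0]), p=6.0, scale=1.0)
  loss_two, _ = p_norm(np.array([2.0]), p=6.0, scale=1.0)
  return loss_two / loss_one             # == 64.0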
def rmsprop(dx, cache=None, decay_rate=0.95):
"""
Use RMSProp to compute a step from gradients.
Inputs:
- dx: numpy array of gradients.
- cache: numpy array of same shape as dx giving RMSProp cache
- decay_rate: How fast to decay cache
Returns a tuple of:
- step: numpy array of the same shape as dx giving the step. Note that this
does not yet take the learning rate into account.
- cache: Updated RMSProp cache.
"""
if cache is None:
cache = np.zeros_like(dx)
cache = decay_rate * cache + (1 - decay_rate) * dx ** 2
step = -dx / np.sqrt(cache + 1e-8)
return step, cache
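# Added illustration (not in the original script): RMSProp divides each
# gradient coordinate by a running RMS of its history, so coordinates with
# very different gradient magnitudes end up taking similar-sized steps.
def rmsprop_step_example():
  dx = np.array([100.0, 0.01])           # two wildly different scales
  step, cache = rmsprop(dx, cache=None, decay_rate=0.95)
  # On the first call both entries have magnitude 1/sqrt(1 - 0.95) ~= 4.47.
  return step, cache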
def get_cnn_grads(cur_img, regions, net, target_layer, step_type='amplify_layer', **kwargs):
"""
Inputs:
- cur_img: 3 x H x W
- regions: Array of (y0, y1, x0, x1); must all have same shape as input to CNN
- target_layer: String
Returns:
- grads: N x 3 x h x w array where grads[i] is the image gradient for regions[i] of cur_img
"""
cur_batch = np.zeros_like(net.blobs['data'].data)
batch_size = cur_batch.shape[0]
next_idx = 0
def run_cnn(data):
net.forward(data=data)
if step_type == 'amplify_layer':
l1_weight = kwargs.get('L1_weight', 1.0)
l2_weight = kwargs.get('L2_weight', 1.0)
grad_clip = kwargs.get('grad_clip', 5)
target_data = net.blobs[target_layer].data.copy()
target_diff = -l1_weight * np.abs(target_data)
target_diff -= l2_weight * np.clip(target_data, -grad_clip, grad_clip)
net.blobs[target_layer].diff[...] = target_diff
elif step_type == 'amplify_neuron':
if 'target_neuron' not in kwargs:
raise ValueError('Must specify target_neuron for step_type=amplify_neuron')
target_idx = kwargs['target_neuron']
net.blobs[target_layer].diff[...] = 0.0
net.blobs[target_layer].diff[:, target_idx] = -1.0
else:
raise ValueError('Unrecognized step_type "%s"' % step_type)
net.backward(start=target_layer)
return net.blobs['data'].diff.copy()
grads = []
for region in regions:
y0, y1, x0, x1 = region
cur_batch[next_idx] = cur_img[0, :, y0:y1, x0:x1]
next_idx += 1
if next_idx == batch_size:
grads.append(run_cnn(cur_batch))
next_idx = 0
if next_idx > 0:
grad = run_cnn(cur_batch)
grads.append(grad[:next_idx])
vgrads = np.vstack(grads)
return vgrads
def img_to_uint(img, mean_img=None, rescale=False):
"""
Do post-processing to convert images from caffe format to something more reasonable.
Inputs:
- img: numpy array of shape (1, C, H, W)
- mean_img: numpy array giving a mean image to add in
Returns:
A version of img that can be saved to disk or shown with matplotlib
"""
if mean_img is not None:
# Be lazy and just add the mean color
img = 1.2 * img + mean_img.mean()
# Renormalize so everything is in the range [0, 255]
if rescale:
low, high = img.min(), img.max()
else:
low, high = 0, 255
# low = max(img.mean() - 2.5 * img.std(axis=None), img.min())
# high = min(img.mean() + 2.5 * img.std(axis=None), img.max())
img = np.clip(img, low, high)
img = 255.0 * (img - low) / (high - low)
# Squeeze out extra dimensions and flip from (C, H, W) to (H, W, C)
img = img.squeeze().transpose(1, 2, 0)
# Caffe models are trained with BGR; flip to RGB
img = img[:, :, [2, 1, 0]]
# finally convert to uint8
return img.astype('uint8')
def uint_to_img(uint_img, mean_img=None):
"""
Do pre-processing to convert images from a normal format to caffe format.
"""
img = uint_img.astype('float')
img = img[:, :, [2, 1, 0]]
img = img.transpose(2, 0, 1)
img = img[np.newaxis, :, :, :]
if mean_img is not None:
img = img - mean_img.mean()
return img
def resize_img(img, new_size, mean_img=None):
img_uint = img_to_uint(img, mean_img)
img_uint_r = imresize(img_uint, new_size, interp='bicubic')
img_r = uint_to_img(img_uint_r, mean_img)
return img_r
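  # NOTE (added): everything below this return is unreachable dead code, an
  # older resize path that was left in place.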
high, low = img.max(), img.min()
img_shifted = 255.0 * (img - low) / (high - low)
img_uint = img_shifted.squeeze().transpose(1, 2, 0).astype('uint8')
img_uint_r = imresize(img_uint, new_size)
img_shifted_r = img_uint_r.astype(img.dtype).transpose(2, 0, 1)[None, :, :, :]
img_r = (img_shifted_r / 255.0) * (high - low) + low
return img_r
def write_temp_deploy(source_prototxt, batch_size):
"""
Modifies an existing prototxt by adding force_backward=True and setting
the batch size to a specific value. A modified prototxt file is written
as a temporary file.
Inputs:
- source_prototxt: Path to a deploy.prototxt that will be modified
- batch_size: Desired batch size for the network
Returns:
- path to the temporary file containing the modified prototxt
"""
_, target = tempfile.mkstemp()
with open(source_prototxt, 'r') as f:
lines = f.readlines()
force_back_str = 'force_backward: true\n'
if force_back_str not in lines:
lines.insert(1, force_back_str)
found_batch_size_line = False
with open(target, 'w') as f:
for line in lines:
if line.startswith('input_dim:') and not found_batch_size_line:
found_batch_size_line = True
line = 'input_dim: %d\n' % batch_size
f.write(line)
return target
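# Added usage sketch (not part of the original script): the prototxt text
# below is made up but follows the old-style "input_dim" header this parser
# expects; the first input_dim (the batch size) gets rewritten and
# force_backward is inserted.
def write_temp_deploy_example():
  fd, src = tempfile.mkstemp()
  os.close(fd)
  with open(src, 'w') as f:
    f.write('name: "toy"\ninput_dim: 10\ninput_dim: 3\n'
            'input_dim: 227\ninput_dim: 227\n')
  out = write_temp_deploy(src, batch_size=1)
  with open(out) as f:
    text = f.read()
  os.remove(src)
  return text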
def get_ranges(total_length, region_length, num):
starts = np.linspace(0, total_length - region_length, num)
starts = [int(round(s)) for s in starts]
ranges = [(s, s + region_length) for s in starts]
return ranges
def check_ranges(total_length, ranges):
"""
Check to make sure the given ranges are valid.
Inputs:
- total_length: Integer giving total length
- ranges: Sorted list of tuples giving (start, end) for each range.
Returns: Boolean telling whether ranges are valid.
"""
# The start of the first range must be 0
if ranges[0][0] != 0:
return False
# The end of the last range must fill the length
if ranges[-1][1] != total_length:
return False
for i, cur_range in enumerate(ranges):
# The ranges must be distinct
if i + 1 < len(ranges) and cur_range[0] == ranges[i + 1][0]:
return False
# The ranges must cover all the pixels
if i + 1 < len(ranges) and cur_range[1] < ranges[i + 1][0]:
return False
# Each range should not overlap with its second neighbor
if i + 2 < len(ranges) and cur_range[1] >= ranges[i + 2][0]:
return False
return True
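# Added illustration (not in the original script): small made-up tilings of a
# length-10 axis. A valid tiling starts at 0, ends at the total length, and
# leaves no pixels uncovered between consecutive regions.
def check_ranges_example():
  ok = check_ranges(10, [(0, 6), (4, 10)])    # overlapping neighbors -> True
  gap = check_ranges(10, [(0, 4), (6, 10)])   # pixels 4-5 uncovered -> False
  return ok, gap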
def get_best_ranges(total_length, region_length):
"""
Get the first packing that is valid.
"""
max_num = 1000 # this should be enough for anyone ...
num = 1
while True:
ranges = get_ranges(total_length, region_length, num)
if check_ranges(total_length, ranges):
return ranges
else:
if num > max_num:
return None
num = num + 1
return None
def get_regions(total_size, region_size):
print 'total_size: ', total_size
print 'region_size: ', region_size
H, W = total_size
h, w = region_size
y_ranges = get_best_ranges(H, h)
x_ranges = get_best_ranges(W, w)
regions_even = []
regions_odd = []
all_regions = []
for i, x_range in enumerate(x_ranges):
for j, y_range in enumerate(y_ranges):
region = (y_range[0], y_range[1], x_range[0], x_range[1])
if i % 2 == j % 2:
regions_even.append(region)
else:
regions_odd.append(region)
return regions_even, regions_odd
def count_regions_per_pixel(total_size, regions):
counts = np.zeros(total_size)
for region in regions:
y0, y1, x0, x1 = region
counts[y0:y1, x0:x1] += 1
return counts
def get_base_size(net_size, initial_image):
if initial_image is None:
return net_size[2:]
else:
img = imread(initial_image)
return img.shape[:2]
def get_size_sequence(base_size, initial_size, final_size, num_sizes, resize_type):
base_h, base_w = base_size
def parse_size_str(size_str):
if size_str is None:
return base_size
elif size_str.startswith('x'):
scale = float(size_str[1:])
h = int(scale * base_h)
w = int(scale * base_w)
return h, w
elif 'x' in size_str:
h, w = size_str.split('x')
return int(h), int(w)
initial_h, initial_w = parse_size_str(initial_size)
final_h, final_w = parse_size_str(final_size)
if num_sizes == 1:
return [(initial_h, initial_w)]
else:
if resize_type == 'geometric':
h0, h1 = np.log10(initial_h), np.log10(final_h)
w0, w1 = np.log10(initial_w), np.log10(final_w)
heights = np.logspace(h0, h1, num_sizes)
widths = np.logspace(w0, w1, num_sizes)
elif resize_type == 'linear':
heights = np.linspace(initial_h, final_h, num_sizes)
widths = np.linspace(initial_w, final_w, num_sizes)
else:
raise ValueError('Invalid resize_type "%s"' % resize_type)
heights = np.round(heights).astype('int')
widths = np.round(widths).astype('int')
return zip(heights, widths)
def initialize_img(net_size, initial_image, initial_size, mean_img, scale, blur):
_, C, H, W = net_size
def init_size_fn(h, w):
if initial_size is None:
return h, w
elif initial_size.startswith('x'):
scale = float(initial_size[1:])
return int(scale * h), int(scale * w)
elif 'x' in initial_size:
h, w = initial_size.split('x')
return int(h), int(w)
if initial_image is not None:
init_img = imread(initial_image)
init_h, init_w = init_img.shape[:2]
init_h, init_w = init_size_fn(init_h, init_w)
init_img = imresize(init_img, (init_h, init_w))
init_img = uint_to_img(init_img, mean_img)
else:
init_h, init_w = init_size_fn(H, W)
init_img = scale * np.random.randn(1, C, init_h, init_w)
init_img_uint = img_to_uint(init_img, mean_img)
init_img_uint_blur = gaussian_filter(init_img_uint, sigma=blur)
init_img = uint_to_img(init_img_uint_blur, mean_img)
return init_img
def build_parser():
parser = argparse.ArgumentParser()
# CNN options
parser.add_argument('--deploy_txt', default=settings.encoder_definition)
parser.add_argument('--caffe_model', default=settings.encoder_path)
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--mean_image', default="/home/anh/src/caffe-fr-chairs/python/caffe/imagenet/ilsvrc_2012_mean.npy")
parser.add_argument('--gpu', type=int, default=1)
# Image options
parser.add_argument('--image_type', default='amplify_layer',
choices=['amplify_layer', 'amplify_neuron'])
parser.add_argument('--target_layer', default='inception_4d/3x3_reduce')
parser.add_argument('--target_neuron', default=0, type=int)
# Initialization options
parser.add_argument('--initial_image', default=None)
parser.add_argument('--initialization_scale', type=float, default=1.0)
parser.add_argument('--initialization_blur', type=float, default=0.0)
# Resize options
parser.add_argument('--initial_size', default=None)
parser.add_argument('--final_size', default=None)
parser.add_argument('--num_sizes', default=1, type=int)
parser.add_argument('--resize_type', default='geometric',
choices=['geometric', 'linear'])
# Optimization options
parser.add_argument('--learning_rate', type=float, default=1.0)
parser.add_argument('--decay_rate', type=float, default=0.95)
parser.add_argument('--learning_rate_decay_iter', type=int, default=100)
parser.add_argument('--learning_rate_decay_fraction', type=float, default=1.0)
parser.add_argument('--num_steps', type=int, default=1000)
parser.add_argument('--use_pixel_learning_rates', action='store_true')
# Options for layer amplification
parser.add_argument('--amplify_l1_weight', type=float, default=1.0)
parser.add_argument('--amplify_l2_weight', type=float, default=1.0)
parser.add_argument('--amplify_grad_clip', type=float, default=5.0)
# P-norm regularization options
parser.add_argument('--alpha', type=float, default=6.0)
parser.add_argument('--p_scale', type=float, default=1.0)
parser.add_argument('--p_reg', type=float, default=1e-4)
# Auxiliary P-norm regularization options
parser.add_argument('--alpha_aux', type=float, default=6.0)
parser.add_argument('--p_scale_aux', type=float, default=1.0)
parser.add_argument('--p_reg_aux', type=float, default=0.0)
# TV regularization options
parser.add_argument('--beta', type=float, default=2.0)
parser.add_argument('--tv_reg', type=float, default=0.5)
parser.add_argument('--tv_reg_scale', type=float, default=1.0)
parser.add_argument('--tv_reg_step', type=float, default=0.0)
parser.add_argument('--tv_reg_step_iter', type=int, default=50)
parser.add_argument('--tv_grad_operator', default='naive',
choices=['naive', 'sobel', 'sobel_squish'])
# Output options
parser.add_argument('--output_file', default='out.png')
parser.add_argument('--output_iter', default=50, type=int)
parser.add_argument('--show_width', default=5, type=int)
parser.add_argument('--show_height', default=5, type=int)
parser.add_argument('--rescale_image', action='store_true')
parser.add_argument('--iter_behavior', default='save+print')
return parser
def main(args):
if args.gpu < 0:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
#caffe.set_device(args.gpu)
# Build the net; paths may have CAFFE_ROOT
proto_file = os.path.expandvars(args.deploy_txt)
proto_file = write_temp_deploy(proto_file, args.batch_size)
caffe_model_file = os.path.expandvars(args.caffe_model)
net = caffe.Net(proto_file, caffe_model_file, caffe.TEST)
net_size = net.blobs['data'].data.shape
C, H, W = net_size[1:]
mean_img = np.load(os.path.expandvars(args.mean_image))
init_img = initialize_img(net_size, args.initial_image, args.initial_size, mean_img,
args.initialization_scale,
args.initialization_blur)
img = init_img.copy()
if args.initial_image is None:
init_img = None
# Get size sequence
base_size = get_base_size(net_size, args.initial_image)
print 'base_size is %r' % (base_size,)
size_sequence = get_size_sequence(base_size,
args.initial_size,
args.final_size,
args.num_sizes,
args.resize_type)
msg = ('Initial size %r is too small; must be at least %r'
% (size_sequence[0], (H, W)))
assert size_sequence[0] >= (H, W), msg
# Run optimization
for size_idx, size in enumerate(size_sequence):
size_flag = False
if size_idx > 0:
img = resize_img(img, size, mean_img)
if init_img is not None:
raw_init = imread(args.initial_image)
init_img_uint = imresize(raw_init, size)
init_img = uint_to_img(init_img_uint, mean_img)
tv_reg = args.tv_reg
learning_rate = args.learning_rate
regions = get_regions((img.shape[2], img.shape[3]), (H, W))
regions_even, regions_odd = regions
regions_per_pixel = count_regions_per_pixel((img.shape[2], img.shape[3]), regions_even+regions_odd)
pixel_learning_rates = 1.0 / regions_per_pixel
caches = {}
pix_history = defaultdict(list)
pix = [(100, 100), (200, 200), (100, 200), (200, 100)]
for t in xrange(args.num_steps):
for c in [0, 1, 2]:
for py, px in pix:
pix_history[(c, py, px)].append(img[0, c, py, px])
for cur_regions in [regions_even, regions_odd]:
if len(cur_regions) == 0: continue
cnn_grad = get_cnn_grads(img, cur_regions, net, args.target_layer,
step_type=args.image_type,
L1_weight=args.amplify_l1_weight,
L2_weight=args.amplify_l2_weight,
grad_clip=args.amplify_grad_clip,
target_neuron=args.target_neuron)
for region_idx, region in enumerate(cur_regions):
y0, y1, x0, x1 = region
img_region = img[:, :, y0:y1, x0:x1]
if init_img is not None:
init_region = init_img[0, :, y0:y1, x0:x1]
p_loss, p_grad = p_norm(img_region - init_region, p=args.alpha, scale=args.p_scale)
else:
p_loss, p_grad = p_norm(img_region, p=args.alpha, scale=args.p_scale)
p_loss_aux, p_grad_aux = p_norm(img_region, p=args.alpha_aux, scale=args.p_scale_aux)
tv_loss, tv_grad = tv_norm(img_region / args.tv_reg_scale, beta=args.beta,
operator=args.tv_grad_operator)
tv_grad /= args.tv_reg_scale
dimg = cnn_grad[region_idx] + args.p_reg * p_grad + args.p_reg_aux * p_grad_aux + tv_reg * tv_grad
cache = caches.get(region, None)
step, cache = rmsprop(dimg, cache=cache, decay_rate=args.decay_rate)
caches[region] = cache
step *= learning_rate
if args.use_pixel_learning_rates:
step *= pixel_learning_rates[y0:y1, x0:x1]
img[:, :, y0:y1, x0:x1] += step
if (t + 1) % args.tv_reg_step_iter == 0:
tv_reg += args.tv_reg_step
if (t + 1) % args.learning_rate_decay_iter == 0:
learning_rate *= args.learning_rate_decay_fraction
if (t + 1) % args.output_iter == 0:
should_plot_pix = 'plot_pix' in args.iter_behavior
should_show = 'show' in args.iter_behavior
should_save = 'save' in args.iter_behavior
        should_print = 'print' in args.iter_behavior
if False:
values = [img_region.flatten(),
cnn_grad.flatten(),
#(args.p_reg * p_grad).flatten(),
#(tv_reg * tv_grad).flatten()]
(args.p_reg * p_grad + tv_reg * tv_grad).flatten(),
step.flatten()]
names = ['pixel', 'cnn grad', 'reg', 'step']
subplot_idx = 1
for i, (name_i, val_i) in enumerate(zip(names, values)):
for j, (name_j, val_j) in enumerate(zip(names, values)):
x_min = val_i.min() - 0.1 * np.abs(val_i.min())
x_max = val_i.max() + 0.1 * np.abs(val_i.max())
y_min = val_j.min() - 0.1 * np.abs(val_j.min())
y_max = val_j.max() + 0.1 * np.abs(val_j.max())
plt.subplot(len(values), len(values), subplot_idx)
plt.scatter(val_i, val_j)
plt.plot(np.linspace(x_min, x_max), np.linspace(x_min, x_max), '-k')
plt.plot(np.linspace(x_min, x_max), -np.linspace(x_min, x_max), '-k')
plt.xlim([x_min, x_max])
plt.ylim([y_min, y_max])
plt.xlabel(name_i)
plt.ylabel(name_j)
subplot_idx += 1
plt.gcf().set_size_inches(15, 15)
plt.show()
if should_plot_pix:
for p, h in pix_history.iteritems():
plt.plot(h)
plt.show()
if should_print:
print ('Finished iteration %d / %d for size %d / %d' %
(t + 1, args.num_steps, size_idx + 1, len(size_sequence)))
print 'p_loss: ', p_loss
print 'tv_loss: ', tv_loss
if args.image_type == 'amplify_neuron':
target_blob = net.blobs[args.target_layer]
neuron_val = target_blob.data[:, args.target_neuron].mean()
print 'mean neuron val: ', neuron_val
print 'mean p_grad: ', np.abs(args.p_reg * p_grad).mean()
print 'mean p_grad_aux: ', np.abs(args.p_reg_aux * p_grad_aux).mean()
print 'mean tv_grad: ', np.abs(tv_reg * tv_grad).mean()
print 'mean cnn_grad: ', np.abs(cnn_grad).mean()
print 'step mean, median: ', np.abs(step).mean(), np.median(np.abs(step))
print 'image mean, std: ', img.mean(), img.std()
print 'mean step / val: ', np.mean(np.abs(step) / np.abs(img_region))
img_uint = img_to_uint(img, mean_img, rescale=args.rescale_image)
if should_show:
plt.imshow(img_uint, interpolation='none')
plt.axis('off')
plt.gcf().set_size_inches(args.show_width, args.show_height)
plt.show()
if should_save:
name, ext = os.path.splitext(args.output_file)
filename = '%s_%d_%d%s' % (name, size_idx + 1, t + 1, ext)
imsave(filename, img_uint)
img_uint = img_to_uint(img, mean_img, rescale=args.rescale_image)
imsave(args.output_file, img_uint)
if __name__ == '__main__':
parser = build_parser()
args = parser.parse_args()
main(args)
| mit |
ephes/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |