repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
shreyas15/Product-Recommender-Engine | metadata_preprocessor.py | 1 | 2879 | # metadata_preprocessor.py
#
# Standalone Python/Spark program to perform data pre-processing..
# Reads Ratings data and meta data to combine where necessary
# and encode labels to a form fit for processing.
#
#
# Usage: spark-submit data_preprocessor.py <inputdatafile>
# Example usage: spark-submit data_preprocessor.py ratings.csv
#
#
import ast
import csv
import gzip
import sys

import numpy as np
import pandas as pd
from sklearn import preprocessing
from pyspark import SparkContext, SparkConf, SQLContext
conf = (SparkConf().set("spark.driver.maxResultSize", "8g"))
#to read data from gzip files
def parse(path):
    """Yield one Python object per line of the gzipped file at *path*.

    Each line of the Amazon metadata dump is a dict/list literal, so
    ast.literal_eval is used instead of eval: it parses literals only and
    cannot execute arbitrary code from an untrusted file.  The file handle
    is managed with ``with`` so it is closed even if a line fails to parse.
    """
    with gzip.open(path, 'rb') as g:
        for l in g:
            yield ast.literal_eval(l.decode('utf-8'))
#make a dataframe
def getDF(path):
    """Build a pandas DataFrame from the gzipped records at *path*.

    Rows are indexed by their position in the file, matching the order
    in which ``parse`` yields them.
    """
    records = {row_idx: record for row_idx, record in enumerate(parse(path))}
    return pd.DataFrame.from_dict(records, orient='index')
# Column names for the headerless ratings CSV (see the commented-out
# read_csv call in labelEncoder below).
names = [
    'user_id',
    'product_id',
    'rating',
    'timestamp',
]
def labelEncoder(in_csv):
    """Encode the categorical user/product id columns of *in_csv* as integers
    and write the result to 'ratings_als.csv' ('|'-separated, no header/index).
    """
    frame = pd.read_csv(in_csv, sep=",")
    # Replace each id column with its integer-encoded form.
    for column in ('user_id', 'product_id'):
        encoder = preprocessing.LabelEncoder()
        encoder.fit(frame[column])
        frame[column] = encoder.transform(frame[column])
    # ALS-ready output: pipe-separated, no header, no index.
    frame.to_csv('ratings_als.csv', sep='|', index=False, header=None)
if __name__ == "__main__":
    sc = SparkContext(appName="DataProcessor", conf=conf)
    sqlContext = SQLContext(sc)
    ## Use this if the file being read is a JSON that is gzipped.
    metadata_df = getDF(sys.argv[1])
    metadata_df.rename(columns={'asin': 'product_id'}, inplace=True)
    # Drop all unused columns in a single call instead of six separate
    # drop() invocations.
    metadata_df.drop(['description', 'price', 'salesRank', 'imUrl',
                      'brand', 'related'],
                     axis=1, inplace=True)
    metadata_df.to_csv('temp_metadata.csv', sep=',', index=False)
    sc.stop()
| mit |
agomariz/scikit-neuralnetwork | sknn/tests/test_sklearn.py | 5 | 2706 | import unittest
from nose.tools import (assert_equal, assert_raises, assert_in, assert_not_in)
import numpy
from scipy.stats import randint, uniform
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.cross_validation import cross_val_score
from sknn.mlp import Regressor as MLPR, Classifier as MLPC
from sknn.mlp import Layer as L
class TestGridSearchRegressor(unittest.TestCase):
    """Exercise sklearn grid/random hyperparameter search over an sknn
    regressor; subclasses may swap in another estimator via __estimator__.
    """

    __estimator__ = MLPR

    def setUp(self):
        # 64 random samples, 16 features, all-zero regression targets.
        self.a_in = numpy.random.uniform(0.0, 1.0, size=(64, 16))
        self.a_out = numpy.zeros((64, 1))

    def test_GridGlobalParams(self):
        estimator = self.__estimator__(layers=[L("Linear")], n_iter=1)
        search = GridSearchCV(estimator,
                              param_grid={'learning_rate': [0.01, 0.001]})
        search.fit(self.a_in, self.a_out)

    def test_GridLayerParams(self):
        estimator = self.__estimator__(
            layers=[L("Rectifier", units=12), L("Linear")], n_iter=1)
        search = GridSearchCV(estimator,
                              param_grid={'hidden0__units': [4, 8, 12]})
        search.fit(self.a_in, self.a_out)

    def test_RandomGlobalParams(self):
        estimator = self.__estimator__(layers=[L("Softmax")], n_iter=1)
        search = RandomizedSearchCV(
            estimator,
            param_distributions={'learning_rate': uniform(0.001, 0.01)},
            n_iter=2)
        search.fit(self.a_in, self.a_out)

    def test_RandomLayerParams(self):
        estimator = self.__estimator__(
            layers=[L("Softmax", units=12), L("Linear")], n_iter=1)
        search = RandomizedSearchCV(
            estimator,
            param_distributions={'hidden0__units': randint(4, 12)},
            n_iter=2)
        search.fit(self.a_in, self.a_out)

    def test_RandomMultipleJobs(self):
        estimator = self.__estimator__(
            layers=[L("Softmax", units=12), L("Linear")], n_iter=1)
        search = RandomizedSearchCV(
            estimator,
            param_distributions={'hidden0__units': randint(4, 12)},
            n_iter=4, n_jobs=4)
        search.fit(self.a_in, self.a_out)
class TestGridSearchClassifier(TestGridSearchRegressor):
    """Run the regressor search suite against the classifier estimator,
    with integer class labels (4 classes) instead of zero targets.
    """

    __estimator__ = MLPC

    def setUp(self):
        self.a_in = numpy.random.uniform(0.0, 1.0, size=(64, 16))
        self.a_out = numpy.random.randint(0, 4, (64,))
class TestCrossValidation(unittest.TestCase):
    """Smoke-test sklearn cross_val_score with sknn estimators."""

    def test_Regressor(self):
        features = numpy.random.uniform(0.0, 1.0, (64, 16))
        targets = numpy.zeros((64, 1))
        model = MLPR(layers=[L("Linear")], n_iter=1)
        cross_val_score(model, features, targets, cv=5)

    def test_Classifier(self):
        features = numpy.random.uniform(0.0, 1.0, (64, 16))
        labels = numpy.random.randint(0, 4, (64,))
        model = MLPC(layers=[L("Linear")], n_iter=1)
        cross_val_score(model, features, labels, cv=5)
| bsd-3-clause |
zaxliu/deepnap | experiments/kdd-exps/experiment_DynaQNN_130_Feb12_2215.py | 1 | 5180 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
# Keep a handle on the real stdout; the experiment redirects sys.stdout
# to a log file and periodically restores it for progress reports.
sys_stdout = sys.stdout
# Log prefix derived from this script's filename, e.g.
# experiment_DynaQNN_130_Feb12_2215.py -> msg_DynaQNN_130_Feb12
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
# First CLI argument distinguishes parallel runs of the same experiment.
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
# Composite classes
class Dyna_QAgentNN(DynaMixin, QAgentNN):
    """Neural-network Q agent with Dyna-style simulated experience,
    composed from DynaMixin and QAgentNN via cooperative inheritance."""
    def __init__(self, **kwargs):
        # All configuration is forwarded down the MRO (DynaMixin -> QAgentNN).
        super(Dyna_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'dh3'
# |- Agent
#    |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9  # TD backup
explore_strategy, epsilon = 'epsilon', 0.02  # exploration
#    |- QAgentNN
#       | - Phi
# phi_length = 5
# dim_state = (1, phi_length, 3+2)
# range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
# range_state = [[range_state_slice]*phi_length]
#       | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
#       | - Other params
momentum, learning_rate = 0.9, 0.01  # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32  # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 0, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5  # reward/cost coefficients
traffic_params = (model_type, traffic_window_size,
                  stride, n_iter, adjust_offset,
                  eval_period, eval_len,
                  n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5  # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5  # simulated (model-based) updates per real step
# |- Env
#    |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
# Extra warm-up epochs so the replay buffers are full at start_time.
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
#    |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
    filepath_or_buffer='../data/trace_{}.dat'.format(location),
    parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
    session_df=session_df, time_step=time_step,
    head_datetime=head_datetime, tail_datetime=tail_datetime,
    rewarding=rewarding,
    verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgentNN(
    env_model=env_model, num_sim=num_sim,
    dim_state=dim_state, range_state=range_state,
    f_build_net = None,
    batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
    reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
    update_period=update_period, freeze_period=freeze_period,
    memory_size=memory_size, num_buffer=num_buffer,
    # Below is QAgent params
    actions=actions, alpha=alpha, gamma=gamma,
    explore_strategy=explore_strategy, epsilon=epsilon,
    verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
# Refuse to overwrite an existing log so a finished run is never clobbered.
if os.path.isfile(log_path+log_file_name):
    print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
    log_file = open(log_path+log_file_name,"w")
    print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
    print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
    print log_file_name
    time.sleep(1)
    # All per-epoch prints below go into the log file.
    sys.stdout = log_file
    while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
        # log time
        print "Epoch {},".format(emu.epoch),
        left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
        right = left + emu.te.time_step
        print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
        emu.step()
        print
        # Every 5% of progress, switch back to the console for a heartbeat.
        if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
            sys.stdout = sys_stdout
            print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
            print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
            print log_file_name
            time.sleep(1)
            sys.stdout = log_file
    sys.stdout = sys_stdout
    log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
clairetang6/bokeh | bokeh/charts/builders/line_builder.py | 8 | 9446 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from six import iteritems
from itertools import chain
from ..builder import XYBuilder, create_and_build
from ..glyphs import LineGlyph, PointGlyph
from ..attributes import DashAttr, ColorAttr, MarkerAttr
from ..data_source import NumericalColumnsAssigner
from ...models.sources import ColumnDataSource
from ...core.properties import Bool, String, List
from ..operations import Stack, Dodge
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Line(data=None, x=None, y=None, **kws):
    """ Create a line chart using :class:`LineBuilder <bokeh.charts.builders.line_builder.LineBuilder>` to
    render the glyphs.

    The line chart is typically used with column-oriented data, where each
    column contains comparable measurements and the column names act as a
    categorical variable differentiating the measurements.  One column may
    serve as the index for either the x or y axis.

    .. note::
        Only the x or y axis can display multiple variables; the other is
        used as an index.

    Args:
        data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
            source with columns of data for each line.
        x (str or list(str), optional): specifies variable(s) to use for x axis
        y (str or list(str), optional): specifies variable(s) to use for y axis

    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_defaults` are also accepted as keyword parameters.

    .. note::
        This chart type differs on input types as compared to other charts:
        line charts typically plot labeled series (e.g. a column of AAPL
        stock prices over time).  The stacked format — a `stock_label`
        column plus a `price` column — is also supported and is derived
        internally when needed.

    Returns:
        :class:`Chart`: includes glyph renderers that generate the lines

    Examples:
        .. bokeh-plot::
            :source-position: above

            import numpy as np
            from bokeh.charts import Line, output_file, show

            # (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
            xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
            line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
            output_file('line.html')
            show(line)
    """
    # Fold the axis selections into the chart options and delegate to the
    # shared builder factory.
    options = dict(kws, x=x, y=y)
    return create_and_build(LineBuilder, data, **options)
class LineBuilder(XYBuilder):
    """This is the Line class and it is in charge of plotting
    Line charts in an easy and intuitive way.

    Essentially, we provide a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed lines taking the references from the source.
    """

    series_names = List(String, help="""Names that represent the items being plotted.""")
    stack = Bool(default=False)

    default_attributes = {'color': ColorAttr(),
                          'dash': DashAttr(),
                          'marker': MarkerAttr()}

    dimensions = ['y', 'x']

    column_selector = NumericalColumnsAssigner

    glyph = LineGlyph

    @property
    def measures(self):
        # The measurement columns: whichever of x/y was given as a list.
        if isinstance(self.y.selection, list):
            return self.y.selection
        elif isinstance(self.x.selection, list):
            return self.x.selection
        else:
            return None

    @property
    def measure_input(self):
        # True when the user supplied multiple columns for one axis.
        return isinstance(self.y.selection, list) or isinstance(self.x.selection, list)

    @property
    def stack_flags(self):
        # Check if we stack measurements and by which attributes
        # This happens if we used the same series labels for dimensions as attributes
        return {k: self.attr_measurement(k) for k in list(
            self.attributes.keys())}

    def get_id_cols(self, stack_flags):
        # collect the other columns used as identifiers, that aren't a measurement name
        id_cols = [self.attributes[attr].columns
                   for attr, stack in iteritems(stack_flags) if not stack and
                   self.attributes[attr].columns != self.measures and
                   self.attributes[attr].columns is not None]
        return list(chain.from_iterable(id_cols))

    def setup(self):
        """Handle input options that require transforming data and/or user selections."""

        # handle special case of inputs as measures
        if self.measure_input:
            stack_flags = self.stack_flags
            id_cols = self.get_id_cols(stack_flags)

            # if we have measures input, we need to stack by something, set default
            if all(attr is False for attr in list(stack_flags.values())):
                stack_flags['color'] = True

            # stack the measurement dimension while keeping id columns
            self._stack_measures(ids=id_cols)

            # set the attributes to key off of the name of the stacked measurement
            source = ColumnDataSource(self._data.df)
            for attr_name, stack_flag in iteritems(stack_flags):
                if stack_flags[attr_name]:
                    default_attr = self.attributes[attr_name]
                    default_attr.setup(columns='series', data=source)

        # Handle when to use special column names
        if self.x.selection is None and self.y.selection is not None:
            self.x.selection = 'index'
        elif self.x.selection is not None and self.y.selection is None:
            self.y.selection = 'index'

    def attr_measurement(self, attr_name):
        """Detect if the attribute has been given measurement columns."""
        cols = self.attributes[attr_name].columns
        return (cols is not None and (cols == self.y.selection or
                cols == self.x.selection))

    def set_series(self, col_name):
        # Record the unique values of *col_name* (as strings) as series names.
        series = self._data.df[col_name].drop_duplicates().tolist()
        series = [str(item) for item in series]
        self.series_names = series

    def _stack_measures(self, ids, var_name='series'):
        """Stack data and keep the ids columns.

        Args:
            ids (list(str)): the column names that describe the measures
        """
        if isinstance(self.y.selection, list):
            dim = 'y'
            if self.x.selection is not None:
                ids.append(self.x.selection)
        else:
            dim = 'x'
            if self.y.selection is not None:
                ids.append(self.y.selection)

        if len(ids) == 0:
            ids = None

        dim_prop = getattr(self, dim)

        # transform our data by stacking the measurements into one column
        self._data.stack_measures(measures=dim_prop.selection, ids=ids,
                                  var_name=var_name)

        # update our dimension with the updated data
        dim_prop.set_data(self._data)

        self.set_series('series')

    def get_builder_attr(self):
        # Builder properties that the glyph also understands, to pass through.
        attrs = self.properties()
        return {attr: getattr(self, attr) for attr in attrs
                if attr in self.glyph.properties()}

    def yield_renderers(self):
        build_attr = self.get_builder_attr()

        # get the list of builder attributes and only pass them on if glyph supports
        attrs = list(self.attributes.keys())
        attrs = [attr for attr in attrs if attr in self.glyph.properties()]

        for group in self._data.groupby(**self.attributes):
            group_kwargs = self.get_group_kwargs(group, attrs)
            group_kwargs.update(build_attr)

            glyph = self.glyph(label=group.label,
                               x=group.get_values(self.x.selection),
                               y=group.get_values(self.y.selection),
                               **group_kwargs)

            # dash=group['dash']
            # save reference to composite glyph
            self.add_glyph(group, glyph)

            # yield each renderer produced by composite glyph
            for renderer in glyph.renderers:
                yield renderer

        if self.stack:
            Stack().apply(self.comp_glyphs)
        # Dodge is applied unconditionally, after optional stacking.
        Dodge().apply(self.comp_glyphs)
class PointSeriesBuilder(LineBuilder):
    """LineBuilder variant that renders each series with point markers
    instead of connected lines."""
    glyph = PointGlyph
| bsd-3-clause |
matteorr/coco-analyze | analysisAPI/sizeSensitivity.py | 1 | 8981 | ## imports
import os, time
import numpy as np
import matplotlib.pyplot as plt
def sizeSensitivity( coco_analyze, oks, saveDir ):
    """Analyze keypoint-error sensitivity to instance size.

    Evaluates each error type (miss/swap/inversion/jitter/score/background
    FP/FN) over four area ranges plus 'all', writes a textual summary to
    <saveDir>/benchmarks_sensitivity/size/std_out.txt, and saves three PDF
    plots.  Returns a dict mapping plot names to their file paths.
    """
    loc_dir = saveDir + '/benchmarks_sensitivity/size'
    if not os.path.exists(loc_dir):
        os.makedirs(loc_dir)
        os.makedirs(loc_dir + '/all_plots')
    f = open('%s/std_out.txt'%loc_dir, 'w')
    f.write("Running Analysis: [Size Sensitivity]\n\n")
    tic = time.time()
    paths = {}

    # Area ranges: medium/large/xlarge/xxlarge, plus the union ('all').
    areaRngs = [[32 ** 2, 64 ** 2],[64 ** 2, 96 ** 2],[96 ** 2, 128 ** 2],
                [128 ** 2, 1e5 ** 2],[32 ** 2, 1e5 ** 2]]
    areaRngLbls = ['medium','large','xlarge','xxlarge','all']
    err_types = ['miss','swap','inversion','jitter','score','bckgd_false_pos', 'false_neg']

    coco_analyze.params.oksThrs = [oks]
    coco_analyze.params.err_types = []
    coco_analyze.params.areaRng = areaRngs
    coco_analyze.params.areaRngLbl = areaRngLbls

    coco_gt = coco_analyze.cocoGt
    coco_analyze.cocoEval.params.useGtIgnore = 0
    coco_analyze.cocoEval.params.gtIgnoreIds = []

    # Bucket every ground-truth annotation id by its area range.
    size_index = {}
    anns = coco_gt.loadAnns(coco_gt.getAnnIds())
    for a in anns:
        if areaRngs[0][0] < a['area'] <= areaRngs[0][1]:
            size_index.setdefault('medium', []).append(a['id'])
        if areaRngs[1][0] < a['area'] <= areaRngs[1][1]:
            size_index.setdefault('large', []).append(a['id'])
        if areaRngs[2][0] < a['area'] <= areaRngs[2][1]:
            size_index.setdefault('xlarge', []).append(a['id'])
        if areaRngs[3][0] < a['area'] <= areaRngs[3][1]:
            size_index.setdefault('xxlarge', []).append(a['id'])

    f.write("Benchmark Dimensions:\n")
    for i,a in enumerate(areaRngs[:-1]):
        f.write("%d) %s-%s: %d\n"%(i,areaRngLbls[i],a,len(size_index[areaRngLbls[i]])))

    # Histogram of instance counts per size bucket.
    fig, ax = plt.subplots(figsize=(6,6))
    ax.set_facecolor('lightgray')
    x = [1,2,3,4]
    y = [len(size_index['medium']), len(size_index['large']), len(size_index['xlarge']), len(size_index['xxlarge'])]
    plt.bar(x,y,color='g',align='center')
    plt.xticks(x,['med','lrg','xlrg','xxlrg'])
    plt.title('Instances Size Distribution',fontsize=20)
    plt.grid()
    path = '%s/size_benchmarks.pdf'%loc_dir
    paths['instance_size_hist'] = path
    plt.savefig(path,bbox_inches='tight')
    plt.close()

    # Re-run the analysis once per error type, collecting AUC/recall stats.
    stats = []
    for eind, err in enumerate(err_types):
        if err in ['miss','swap', 'inversion', 'jitter']:
            coco_analyze.params.err_types = [err]
            coco_analyze.analyze(check_kpts=True, check_scores=False, check_bckgd=False)
        if err == 'score':
            coco_analyze.params.err_types = []
            coco_analyze.analyze(check_kpts=False, check_scores=True, check_bckgd=False)
        if err == 'bckgd_false_pos':
            coco_analyze.params.err_types = []
            coco_analyze.analyze(check_kpts=False, check_scores=False, check_bckgd=True)
        # 'false_neg' stats come from the bckgd analysis above; skip re-running.
        if err == 'false_neg': continue
        coco_analyze.summarize(makeplots=True, savedir=loc_dir+'/all_plots', team_name=err)

        f.write("\nPerformance Breakdown over Area for [%s]:\n"%err)
        for s in coco_analyze.stats:
            if s['err']==err:
                f.write("%s: ap[%.3f], max_rec[%.3f]\n"%(s['areaRngLbl'],s['auc'],s['recall']))
        stats += coco_analyze.stats

    # De-duplicate stat dicts collected across runs.
    stats = [dict(t) for t in set([tuple(s.items()) for s in stats])]

    f.write("\nPerformance Breakdown over Area for [Original Dts]:\n")
    for a in areaRngLbls:
        b = [s for s in stats if s['areaRngLbl']==a and s['err']=='baseline'][0]
        f.write("%s: ap[%.3f], max_rec[%.3f]\n"%(a,b['auc'],b['recall']))

    # AUC per (error, area); false negatives are measured relative to the
    # background-false-positive run.
    err_perf = {}
    for s in stats:
        if s['err'] != 'false_neg':
            err_perf[s['err'],s['areaRngLbl']] = s['auc']
        else:
            bckgd_fp = [ss for ss in stats if (ss['err'],ss['areaRngLbl'])==('bckgd_false_pos',s['areaRngLbl'])][0]
            err_perf[s['err'],s['areaRngLbl']] = s['auc'] - bckgd_fp['auc']

    baseline = [err_perf['baseline',area] for area in areaRngLbls]
    # AP improvement over baseline for each error type and area range.
    size_performance = {}
    for err in err_types:
        if err=='false_neg':
            size_performance[err] = [err_perf[err,area] for area in areaRngLbls]
        else:
            size_performance[err] = [err_perf[err,area]-err_perf['baseline',area] for area in areaRngLbls]

    f.write("\nAP Improvement over Baseline at all area ranges: %s\n"%areaRngLbls)
    for k in size_performance:
        f.write("%s: %s\n"%(k, size_performance[k]))

    oks_75_auc = baseline
    perf_jitt = size_performance['jitter']
    perf_inv = size_performance['inversion']
    perf_swap = size_performance['swap']
    perf_miss = size_performance['miss']
    perf_score = size_performance['score']
    perf_bk_fp = size_performance['bckgd_false_pos']
    perf_bk_fn = size_performance['false_neg']

    # Scatter plot: per-size AP improvement for each error type; red dashed
    # lines mark the 'all sizes' value for each group.
    fig, ax = plt.subplots(figsize=(20,10))
    ax.set_facecolor('lightgray')
    plt.ylabel("AP Improvement",fontsize=20)
    plt.title("Error Sensitivity over size @ OKS Eval Thresh=%.2f"%oks,fontsize=20)
    x = [.5,1,1.5,2, 3,3.5,4,4.5, 5.5,6,6.5,7, 8,8.5,9,9.5,
         10.5,11,11.5,12, 13,13.5,14,14.5, 15.5,16,16.5,17]
    y = perf_jitt[:4] + perf_inv[:4] + perf_swap[:4] + \
        perf_miss[:4] + perf_score[:4] + perf_bk_fp[:4] + perf_bk_fn[:4]
    plt.scatter(x,y,c='b',s=150,alpha=.5,edgecolor='black',linewidth=2)
    plt.plot([.5, 2], [perf_jitt[4], perf_jitt[4]],'r--',linewidth=2)
    plt.plot([3, 4.5], [perf_inv[4], perf_inv[4]],'r--',linewidth=2)
    plt.plot([5.5, 7], [perf_swap[4], perf_swap[4]],'r--',linewidth=2)
    plt.plot([8, 9.5], [perf_miss[4], perf_miss[4]],'r--',linewidth=2)
    plt.plot([10.5, 12],[perf_score[4], perf_score[4]],'r--',linewidth=2)
    plt.plot([13, 14.5],[perf_bk_fp[4], perf_bk_fp[4]],'r--',linewidth=2)
    plt.plot([15.5, 17],[perf_bk_fn[4], perf_bk_fn[4]],'r--',linewidth=2)
    yy = -.05/2.
    ax.annotate('Jitter', xy=(1.25,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    ax.annotate('Inversion', xy=(3.75,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    ax.annotate('Swap', xy=(6.25,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    ax.annotate('Miss', xy=(8.75,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    ax.annotate('Score', xy=(11.25,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    ax.annotate('Bkgd. FP', xy=(13.75,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    ax.annotate('FN', xy=(16.25,yy),
                horizontalalignment='center',
                verticalalignment='center',fontsize=20)
    plt.xticks(x,['m','l','xl','xxl','m','l','xl','xxl','m','l','xl','xxl',
                  'm','l','xl','xxl','m','l','xl','xxl','m','l','xl','xxl',
                  'm','l','xl','xxl'])
    plt.xlim([0,17.5])
    plt.ylim([-.05,max(y)+.05])
    plt.grid()
    path = '%s/errors_sensitivity.pdf'%loc_dir
    paths['err_size_sensitivity'] = path
    plt.savefig(path,bbox_inches='tight')
    plt.close()

    # Bar plot: baseline AP per size, with the 'all sizes' AP as a dashed line.
    fig, ax = plt.subplots(figsize=(10,10))
    ax.set_facecolor('lightgray')
    plt.ylabel("AP",fontsize=20)
    plt.title("AP Sensitivity over size @ OKS Eval Thresh=%.2f"%oks,fontsize=20)
    x = [1,2,3,4]
    y = oks_75_auc[:4]
    plt.bar(x,y,color='b',alpha=.7,align='center',width=.85)
    plt.plot([.5,4.5], [oks_75_auc[4], oks_75_auc[4]],'r--',linewidth=3)
    plt.xticks(x,['m','l','xl','xxl'])
    plt.xlim([0,5])
    plt.grid()
    path = '%s/ap_sensitivity.pdf'%loc_dir
    paths['ap_size_sensitivity'] = path
    plt.savefig(path,bbox_inches='tight')
    plt.close()

    # Sensitivity = spread across sizes; Impact = best size vs. all-sizes AP.
    f.write("\nOKS %.2f: Sensitivity[%.3f], Impact[%.3f]\n"%(oks, max(oks_75_auc[:4])-min(oks_75_auc[:4]), max(oks_75_auc[:4])-oks_75_auc[4]))
    f.write("Jitter: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_jitt[:4])-min(perf_jitt[:4]),max(perf_jitt[:4])-perf_jitt[4]))
    f.write("Inversion: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_inv[:4]) -min(perf_inv[:4]) ,max(perf_inv[:4])-perf_inv[4]))
    f.write("Swap: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_swap[:4])-min(perf_swap[:4]),max(perf_swap[:4])-perf_swap[4]))
    f.write("Miss: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_miss[:4])-min(perf_miss[:4]),max(perf_miss[:4])-perf_miss[4]))
    f.write("Score: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_score[:4])-min(perf_score[:4]),max(perf_score[:4])-perf_score[4]))
    f.write("Bkgd FP: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_bk_fp[:4])-min(perf_bk_fp[:4]),max(perf_bk_fp[:4])-perf_bk_fp[4]))
    f.write("FN: Sensitivity[%.3f], Impact[%.3f]\n"%(max(perf_bk_fn[:4])-min(perf_bk_fn[:4]),max(perf_bk_fn[:4])-perf_bk_fn[4]))

    f.write("\nDone, (t=%.2fs)."%(time.time()-tic))
    f.close()

    return paths
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_toolkits/axes_grid/examples/inset_locator_demo2.py | 8 | 1255 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import numpy as np
def get_demo_image():
    """Return the bivariate-normal sample array and its plot extent.

    Returns:
        (z, extent): z is a 15x15 numpy array; extent is the
        (xmin, xmax, ymin, ymax) tuple used for imshow.
    """
    from matplotlib.cbook import get_sample_data
    # The redundant local `import numpy as np` was removed; the module-level
    # numpy import is used instead.
    f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
    z = np.load(f)
    # z is a numpy array of 15x15
    return z, (-3, 4, -4, 3)
fig = plt.figure(1, [5,4])
ax = fig.add_subplot(111)

# prepare the demo image: embed the 15x15 sample into a 150x150 canvas
Z, extent = get_demo_image()
Z2 = np.zeros([150, 150], dtype="d")
ny, nx = Z.shape
Z2[30:30+ny, 30:30+nx] = Z

# extent = [-3, 4, -4, 3]
ax.imshow(Z2, extent=extent, interpolation="nearest",
          origin="lower")

# Inset axes magnifying a portion of the main image.
axins = zoomed_inset_axes(ax, 6, loc=1) # zoom = 6
axins.imshow(Z2, extent=extent, interpolation="nearest",
             origin="lower")

# sub region of the original image
x1, x2, y1, y2 = -1.5, -0.9, -2.5, -1.9
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)

plt.xticks(visible=False)
plt.yticks(visible=False)

# draw a bbox of the region of the inset axes in the parent axes and
# connecting lines between the bbox and the inset axes area
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")

plt.draw()
plt.show()
| gpl-2.0 |
idaholab/raven | plugins/ExamplePlugin/src/CorrelationPlot.py | 1 | 3426 | """
Author: talbpaul
Date : 2021-04-02
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from PluginBaseClasses.OutStreamPlotPlugin import PlotPlugin, InputTypes, InputData
class Correlation(PlotPlugin):
    # Example Plot plugin class: renders a pairwise correlation grid
    # (scatter plots off-diagonal, histograms on the diagonal) for the
    # requested variables of a source DataObject.
    @classmethod
    def getInputSpecification(cls):
        """
        Define the acceptable user inputs for this class.
        @ In, None
        @ Out, specs, InputData.ParameterInput,
        """
        specs = super().getInputSpecification()
        specs.addSub(InputData.parameterInputFactory('bins', contentType=InputTypes.IntegerType))
        specs.addSub(InputData.parameterInputFactory('variables', contentType=InputTypes.StringListType))
        specs.addSub(InputData.parameterInputFactory('source', contentType=InputTypes.StringType))
        return specs

    def __init__(self):
        """
        Constructor.
        @ In, None
        @ Out, None
        """
        super().__init__()
        self.printTag = 'ExamplePlugin.Correlation'
        self._numBins = 10      # number of bins to use; np default is 10 currently
        self._vars = None       # list of variables to plot correlations for
        self._sourceName = None # name of source data object
        self._source = None     # actual source data object

    def handleInput(self, spec):
        """
        Reads in data from the input file
        @ In, spec, InputData.ParameterInput, input information
        @ Out, None
        """
        super().handleInput(spec)
        for node in spec.subparts:
            if node.getName() == 'bins':
                self._numBins = node.value
            elif node.getName() == 'variables':
                self._vars = node.value
            elif node.getName() == 'source':
                self._sourceName = node.value
        # input checking: variables and source are required
        if self._vars is None:
            self.raiseAnError(IOError, 'Input missing the <variables> node!')
        if self._sourceName is None:
            self.raiseAnError(IOError, 'Input missing the <source> node!')

    def initialize(self, stepEntities):
        """
        Set up plotter for each run
        @ In, stepEntities, dict, entities from the Step
        @ Out, None
        """
        super().initialize(stepEntities)
        src = self.findSource(self._sourceName, stepEntities)
        if src is None:
            self.raiseAnError(IOError, f'Source DataObject {self._sourceName} was not found in the Step!')
        self._source = src

    def run(self):
        """
        Generate the plot
        @ In, None
        @ Out, None
        """
        n = len(self._vars)
        # NOTE(review): for n == 1, plt.subplots returns a single Axes, not a
        # 2-D array — the axes[v2, v1] indexing below assumes n >= 2; confirm.
        fig, axes = plt.subplots(n, n, tight_layout=True)
        data = self._source.asDataset()
        for v1, var1 in enumerate(self._vars):
            var1Data = data[var1].values
            for v2, var2 in enumerate(self._vars):
                ax = axes[v2, v1] # TODO wasn't this a flattened array for some matplotlibs?
                if var1 == var2:
                    # diagonal: histogram of the variable itself
                    counts, edges = np.histogram(var1Data, bins=self._numBins)
                    ax.step(0.5 * (edges[:-1] + edges[1:]), counts, '.-', where='mid')
                    ax.set_xlabel(var1)
                    ax.set_ylabel(var1)
                else:
                    # off-diagonal: scatter of var1 vs var2
                    var2Data = data[var2].values
                    ax.scatter(var1Data, var2Data, marker='.')
                    ax.set_xlabel(var1)
                    ax.set_ylabel(var2)
                # only label the outer edge of the grid
                if v1 == 0:
                    ax.set_ylabel(var2)
                else:
                    ax.set_ylabel('')
                if v2 == n - 1:
                    ax.set_xlabel(var1)
                else:
                    ax.set_xlabel('')
        fName = os.path.abspath(f'{self.name}.png')
        plt.savefig(fName)
        self.raiseAMessage(f'Saved figure to "{fName}"')
| apache-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 50 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)

iris = datasets.load_iris()

# Use only the first two features so the decision boundary can be drawn in 2D.
X = iris.data[:, :2]
y = iris.target

# step size in the mesh
h = .02

# Unlabel 30% / 50% of the points (label -1 marks unlabeled samples).
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
        y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
        y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['Label Spreading 30% data',
          'Label Spreading 50% data',
          'Label Spreading 100% data',
          'SVC with rbf kernel']

# white = unlabeled; other colors correspond to the three iris classes
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}

for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    colors = [color_map[y] for y in y_train]
    plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)

    plt.title(titles[i])

plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
luamct/WebSci14 | features/color/stats.py | 1 | 1332 | '''
Created on 13/08/2013
@author: Felipe Costa
'''
from math import sqrt, atan2, pi
import matplotlib.pyplot as pp
import numpy as np
import utils
class Statistics:
    """Color-statistics feature extractor operating in the IHLS space.

    Luminance (Y) and saturation (S) get plain mean/std features; hue (H)
    is treated as a circular quantity, weighted by normalized saturation.
    """

    def __init__(self):
        self.table_name = "stats"

    def get_table_name(self):
        return self.table_name

    def process(self, rgb_img):
        """Return a dict with H/Y/S mean and spread features for rgb_img."""
        # Get the right color space representation first.
        img = utils.image.rgb2ihls(rgb_img)

        luminance = img[:, :, 1]
        saturation = img[:, :, 2]

        y_mean, y_std = np.mean(luminance), np.std(luminance)
        s_mean, s_std = np.mean(saturation), np.std(saturation)

        # Hue mean via circular statistics, weighted by saturation in [0, 1].
        weights = saturation / 255.0
        hue = img[:, :, 0]
        a_comp = np.sum(np.cos(hue) * weights)
        b_comp = np.sum(np.sin(hue) * weights)

        h_mean = atan2(b_comp, a_comp)
        if h_mean < 0:
            # Map the atan2 result from (-pi, pi] onto [0, 2*pi).
            h_mean += 2 * pi

        # Circular variance: 1 - resultant length / number of pixels.
        n_pixels = img.shape[0] * img.shape[1]
        h_std = 1.0 - sqrt(a_comp ** 2 + b_comp ** 2) / n_pixels

        return {'H_mean': h_mean, 'H_std': h_std,
                'Y_mean': y_mean, 'Y_std': y_std,
                'S_mean': s_mean, 'S_std': s_std}
def test():
    # Smoke test: load a sample image, convert it, and print the features.
    from utils.image import rgb2ihls
    imgs = {'ihls' : rgb2ihls(pp.imread("../in/purple.jpg"))}
#	ihls = np.array([[np.pi/2,100,120],
#					 [np.pi/2,100,120],
#					 [3*np.pi/2,100,120],
#					 [3*np.pi/2,100,120] ]).reshape(2,2,3)
    sts = Statistics()
    # NOTE(review): this passes the *dict* to process(), whose parameter is
    # named rgb_img and which converts its argument with rgb2ihls again —
    # presumably the original RGB array was intended here; verify.
    print sts.process(imgs)

if __name__ == "__main__":
    test()
| gpl-3.0 |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/results/2_categories/test10_cross_validate_categories_mov_fixed_1200ms_scaled_method_v.py | 1 | 4633 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
    """Principal component analysis via the covariance method (Python 2).

    X : 2-d data matrix (rows = features, columns = observations).
    Returns (eigenvectors, eigenvalues, row means, centered data, covariance).
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    # NOTE(review): X - mean_X only broadcasts if X is an np.matrix (so that
    # mean(axis=1) keeps a column shape) — confirm Fmat_original's type.
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)

    ###### Sanity Check ######
    # Scan for NaNs: x != x is true only for NaN.
    # NOTE(review): the 123x140 bounds are hard-coded for this specific data
    # matrix; they will not adapt to other inputs.
    i=0
    n=0
    while i < 123:
        j=0
        while j < 140:
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n=n+1
            j = j+1
        i=i+1
    print n
    ##########################

    print 'PCA - COV-Method used'
    val,vec = linalg.eig(Mcov)

    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
if __name__ == '__main__':

    Fmat = Fmat_original

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot

    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)

    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W = eigvec_total[:,0:12]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W

    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    # s[i] is the standard deviation of feature i, read off the covariance diagonal.
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z

    #Projected Data:
    Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y

    #Using PYMVPA
    # Labels/chunks below encode 28 object classes x 5 trials = 140 samples.
    PCA_data = np.array(Y.T)
    PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
    PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
    clf = kNN(k=3)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
    print ds1.samples.shape
    # Leave-one-chunk-out cross-validation with a confusion matrix.
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True')
    show()

    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,30.3,0,1.2])
    grid('True')
    #show()
| mit |
ycaihua/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than tree times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)

#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()

# Add strong noise to every fifth target.  Use floor division for the count:
# under `from __future__ import division`, X.shape[0] / 5 is a float, and
# modern NumPy rejects float arguments to `rand`.
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))

X_plot = np.linspace(0, 5, 100000)[:, None]

#############################################################################
# Fit regression model: grid-search C/alpha and gamma for SVR and KRR.
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
                   param_grid={"C": [1e0, 1e1, 1e2, 1e3],
                               "gamma": np.logspace(-2, 2, 5)})

kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
                  param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
                              "gamma": np.logspace(-2, 2, 5)})

t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
      % svr_fit)

t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
      % kr_fit)

# Fraction of the training points retained as support vectors.
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)

t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
      % (X_plot.shape[0], svr_predict))

t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
      % (X_plot.shape[0], kr_predict))

#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
# (plt.hold('on') removed: the API is gone from matplotlib and overlaying
# subsequent artists is the default behavior anyway.)
plt.plot(X_plot, y_svr, c='r',
         label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
         label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()

# Visualize training and prediction time
plt.figure()

# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))

# Train-set sizes must be integers so they can be used as slice bounds below.
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
                                           gamma=10),
                        "SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
    train_time = []
    test_time = []
    for train_test_size in sizes:
        t0 = time.time()
        estimator.fit(X[:train_test_size], y[:train_test_size])
        train_time.append(time.time() - t0)

        t0 = time.time()
        estimator.predict(X_plot[:1000])
        test_time.append(time.time() - t0)

    plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
             label="%s (train)" % name)
    plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
             label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")

# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
    learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
                   scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
    learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
                   scoring="mean_squared_error", cv=10)

plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
         label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
         label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")

plt.show()
| bsd-3-clause |
yl565/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
    '''fft tools for arma processes

    This class contains several methods that are providing the same or similar
    returns to try out and test different implementations.

    Notes
    -----
    TODO:
    check whether we don't want to fix maxlags, and create new instance if
    maxlag changes. usage for different lengths of timeseries ?
    or fix frequency and length for fft

    check default frequencies w, terminology norw  n_or_w

    some ffts are currently done without padding with zeros

    returns for spectral density methods needs checking, is it always the power
    spectrum hw*hw.conj()

    normalization of the power spectrum, spectral density: not checked yet, for
    example no variance of underlying process is used
    '''

    def __init__(self, ar, ma, n):
        #duplicates now that are subclassing ArmaProcess
        super(ArmaFft, self).__init__(ar, ma)

        self.ar = np.asarray(ar)
        self.ma = np.asarray(ma)
        self.nobs = n
        #could make the polynomials into cached attributes
        self.arpoly = np.polynomial.Polynomial(ar)
        self.mapoly = np.polynomial.Polynomial(ma)
        self.nar = len(ar)  #1d only currently
        self.nma = len(ma)

    def padarr(self, arr, maxlag, atend=True):
        '''pad 1d array with zeros at end to have length maxlag

        function that is a method, no self used

        Parameters
        ----------
        arr : array_like, 1d
            array that will be padded with zeros
        maxlag : int
            length of array after padding
        atend : boolean
            If True (default), then the zeros are added to the end, otherwise
            to the front of the array

        Returns
        -------
        arrp : ndarray
            zero-padded array

        Notes
        -----
        This is mainly written to extend coefficient arrays for the lag-polynomials.
        It returns a copy.
        '''
        if atend:
            return np.r_[arr, np.zeros(maxlag-len(arr))]
        else:
            return np.r_[np.zeros(maxlag-len(arr)), arr]

    def pad(self, maxlag):
        '''construct AR and MA polynomials that are zero-padded to a common length

        Parameters
        ----------
        maxlag : int
            new length of lag-polynomials

        Returns
        -------
        ar : ndarray
            extended AR polynomial coefficients
        ma : ndarray
            extended AR polynomial coefficients
        '''
        arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
        mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
        return arpad, mapad

    def fftar(self, n=None):
        '''Fourier transform of AR polynomial, zero-padded at end to n

        Parameters
        ----------
        n : int
            length of array after zero-padding

        Returns
        -------
        fftar : ndarray
            fft of zero-padded ar polynomial
        '''
        if n is None:
            n = len(self.ar)
        return fft.fft(self.padarr(self.ar, n))

    def fftma(self, n):
        '''Fourier transform of MA polynomial, zero-padded at end to n

        Parameters
        ----------
        n : int
            length of array after zero-padding

        Returns
        -------
        fftma : ndarray
            fft of zero-padded ma polynomial
        '''
        if n is None:
            # NOTE(review): the fallback uses len(self.ar); presumably
            # len(self.ma) was intended here — verify before relying on it.
            n = len(self.ar)
        return fft.fft(self.padarr(self.ma, n))

    #@OneTimeProperty  # not while still debugging things
    def fftarma(self, n=None):
        '''Fourier transform of ARMA polynomial, zero-padded at end to n

        The Fourier transform of the ARMA process is calculated as the ratio
        of the fft of the MA polynomial divided by the fft of the AR polynomial.

        Parameters
        ----------
        n : int
            length of array after zero-padding

        Returns
        -------
        fftarma : ndarray
            fft of zero-padded arma polynomial
        '''
        if n is None:
            n = self.nobs
        return (self.fftma(n) / self.fftar(n))

    def spd(self, npos):
        '''raw spectral density, returns Fourier transform

        n is number of points in positive spectrum, the actual number of points
        is twice as large. different from other spd methods with fft
        '''
        n = npos
        w = fft.fftfreq(2*n) * 2 * np.pi
        hw = self.fftarma(2*n)  #not sure, need to check normalization
        #return (hw*hw.conj()).real[n//2-1:]  * 0.5 / np.pi #doesn't show in plot
        return (hw*hw.conj()).real * 0.5 / np.pi, w

    def spdshift(self, n):
        '''power spectral density using fftshift

        currently returns two-sided according to fft frequencies, use first half
        '''
        #size = s1+s2-1
        mapadded = self.padarr(self.ma, n)
        arpadded = self.padarr(self.ar, n)
        hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
        #return np.abs(spd)[n//2-1:]
        w = fft.fftfreq(n) * 2 * np.pi
        wslice = slice(n//2-1, None, None)
        #return (hw*hw.conj()).real[wslice], w[wslice]
        return (hw*hw.conj()).real, w

    def spddirect(self, n):
        '''power spectral density using padding to length n done by fft

        currently returns two-sided according to fft frequencies, use first half
        '''
        #size = s1+s2-1
        #abs looks wrong
        hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
        w = fft.fftfreq(n) * 2 * np.pi
        wslice = slice(None, n//2, None)
        #return (np.abs(hw)**2)[wslice], w[wslice]
        return (np.abs(hw)**2) * 0.5/np.pi, w

    def _spddirect2(self, n):
        '''this looks bad, maybe with an fftshift
        '''
        #size = s1+s2-1
        hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
                / fft.fft(np.r_[self.ar[::-1],self.ar], n))
        return (hw*hw.conj())  #.real[n//2-1:]

    def spdroots(self, w):
        '''spectral density for frequency using polynomial roots

        builds two arrays (number of roots, number of frequencies)
        '''
        return self.spdroots_(self.arroots, self.maroots, w)

    def spdroots_(self, arroots, maroots, w):
        '''spectral density for frequency using polynomial roots

        builds two arrays (number of roots, number of frequencies)

        Parameters
        ----------
        arroots : ndarray
            roots of ar (denominator) lag-polynomial
        maroots : ndarray
            roots of ma (numerator) lag-polynomial
        w : array_like
            frequencies for which spd is calculated

        Notes
        -----
        this should go into a function
        '''
        w = np.atleast_2d(w).T
        cosw = np.cos(w)
        #Greene 5th edt. p626, section 20.2.7.a.
        # Invert the roots so the factors below use the reciprocal roots.
        maroots = 1./maroots
        arroots = 1./arroots
        num = 1 + maroots**2 - 2* maroots * cosw
        den = 1 + arroots**2 - 2* arroots * cosw
        #print 'num.shape, den.shape', num.shape, den.shape
        hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1)  #or use expsumlog
        return np.squeeze(hw), w.squeeze()

    def spdpoly(self, w, nma=50):
        '''spectral density from MA polynomial representation for ARMA process

        References
        ----------
        Cochrane, section 8.3.3
        '''
        mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
        hw = mpoly(np.exp(1j * w))
        spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
        return spd, w

    def filter(self, x):
        '''
        filter a timeseries with the ARMA filter

        padding with zero is missing, in example I needed the padding to get
        initial conditions identical to direct filter

        Initial filtered observations differ from filter2 and signal.lfilter, but
        at end they are the same.

        See Also
        --------
        tsa.filters.fftconvolve
        '''
        n = x.shape[0]
        # NOTE(review): this compares the int n to the *bound method*
        # self.fftarma, which is always False — likely self.nobs (or a cached
        # array attribute) was intended. Verify before fixing.
        if n == self.fftarma:
            fftarma = self.fftarma
        else:
            fftarma = self.fftma(n) / self.fftar(n)
        tmpfft = fftarma * fft.fft(x)
        return fft.ifft(tmpfft)

    def filter2(self, x, pad=0):
        '''filter a time series using fftconvolve3 with ARMA filter

        padding of x currently works only if x is 1d
        in example it produces same observations at beginning as lfilter even
        without padding.

        TODO: this returns 1 additional observation at the end
        '''
        from statsmodels.tsa.filters import fftconvolve3
        if not pad:
            pass
        elif pad == 'auto':
            #just guessing how much padding
            x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
        else:
            x = self.padarr(x, x.shape[0] + int(pad), atend=False)

        return fftconvolve3(x, self.ma, self.ar)

    def acf2spdfreq(self, acovf, nfreq=100, w=None):
        '''
        not really a method
        just for comparison, not efficient for large n or long acf

        this is also similarly use in tsa.stattools.periodogram with window
        '''
        if w is None:
            w = np.linspace(0, np.pi, nfreq)[:, None]
        nac = len(acovf)
        hw = 0.5 / np.pi * (acovf[0] +
                            2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
        return hw

    def invpowerspd(self, n):
        '''autocovariance from spectral density

        scaling is correct, but n needs to be large for numerical accuracy
        maybe padding with zero in fft would be faster
        without slicing it returns 2-sided autocovariance with fftshift

        >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
        array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
                0.045   ,  0.0225  ,  0.01125 ,  0.005625])
        >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
        array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
                0.045   ,  0.0225  ,  0.01125 ,  0.005625])
        '''
        hw = self.fftarma(n)
        return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]

    def spdmapoly(self, w, twosided=False):
        '''ma only, need division for ar, use LagPolynomial
        '''
        if w is None:
            # NOTE(review): nfreq is not defined in this scope — this branch
            # raises NameError when reached. Verify intended default.
            w = np.linspace(0, np.pi, nfreq)
        return 0.5 / np.pi * self.mapoly(np.exp(w*1j))

    def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
        """Plot a 2x2 summary: sample path, acf, spectral density, pacf."""
        # NOTE(review): the nobs argument is ignored; the sample is always
        # generated with nsample=100 — confirm whether nobs was intended.
        rvs = self.generate_sample(nsample=100, burnin=500)
        acf = self.acf(nacf)[:nacf]  #TODO: check return length
        pacf = self.pacf(nacf)
        w = np.linspace(0, np.pi, nfreq)
        spdr, wr = self.spdroots(w)

        if fig is None:
            import matplotlib.pyplot as plt
            fig = plt.figure()
        ax = fig.add_subplot(2,2,1)
        ax.plot(rvs)
        ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))

        ax = fig.add_subplot(2,2,2)
        ax.plot(acf)
        # NOTE(review): '%rs' below looks like a typo for '%s' in the title.
        ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (self.ar, self.ma))

        ax = fig.add_subplot(2,2,3)
        ax.plot(wr, spdr)
        ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))

        ax = fig.add_subplot(2,2,4)
        ax.plot(pacf)
        ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))

        return fig
def spdar1(ar, w):
    """Spectral density of an AR(1) process at frequencies ``w``.

    ``ar`` is either the scalar AR coefficient rho, or a lag-polynomial
    ``[1, -rho]`` from which rho is recovered.
    """
    rho = ar if np.ndim(ar) == 0 else -ar[1]
    denominator = 1 + rho * rho - 2 * rho * np.cos(w)
    return 0.5 / np.pi / denominator
if __name__ == '__main__':
    def maxabs(x, y):
        # Maximum absolute elementwise difference, used as an error measure.
        return np.max(np.abs(x-y))

    nobs = 200  #10000
    ar = [1, 0.0]
    ma = [1, 0.0]

    # Build the MA(inf) representation of an AR(2) by filtering a unit impulse.
    ar2 = np.zeros(nobs)
    ar2[:2] = [1, -0.9]

    uni = np.zeros(nobs)
    uni[0]=1.
    #arrep = signal.lfilter(ma, ar, ar2)
    #marep = signal.lfilter([1],arrep, uni)
    # same faster:
    arcomb = np.convolve(ar, ar2, mode='same')
    marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
    print(marep[:10])
    mafr = fft.fft(marep)

    rvs = np.random.normal(size=nobs)
    datafr = fft.fft(rvs)
    y = fft.ifft(mafr*datafr)
    print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))

    arrep = signal.lfilter([1],marep, uni)
    print(arrep[:20])  # roundtrip to ar
    arfr = fft.fft(arrep)
    yfr = fft.fft(y)
    x = fft.ifft(arfr*yfr).real  #imag part is e-15
    # the next two are equal, roundtrip works
    print(x[:5])
    print(rvs[:5])
    print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))

    # ARMA filter using fft with ratio of fft of ma/ar lag polynomial
    # seems much faster than using lfilter

    #padding, note arcomb is already full length
    arcombp = np.zeros(nobs)
    arcombp[:len(arcomb)] = arcomb
    map_ = np.zeros(nobs)  #rename: map was shadowing builtin
    map_[:len(ma)] = ma
    ar0fr = fft.fft(arcombp)
    ma0fr = fft.fft(map_)
    y2 = fft.ifft(ma0fr/ar0fr*datafr)
    #the next two are (almost) equal in real part, almost zero but different in imag
    print(y2[:10])
    print(y[:10])
    print(maxabs(y, y2))  # from chfdiscrete
    #1.1282071239631782e-014

    ar = [1, -0.4]
    ma = [1, 0.2]

    # Compare the different spectral density implementations on one process.
    arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)

    nfreq = nobs
    w = np.linspace(0, np.pi, nfreq)
    w2 = np.linspace(0, 2*np.pi, nfreq)

    import matplotlib.pyplot as plt
    plt.close('all')

    plt.figure()
    spd1, w1 = arma1.spd(2**10)
    print(spd1.shape)
    _ = plt.plot(spd1)
    plt.title('spd fft complex')

    plt.figure()
    # NOTE(review): this rebinds w2, discarding the linspace defined above.
    spd2, w2 = arma1.spdshift(2**10)
    print(spd2.shape)
    _ = plt.plot(w2, spd2)
    plt.title('spd fft shift')

    plt.figure()
    spd3, w3 = arma1.spddirect(2**10)
    print(spd3.shape)
    _ = plt.plot(w3, spd3)
    plt.title('spd fft direct')

    plt.figure()
    spd3b = arma1._spddirect2(2**10)
    print(spd3b.shape)
    _ = plt.plot(spd3b)
    plt.title('spd fft direct mirrored')

    plt.figure()
    spdr, wr = arma1.spdroots(w)
    print(spdr.shape)
    plt.plot(w, spdr)
    plt.title('spd from roots')

    plt.figure()
    spdar1_ = spdar1(arma1.ar, w)
    print(spdar1_.shape)
    _ = plt.plot(w, spdar1_)
    plt.title('spd ar1')

    plt.figure()
    wper, spdper = arma1.periodogram(nfreq)
    print(spdper.shape)
    _ = plt.plot(w, spdper)
    plt.title('periodogram')

    startup = 1000
    rvs = arma1.generate_sample(startup+10000)[startup:]
    import matplotlib.mlab as mlb
    plt.figure()
    # NOTE(review): psd is computed on x (the whitened roundtrip series from
    # the first experiment), not on the freshly generated rvs — confirm intent.
    sdm, wm = mlb.psd(x)
    print('sdm.shape', sdm.shape)
    sdm = sdm.ravel()
    plt.plot(wm, sdm)
    plt.title('matplotlib')

    from nitime.algorithms import LD_AR_est
    #yule_AR_est(s, order, Nfreqs)
    wnt, spdnt = LD_AR_est(rvs, 10, 512)
    plt.figure()
    print('spdnt.shape', spdnt.shape)
    _ = plt.plot(spdnt.ravel())
    print(spdnt[:10])
    plt.title('nitime')

    fig = plt.figure()
    arma1.plot4(fig)

    #plt.show()
| bsd-3-clause |
neurotechuoft/MindType | Code/V1/src/p300_service/tests/plot_data.py | 1 | 1662 | import numpy as np
import scipy.stats as st
import pickle
from p300_service import ml
import matplotlib.pyplot as plt
N = 120  # number of trials
M = 4    # number of channels
F = 256  # number of features
# NOTE(review): N, M and F are never referenced below; they appear to document
# the expected dimensions of the pickled recordings.

with open('data/train_data.pickle', 'rb') as f:
    train_data = pickle.load(f)
with open('data/test_data.pickle', 'rb') as f:
    test_data = pickle.load(f)

X_train, y_train = ml.create_input_target(train_data)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test, y_test = ml.create_input_target(test_data)
X_test = np.array(X_test)
y_test = np.array(y_test)

# Pool train+test epochs, split into P300 (label 1) and non-P300 (label 0).
p300 = np.concatenate((X_train[np.squeeze(np.argwhere(y_train))],
                       X_test[np.squeeze(np.argwhere(y_test))]))
no_p300 = np.concatenate((X_train[np.squeeze(np.argwhere(np.abs(y_train - 1.)))],
                          X_test[np.squeeze(np.argwhere(np.abs(y_test - 1.)))]))

# Keep every 4th row.
# NOTE(review): this subsamples trials (axis 0), not time points — confirm.
p300 = p300[::4, :]
no_p300 = no_p300[::4, :]

# Confidence band half-widths: SEM scaled by the t quantile at
# (1 + 0.975) / 2 = 0.9875, matching the '0.975 CI' legend labels below.
p300_ci = st.sem(p300) * st.t.ppf((1.975) / 2., p300.shape[1] - 1)
no_p300_ci = st.sem(no_p300) * st.t.ppf((1.975) / 2., no_p300.shape[1] - 1)

p300 = np.mean(p300, axis=0)
no_p300 = np.mean(no_p300, axis=0)

# Sample times in ms: first sample at 100 ms, one sample every 12 ms.
time = np.arange(100, 100 + p300.size * 12, 12)

fig, ax = plt.subplots()
ax.plot(time, p300, label='P300', color='red')
ax.fill_between(time, p300 - p300_ci, p300 + p300_ci, color='red', alpha = 0.2, label='0.975 CI')
ax.plot(time, no_p300, label='no P300', color='blue')
ax.fill_between(time, no_p300 - no_p300_ci, no_p300 + no_p300_ci, color='blue', alpha = 0.2, label='0.975 CI')
ax.set_ylim([-20, 35])
ax.legend(loc='upper left')
ax.set(xlabel='Time (ms)', ylabel='Voltage (uV)',
       title='TP10')
plt.show()
| agpl-3.0 |
rahul-c1/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
# Module-level handle to the temporary data home shared by the fixtures below.
tmpdir = None


def setup_tmpdata():
    """Create a temporary data home containing an empty 'mldata' subdir."""
    # create temporary dir
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    os.makedirs(os.path.join(tmpdir, 'mldata'))


def teardown_tmpdata():
    """Remove the temporary directory created by setup_tmpdata, if any."""
    # remove temporary dir
    global tmpdir
    if tmpdir is not None:
        shutil.rmtree(tmpdir)
        # Reset so a repeated teardown (or one without a prior setup) is a
        # harmless no-op instead of crashing on the already-deleted path.
        tmpdir = None
def test_mldata_filename():
    """mldata_filename should slugify dataset names as expected."""
    expected = {
        'datasets-UCI iris': 'datasets-uci-iris',
        'news20.binary': 'news20binary',
        'book-crossing-ratings-1.0': 'book-crossing-ratings-10',
        'Nile Water Level': 'nile-water-level',
        'MNIST (original)': 'mnist-original',
    }
    for raw_name, slug in expected.items():
        assert_equal(mldata_filename(raw_name), slug)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    # Monkeypatch urlopen so no network access happens; restore it afterwards.
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # An unknown dataset name must surface the HTTP error.
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """A single-column dataset yields 'data' but no 'target' attribute."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Multi-column datasets: target/data resolution by default, order,
    column number, and explicit column name."""
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label'/'data' map to target/data.
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)

        # Extra columns come back transposed.
        assert_array_equal(dset.z, z.T)

        # by order: without 'label'/'data' names, column order decides.
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: explicit target_name/data_name column indices.
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })

        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: explicit target_name/data_name column names.
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
jblackburne/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets

np.random.seed(5)

iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three estimators: a good 3-cluster fit, an 8-cluster fit, and a 3-cluster
# fit with a single random initialization (to illustrate a bad init).
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    plt.cla()
    est.fit(X)
    labels = est.labels_
    # `np.float` was removed in NumPy 1.24; the builtin `float` is the exact
    # equivalent (np.float was just an alias for it).
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(float))
    # `w_xaxis`/`w_yaxis`/`w_zaxis` are deprecated aliases (removed in
    # Matplotlib 3.8); the plain axis attributes behave identically.
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()

for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
michrawson/SVM_Implicit_Surface_Reconstruction | KernelRidgeRegression.py | 2 | 1721 | from sklearn.datasets import make_regression
from sklearn.cross_validation import train_test_split
from sklearn.kernel_ridge import KernelRidge
import sys
import numpy as np
def kernel(a,b):
    """Linear kernel: the inner (dot) product of vectors ``a`` and ``b``."""
    inner = np.dot(a, b)
    return inner
assert kernel([1,1],[1,-1]) == 0
def kernel_ridge_regression(X_train,y_train, Lambda):
    """Fit kernel ridge regression and return the prediction function.

    Solves (K + Lambda*I) alpha = y for the dual coefficients, where
    K[i, j] = kernel(X_train[i], X_train[j]), and returns the predictor
    f(x) = sum_i alpha[i] * kernel(X_train[i], x).

    Args:
        X_train: sequence of training samples (each accepted by kernel()).
        y_train: sequence of training targets, length len(X_train).
        Lambda: ridge (L2) regularization strength, > 0.

    Returns:
        A function f(x) producing the regression prediction for x.
    """
    n = len(X_train)
    y = np.asarray(y_train, dtype=float).reshape(n)
    # Gram matrix of the training points.
    K = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            K[i, j] = kernel(X_train[i], X_train[j])
    # Solve the regularized linear system directly instead of forming an
    # explicit matrix inverse (np.linalg.inv): cheaper and numerically
    # more stable, with identical results for well-conditioned systems.
    alpha = np.linalg.solve(K + Lambda * np.identity(n), y)
    def f(x):
        # Dual-form prediction: kernel-weighted sum over the training set.
        total = 0.
        for i in range(n):
            total += alpha[i] * kernel(X_train[i], x)
        return total
    return f
def score(f, X_test, y_test):
    """Return the mean squared error of predictor ``f`` on the test set."""
    total = 0.
    for idx in range(0, len(X_test)):
        guess = f(X_test[idx])
        # Some predictors return a length-1 array; unwrap to a scalar.
        if isinstance(guess, np.ndarray):
            guess = guess[0]
        total += (guess - y_test[idx]) ** 2
    return total / len(X_test)
# Make up data: a synthetic linear regression problem with noise; the fixed
# random_state values make the run reproducible.
X, y, true_coefficient = make_regression(n_samples=80, n_features=30,
                                         n_informative=20, noise=10, coef=True,
                                         random_state=20140210)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)
# Run Scikit Kernel Ridge Regression as the reference implementation.
# NOTE(review): KernelRidge() defaults presumably match Lambda=1 with a
# linear kernel used below -- confirm against the scikit-learn docs.
clf = KernelRidge()
clf.fit(X_train,y_train)
# Python 2 print statements: this script targets Python 2.
print 'SCIKIT: mean square test error:', score( clf.predict, X_test, y_test)
# Run this implementation with the same regularization strength (Lambda=1)
# and report the same metric for a side-by-side comparison.
f = kernel_ridge_regression(X_train,y_train,1)
score_val = score(f, X_test, y_test)
print 'Custom: mean square test error:', score_val
| mit |
ARudiuk/mne-python | tutorials/plot_sensors_time_frequency.py | 3 | 5104 | """
.. _tut_sensors_time_frequency:
=============================================
Frequency and time-frequency sensors analysis
=============================================
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use the somatosensory dataset that contains so
called event related synchronizations (ERS) / desynchronizations (ERD) in
the beta band.
"""
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper
from mne.datasets import somato
###############################################################################
# Set parameters
data_path = somato.data_path()
raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
# Stimulus triggers are read from channel 'STI 014' in this recording.
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers; EOG is kept so it can drive the artifact
# rejection thresholds below.
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs: 1 s before to 3 s after each event with id 1,
# baseline-corrected on the whole pre-stimulus interval (None, 0).
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
# reject: peak-to-peak amplitude thresholds for dropping contaminated
# epochs -- NOTE(review): values look dataset-specific; confirm units
# (grad presumably T/m, eog presumably V) against the MNE docs.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
                    preload=True)
epochs.resample(150., npad='auto')  # resample to reduce computation time
###############################################################################
# Frequency analysis
# ------------------
#
# We start by exploring the frequency content of our epochs.
###############################################################################
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2., fmax=40.)
###############################################################################
# Now let's take a look at the spatial distributions of the PSD.
epochs.plot_psd_topomap(ch_type='grad', normalize=True)
###############################################################################
# Alternatively, you can also create PSDs from Epochs objects with functions
# that start with psd_ such as
# :func:`mne.time_frequency.psd_multitaper` and
# :func:`mne.time_frequency.psd_welch`.
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
###############################################################################
# Time-frequency analysis: power and intertrial coherence
# -------------------------------------------------------
#
# We now compute time-frequency representations (TFRs) from our Epochs.
# We'll look at power and intertrial coherence (ITC).
#
# To this we'll use the function :func:`mne.time_frequency.tfr_morlet`
# but you can also use :func:`mne.time_frequency.tfr_multitaper`
# or :func:`mne.time_frequency.tfr_stockwell`.
freqs = np.arange(6, 30, 3) # define frequencies of interest
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
###############################################################################
# Inspect power
# -------------
#
# .. note::
# The generated figures are interactive. In the topo you can click
#     on an image to visualize the data for one sensor.
# You can also select a portion in the time-frequency plane to
# obtain a topomap for a certain time-frequency region.
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio')
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', vmax=0.45, show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', vmax=0.45, show=False)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Inspect ITC
# -----------
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
###############################################################################
# .. note::
# Baseline correction can be applied to power or done in plots
# To illustrate the baseline correction in plots the next line is
# commented power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
###############################################################################
# Exercise
# --------
#
# - Visualize the intertrial coherence values as topomaps as done with
# power.
| bsd-3-clause |
XueqingLin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 1 | 57602 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _prepare_iris_data_for_logistic_regression():
  """Restrict the iris dataset to classes 0/1, i.e. a binary problem."""
  iris = tf.contrib.learn.datasets.load_iris()
  binary_ids = np.where((iris.target == 0) | (iris.target == 1))
  return tf.contrib.learn.datasets.base.Dataset(
      data=iris.data[binary_ids], target=iris.target[binary_ids])
def _iris_input_fn():
  """Input fn returning the full 150-sample iris set as (features, labels)."""
  iris = tf.contrib.learn.datasets.load_iris()
  features = {'feature': tf.constant(iris.data, dtype=tf.float32)}
  labels = tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
  return features, labels
class LinearClassifierTest(tf.test.TestCase):
  def testEstimatorContract(self):
    # Verifies LinearClassifier exposes the standard Estimator interface
    # checked by estimator_test_utils (fit/evaluate/predict methods etc.).
    estimator_test_utils.assert_estimator_contract(
        self, tf.contrib.learn.LinearClassifier)
  def testTrain(self):
    """Tests that loss goes down with training."""
    # Constant single-example input: one real-valued feature ('age') and one
    # sparse feature ('language'), always labeled 1 -- trivially learnable,
    # so the loss should approach zero with more training steps.
    def input_fn():
      return {
          'age': tf.constant([1]),
          'language': tf.SparseTensor(values=['english'],
                                      indices=[[0, 0]],
                                      shape=[1, 1])
      }, tf.constant([[1]])
    language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
    age = tf.contrib.layers.real_valued_column('age')
    classifier = tf.contrib.learn.LinearClassifier(
        feature_columns=[age, language])
    # Train 100 steps, then 200 more: loss must strictly decrease and end
    # near zero on this degenerate dataset.
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)
    # The default configuration creates the centered-bias variable
    # (cf. testDisableCenteredBias, which turns it off).
    self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age': tf.SparseTensor(values=['1'], indices=[[0, 0]], shape=[1, 1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.sparse_column_with_hash_bucket('age', 2)
classifier = tf.contrib.learn.LinearClassifier(
_joint_weight=True,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = tf.contrib.layers.real_valued_column('', dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
self.assertEqual(4, len(classifier.weights_))
self.assertEqual(3, len(classifier.bias_))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
def _optimizer():
return tf.train.FtrlOptimizer(learning_rate=0.1)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=_optimizer,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer='Ftrl',
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32)}
return features, target
def _my_metric_op(predictions, targets):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, targets))
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')])
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn_train,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predictions = classifier.predict(input_fn=_input_fn_train)
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn_train,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn_train,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
}, tf.constant([[.7], [0]], dtype=tf.float32)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predictions_proba = classifier.predict_proba(input_fn=input_fn)
# Prediction probabilities mirror the target column, which proves that the
# classifier learns from float input.
self.assertAllClose(predictions_proba, [[.3, .7], [1., 0.]], atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
target = tf.constant([[1], [0], [0]])
return features, target
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=sparse_features,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({})))
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(tf.constant([1]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=['english'], indices=[[0, 0]], shape=[1, 1]),
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(classifier.predict(input_fn=predict_input_fn,
as_iterable=True))
out1_proba = list(classifier.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
del classifier
classifier2 = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
out2_class = list(classifier2.predict(input_fn=predict_input_fn,
as_iterable=True))
out2_proba = list(classifier2.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.LinearClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_target_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean target should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_target_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': tf.constant([[20], [20], [20]]),
'weights': tf.constant([[100], [1], [1]]),
}
target = tf.constant([[1], [0], [0]])
return features, target
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language': tf.SparseTensor(values=['hindi'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier_no_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language])
classifier_with_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language': tf.SparseTensor(values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[1], [1], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'maintenance_cost': tf.constant([[500.0], [200.0]]),
'sq_footage': tf.constant([[800.0], [600.0]]),
'weights': tf.constant([[1.0], [1.0]])
}, tf.constant([[0], [1]])
maintenance_cost = tf.contrib.layers.real_valued_column('maintenance_cost')
sq_footage = tf.contrib.layers.real_valued_column('sq_footage')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'dense_feature': tf.constant([[500.0, 800.0], [200.0, 600.0]])
}, tf.constant([[0], [1]])
dense_feature = tf.contrib.layers.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=1.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.SparseTensor(values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5])
}, tf.constant([[1], [0], [1]])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
country, 'price')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_weighted_by_price],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerCrossedFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and crossed features."""

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
                                      indices=[[0, 0], [1, 0], [2, 0]],
                                      shape=[3, 1]),
          'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
                                     indices=[[0, 0], [1, 0], [2, 0]],
                                     shape=[3, 1])
      }, tf.constant([[0], [0], [1]])

    language = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=5)
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    # Only the cross (language x country) is used as a feature, not the
    # individual columns.
    country_language = tf.contrib.layers.crossed_column(
        [language, country], hash_bucket_size=10)
    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id')
    classifier = tf.contrib.learn.LinearClassifier(
        feature_columns=[country_language],
        optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=10)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerMixedFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and a mix of features."""

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[0.6], [0.8], [0.3]]),
          'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
          'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                     indices=[[0, 0], [1, 3], [2, 1]],
                                     shape=[3, 5]),
          'weights': tf.constant([[3.0], [1.0], [1.0]])
      }, tf.constant([[1], [0], [1]])

    # Exercise real-valued, bucketized, sparse and crossed columns together.
    price = tf.contrib.layers.real_valued_column('price')
    sq_footage_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column('sq_footage'),
        boundaries=[650.0, 800.0])
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    sq_footage_country = tf.contrib.layers.crossed_column(
        [sq_footage_bucket, country],
        hash_bucket_size=10)
    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id')
    classifier = tf.contrib.learn.LinearClassifier(
        feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    classifier.fit(input_fn=input_fn, steps=50)
    scores = classifier.evaluate(input_fn=input_fn, steps=1)
    self.assertGreater(scores['accuracy'], 0.9)
  def testEval(self):
    """Tests that eval produces correct metrics."""

    def input_fn():
      return {
          'age': tf.constant([[1], [2]]),
          'language': tf.SparseTensor(values=['greek', 'chinese'],
                                      indices=[[0, 0], [1, 0]],
                                      shape=[2, 1]),
      }, tf.constant([[1], [0]])

    language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
    age = tf.contrib.layers.real_valued_column('age')
    classifier = tf.contrib.learn.LinearClassifier(
        feature_columns=[age, language])
    # Evaluate on trained model; currently only a smoke test (see TODO below).
    classifier.fit(input_fn=input_fn, steps=100)
    classifier.evaluate(input_fn=input_fn, steps=1)
    # TODO(ispir): Enable accuracy check after resolving the randomness issue.
    # self.assertLess(evaluated_values['loss/mean'], 0.3)
    # self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):
  """Unit tests for tf.contrib.learn.LinearRegressor."""

  def testEstimatorContract(self):
    """Checks LinearRegressor satisfies the generic Estimator contract."""
    estimator_test_utils.assert_estimator_contract(
        self, tf.contrib.learn.LinearRegressor)
  def testRegression(self):
    """Tests that loss goes down with training."""

    def input_fn():
      return {
          'age': tf.constant([1]),
          'language': tf.SparseTensor(values=['english'],
                                      indices=[[0, 0]],
                                      shape=[1, 1])
      }, tf.constant([[10.]])

    language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
    age = tf.contrib.layers.real_valued_column('age')
    classifier = tf.contrib.learn.LinearRegressor(
        feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    # Further training on the same data should reduce the loss.
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.5)
  def testRegression_MatrixData(self):
    """Tests regression using matrix data as input."""
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]
    # Fixed graph seed keeps the result deterministic across runs.
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=cont_features,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_iris_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_iris_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.2)
  def testRegression_TensorData(self):
    """Tests regression using tensor data as input."""

    def _input_fn(num_epochs=None):
      # limit_epochs lets the same input_fn be reused for bounded prediction.
      features = {
          'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

    feature_columns = [
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=20),
        tf.contrib.layers.real_valued_column('age')
    ]
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.2)
  def testLoss(self):
    """Tests loss calculation."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
      }
      return features, target

    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
    # With the learned constant prediction of 0.25:
    # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
    self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)
  def testLossWithWeights(self):
    """Tests loss calculation with weights."""

    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[1.], [1.], [1.], [1.]])
      }
      return features, target

    def _input_fn_eval():
      # 4 rows, with different weights.
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[7.], [1.], [1.], [1.]])
      }
      return features, target

    regressor = tf.contrib.learn.LinearRegressor(
        weight_column_name='w',
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    # Train with uniform weights, evaluate with skewed weights.
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
    self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)
  def testTrainWithWeights(self):
    """Tests training with given weight column."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[100.], [3.], [2.], [2.]])
      }
      return features, target

    def _input_fn_eval():
      # Create 4 rows (y = x)
      target = tf.constant([[1.], [1.], [1.], [1.]])
      features = {
          'x': tf.ones(shape=[4, 1], dtype=tf.float32),
          'w': tf.constant([[1.], [1.], [1.], [1.]])
      }
      return features, target

    regressor = tf.contrib.learn.LinearRegressor(
        weight_column_name='w',
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # The model should learn (y = x) because of the weights, so the loss should
    # be close to zero.
    self.assertLess(scores['loss'], 0.1)
  def testPredict_AsIterableFalse(self):
    """Tests predict method with as_iterable=False."""
    target = [1.0, 0., 0.2]

    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      return features, tf.constant(target, dtype=tf.float32)

    feature_columns = [
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=20),
        tf.contrib.layers.real_valued_column('age')
    ]
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
    # as_iterable=False returns the full prediction array at once.
    predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
    self.assertAllClose(predictions, target, atol=0.1)
  def testPredict_AsIterable(self):
    """Tests predict method with as_iterable=True."""
    target = [1.0, 0., 0.2]

    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      return features, tf.constant(target, dtype=tf.float32)

    feature_columns = [
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=20),
        tf.contrib.layers.real_valued_column('age')
    ]
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
    # Bound the input to one epoch so the prediction iterator terminates.
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(
        regressor.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertAllClose(predictions, target, atol=0.1)
  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      target = tf.constant([[1.], [0.], [0.], [0.]])
      features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
      return features, target

    def _my_metric_op(predictions, targets):
      # Arbitrary user metric: sum of elementwise products.
      return tf.reduce_sum(tf.mul(predictions, targets))

    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(
        input_fn=_input_fn_train,
        steps=1,
        metrics={
            'my_error': tf.contrib.metrics.streaming_mean_squared_error,
            'my_metric': _my_metric_op
        })
    self.assertIn('loss', set(scores.keys()))
    self.assertIn('my_error', set(scores.keys()))
    self.assertIn('my_metric', set(scores.keys()))
    predictions = regressor.predict(input_fn=_input_fn_train)
    # The custom streaming MSE must agree with sklearn's MSE.
    self.assertAlmostEqual(
        _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
        scores['my_error'])
    # Tests that when the key is a tuple, an error is raised.
    with self.assertRaises(TypeError):
      regressor.evaluate(
          input_fn=_input_fn_train,
          steps=1,
          metrics={('my_error', 'predictions'
                   ): tf.contrib.metrics.streaming_mean_squared_error})
  def testTrainSaveLoad(self):
    """Tests that ensures you can save and reload a trained model."""

    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

    feature_columns = [
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=20),
        tf.contrib.layers.real_valued_column('age')
    ]
    model_dir = tempfile.mkdtemp()
    regressor = tf.contrib.learn.LinearRegressor(
        model_dir=model_dir,
        feature_columns=feature_columns,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(regressor.predict(input_fn=predict_input_fn))
    del regressor
    # A fresh estimator pointed at the same model_dir must restore the
    # checkpoint and reproduce the predictions exactly.
    regressor2 = tf.contrib.learn.LinearRegressor(
        model_dir=model_dir,
        feature_columns=feature_columns)
    predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
    self.assertAllClose(predictions, predictions2)
  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""

    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

    feature_columns = [
        # The given hash_bucket_size results in variables larger than the
        # default min_slice_size attribute, so the variables are partitioned.
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=2e7),
        tf.contrib.layers.real_valued_column('age')
    ]
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        # Because we did not start a distributed cluster, we need to pass an
        # empty ClusterSpec, otherwise the device_setter will look for
        # distributed jobs, such as "/job:ps" which are not present.
        config=tf.contrib.learn.RunConfig(
            num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
            tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
  def testDisableCenteredBias(self):
    """Tests that we can disable centered bias."""

    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

    feature_columns = [
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=20),
        tf.contrib.layers.real_valued_column('age')
    ]
    # Training must still converge without the centered-bias variable.
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        enable_centered_bias=False,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
  def testRecoverWeights(self):
    """Tests that training on synthetic linear data recovers the weights."""
    rng = np.random.RandomState(67)
    n = 1000
    n_weights = 10
    bias = 2
    # y = x . weights + bias + small Gaussian noise
    x = rng.uniform(-1, 1, (n, n_weights))
    weights = 10 * rng.randn(n_weights)
    y = np.dot(x, weights)
    y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=tf.train.FtrlOptimizer(learning_rate=0.8))
    regressor.fit(x, y, batch_size=64, steps=2000)
    # Have to flatten weights since they come in (x, 1) shape.
    self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
    # TODO(ispir): Disable centered_bias.
    # assert abs(bias - regressor.bias_) < 0.1
  def testSdcaOptimizerRealValuedLinearFeatures(self):
    """Tests LinearRegressor with SDCAOptimizer and real valued features."""
    # Noise-free targets from a known linear model; training should recover
    # the weights almost exactly.
    x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
    weights = [[3.0], [-1.2], [0.5]]
    y = np.dot(x, weights)

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'x': tf.constant(x),
          'weights': tf.constant([[10.0], [10.0], [10.0]])
      }, tf.constant(y)

    x_column = tf.contrib.layers.real_valued_column('x', dimension=3)
    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id')
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[x_column],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.01)
    self.assertAllClose([w[0] for w in weights],
                        regressor.weights_.flatten(), rtol=0.1)
  def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
    """Tests LinearRegressor with SDCAOptimizer and a mix of features."""

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[0.6], [0.8], [0.3]]),
          'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
          'country': tf.SparseTensor(
              values=['IT', 'US', 'GB'],
              indices=[[0, 0], [1, 3], [2, 1]],
              shape=[3, 5]),
          'weights': tf.constant([[3.0], [5.0], [7.0]])
      }, tf.constant([[1.55], [-1.25], [-3.0]])

    price = tf.contrib.layers.real_valued_column('price')
    sq_footage_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column('sq_footage'),
        boundaries=[650.0, 800.0])
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    sq_footage_country = tf.contrib.layers.crossed_column(
        [sq_footage_bucket, country], hash_bucket_size=10)
    # L2 regularization keeps the tiny problem well conditioned.
    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id', symmetric_l2_regularization=1.0)
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss, 0.05)
  def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
    """Tests LinearRegressor with SDCAOptimizer, sparse features and L1 reg."""

    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[0.4], [0.6], [0.3]]),
          'country': tf.SparseTensor(
              values=['IT', 'US', 'GB'],
              indices=[[0, 0], [1, 3], [2, 1]],
              shape=[3, 5]),
          'weights': tf.constant([[10.0], [10.0], [10.0]])
      }, tf.constant([[1.4], [-0.8], [2.6]])

    price = tf.contrib.layers.real_valued_column('price')
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    # Regressor with no L1 regularization.
    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id')
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[price, country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    no_l1_reg_weights = regressor.weights_
    # Regressor with L1 regularization.
    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id', symmetric_l1_regularization=1.0)
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[price, country],
        weight_column_name='weights',
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    l1_reg_weights = regressor.weights_
    # Unregularized loss is lower when there is no L1 regularization.
    self.assertLess(no_l1_reg_loss, l1_reg_loss)
    self.assertLess(no_l1_reg_loss, 0.05)
    # But weights returned by the regressor with L1 regularization have smaller
    # L1 norm.
    l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
    for var_name in sorted(l1_reg_weights):
      l1_reg_weights_norm += sum(
          np.absolute(l1_reg_weights[var_name].flatten()))
      no_l1_reg_weights_norm += sum(
          np.absolute(no_l1_reg_weights[var_name].flatten()))
      print('Var name: %s, value: %s' %
            (var_name, no_l1_reg_weights[var_name].flatten()))
    self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and a
1/4 of the labels are positive. This means that the expected weight for
the bias should be close to the average prediction, i.e 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder': tf.constant([[0.0]]*num_examples),
}, tf.constant([[1 if i % 4 is 0 else 0] for i in range(num_examples)])
place_holder = tf.contrib.layers.real_valued_column('place_holder')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[place_holder],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(regressor.get_variable_value('linear/bias_weight')[0],
0.25, err=0.1)
  def testSdcaOptimizerBiasAndOtherColumns(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""

    def input_fn():
      """Testing the bias weight when there are other features present.

      1/2 of the instances in this input have feature 'a', the rest have
      feature 'b', and we expect the bias to be added to each instance as well.
      0.4 of all instances that have feature 'a' are positive, and 0.2 of all
      instances that have feature 'b' are positive. The labels in the dataset
      are ordered to appear shuffled since SDCA expects shuffled data, and
      converges faster with this pseudo-random ordering.
      If the bias was centered we would expect the weights to be:
      bias: 0.3
      a: 0.1
      b: -0.1
      Until b/29339026 is resolved, the bias gets regularized with the same
      global value for the other columns, and so the expected weights get
      shifted and are:
      bias: 0.2
      a: 0.2
      b: 0.0

      Returns:
        The test dataset.
      """
      num_examples = 200
      half = int(num_examples/2)
      return {
          'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
          'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
          'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
      }, tf.constant([[x] for x in
                      # Repeating blocks keep the class ratios while looking
                      # shuffled to SDCA (see docstring above).
                      [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half/10) +
                      [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half/10)])

    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id')
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[tf.contrib.layers.real_valued_column('a'),
                         tf.contrib.layers.real_valued_column('b')],
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=200)
    # TODO(b/29339026): Change the expected results to expect a centered bias.
    self.assertNear(
        regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
    self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
    self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
  def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""

    def input_fn():
      """Testing the bias weight when there are other features present.

      1/2 of the instances in this input have feature 'a', the rest have
      feature 'b', and we expect the bias to be added to each instance as well.
      0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
      all instances that have feature 'b' have a label of -1.
      We can expect the weights to be:
      bias: 0.0
      a: 0.1
      b: -0.1

      Returns:
        The test dataset.
      """
      num_examples = 200
      half = int(num_examples/2)
      return {
          'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
          'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
          'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
      }, tf.constant([[1 if x%10 == 0 else 0] for x in range(half)] +
                     [[-1 if x%10 == 0 else 0] for x in range(half)])

    sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
        example_id_column='example_id')
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[tf.contrib.layers.real_valued_column('a'),
                         tf.contrib.layers.real_valued_column('b')],
        optimizer=sdca_optimizer)
    regressor.fit(input_fn=input_fn, steps=100)
    # Labels were fabricated so the bias centers at zero (see docstring).
    self.assertNear(
        regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
    self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
    self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
  """Input fn returning the whole Boston housing set as (features, target)."""
  boston = tf.contrib.learn.datasets.load_boston()
  # 13 real-valued features per example; target reshaped to a column vector.
  features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
  target = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
  return features, target
class FeatureColumnTest(tf.test.TestCase):
  """Tests feature-column inference with LinearRegressor."""

  def testTrain(self):
    """Smoke-tests fit/evaluate with columns inferred from an input_fn."""
    feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    est = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)
    est.fit(input_fn=boston_input_fn, steps=1)
    _ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
  tf.test.main()  # Discover and run all test cases in this module.
| apache-2.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/backends/tkagg.py | 10 | 1250 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import tkinter as Tk
import numpy as np
from matplotlib.backends import _tkagg
def blit(photoimage, aggimage, bbox=None, colormode=1):
    """Copy an Agg image buffer into a Tk PhotoImage.

    Parameters
    ----------
    photoimage : Tk.PhotoImage
        Destination image; its ``tk`` interpreter is used for the call.
    aggimage : array-like
        Source pixel buffer (converted with ``np.asarray``).
    bbox : optional
        Region to copy; must support ``__array__``. ``None`` copies all.
    colormode : int
        Color mode flag passed straight through to the C extension.
    """
    tk = photoimage.tk
    if bbox is not None:
        bbox_array = bbox.__array__()
    else:
        bbox_array = None
    data = np.asarray(aggimage)
    try:
        # The registered Tcl command dereferences these raw id() addresses;
        # `data` and `bbox_array` must stay alive for the duration of the call.
        tk.call(
            "PyAggImagePhoto", photoimage,
            id(data), colormode, id(bbox_array))
    except Tk.TclError:
        # "PyAggImagePhoto" is not yet registered with this interpreter:
        # initialize _tkagg (new-style API first, old-style fallback) and retry.
        try:
            try:
                _tkagg.tkinit(tk.interpaddr(), 1)
            except AttributeError:
                _tkagg.tkinit(id(tk), 0)
            tk.call("PyAggImagePhoto", photoimage,
                    id(data), colormode, id(bbox_array))
        except (ImportError, AttributeError, Tk.TclError):
            # NOTE(review): this handler only re-raises; presumably kept for
            # parity with an older version that reported the failure here.
            raise
def test(aggimage):
    """Manual demo: display *aggimage* in a Tk window.

    Blocks forever in an update loop; intended for interactive use only.
    """
    import time
    r = Tk.Tk()
    c = Tk.Canvas(r, width=aggimage.width, height=aggimage.height)
    c.pack()
    p = Tk.PhotoImage(width=aggimage.width, height=aggimage.height)
    blit(p, aggimage)
    c.create_image(aggimage.width, aggimage.height, image=p)
    blit(p, aggimage)
    while 1: r.update_idletasks()  # never returns
| apache-2.0 |
joernhees/scikit-learn | examples/ensemble/plot_forest_iris.py | 18 | 6190 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Load data
iris = load_iris()

plot_idx = 1

# Template estimators; a fresh clone is fitted for every feature pair below.
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]

for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target

        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]

        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std

        # Train a fresh clone so the template estimators in `models` are never
        # mutated. (Bug fix: previously the clone was discarded and `model`
        # itself was fitted, carrying state across feature pairs.)
        clf = clone(model)
        clf.fit(X, y)

        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(clf, "estimators_"):
            model_details += " with {} estimators".format(len(clf.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)

        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)

        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(clf, DecisionTreeClassifier):
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(clf.estimators_)
            for tree in clf.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)

        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and have no black outline
        xx_coarser, yy_coarser = np.meshgrid(
            np.arange(x_min, x_max, plot_step_coarser),
            np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = clf.predict(
            np.c_[xx_coarser.ravel(),
                  yy_coarser.ravel()]).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,
                                c=Z_points_coarser, cmap=cmap,
                                edgecolors="none")

        # Plot the training points, these are clustered together and have a
        # black outline
        plt.scatter(X[:, 0], X[:, 1], c=y,
                    cmap=ListedColormap(['r', 'y', 'b']))

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")

plt.show()
| bsd-3-clause |
michaelaye/scikit-image | skimage/viewer/canvastools/base.py | 43 | 3877 | import numpy as np
from matplotlib import lines
__all__ = ['CanvasToolBase', 'ToolHandles']
def _pass(*args):
    """Do nothing; default callback used when no handler is supplied."""
    pass
class CanvasToolBase(object):
    """Base canvas tool for matplotlib axes.

    Parameters
    ----------
    manager : Viewer or PlotPlugin.
        Skimage viewer or plot plugin object.
    on_move : function
        Function called whenever a control handle is moved.
        This function must accept the end points of line as the only argument.
    on_release : function
        Function called whenever the control handle is released.
    on_enter : function
        Function called whenever the "enter" key is pressed.
    """

    def __init__(self, manager, on_move=None, on_enter=None, on_release=None,
                 useblit=True, ax=None):
        # NOTE(review): `useblit` and `ax` are accepted but unused here; the
        # axes are always taken from `manager.ax`. Confirm against subclasses.
        self.manager = manager
        self.ax = manager.ax
        self.artists = []
        self.active = True
        # Substitute a no-op for any callback that was not provided.
        self.callback_on_move = _pass if on_move is None else on_move
        self.callback_on_enter = _pass if on_enter is None else on_enter
        self.callback_on_release = _pass if on_release is None else on_release

    def ignore(self, event):
        """Return True if event should be ignored.

        This method (or a version of it) should be called at the beginning
        of any event callback.
        """
        return not self.active

    def hit_test(self, event):
        """Return True if `event` hits this tool; subclasses override."""
        return False

    def redraw(self):
        """Ask the manager to redraw the canvas."""
        self.manager.redraw()

    def set_visible(self, val):
        """Show or hide every artist owned by this tool."""
        for artist in self.artists:
            artist.set_visible(val)

    def on_key_press(self, event):
        """On "enter", report the tool geometry, then hide and redraw."""
        if event.key == 'enter':
            self.callback_on_enter(self.geometry)
            self.set_visible(False)
            self.manager.redraw()

    # The following event handlers are no-ops; subclasses override as needed.
    def on_mouse_press(self, event):
        pass

    def on_mouse_release(self, event):
        pass

    def on_move(self, event):
        pass

    def on_scroll(self, event):
        pass

    def remove(self):
        """Detach this tool from its manager."""
        self.manager.remove_tool(self)

    @property
    def geometry(self):
        """Geometry information that gets passed to callback functions."""
        return None
class ToolHandles(object):
    """Control handles for canvas tools.

    Parameters
    ----------
    ax : :class:`matplotlib.axes.Axes`
        Matplotlib axes where tool handles are displayed.
    x, y : 1D arrays
        Coordinates of control handles.
    marker : str
        Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
    marker_props : dict
        Additional marker properties. See :class:`matplotlib.lines.Line2D`.
    """
    def __init__(self, ax, x, y, marker='o', marker_props=None):
        self.ax = ax
        # Default look: small semi-transparent white markers, hidden until
        # the owning tool makes them visible.
        style = dict(marker=marker, markersize=7, mfc='w', ls='none',
                     alpha=0.5, visible=False)
        if marker_props is not None:
            style.update(marker_props)
        self._markers = lines.Line2D(x, y, animated=True, **style)
        self.ax.add_line(self._markers)
        self.artist = self._markers

    @property
    def x(self):
        return self._markers.get_xdata()

    @property
    def y(self):
        return self._markers.get_ydata()

    def set_data(self, pts, y=None):
        """Set x and y positions of handles"""
        if y is not None:
            # Called as set_data(x, y): stack into a single (2, N) array.
            pts = np.array([pts, y])
        self._markers.set_data(pts)

    def set_visible(self, val):
        self._markers.set_visible(val)

    def set_animated(self, val):
        self._markers.set_animated(val)

    def closest(self, x, y):
        """Return index and pixel distance to closest index."""
        coords = np.transpose((self.x, self.y))
        # Compare in display (pixel) space rather than data space.
        coords = self.ax.transData.transform(coords)
        offsets = coords - (x, y)
        distances = np.sqrt(np.sum(offsets ** 2, axis=1))
        return np.argmin(distances), np.min(distances)
| bsd-3-clause |
adamtiger/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# Pandas dtype names accepted by the extract_* helpers below. Within this
# module only the KEYS are consulted (membership tests); the values are
# informal category tags and are never read.
# NOTE(review): 'bool' maps to 'i' rather than a spelled-out category --
# harmless today because the values are unused, but confirm before ever
# relying on the values.
PANDAS_DTYPES = {
    'int8': 'int',
    'int16': 'int',
    'int32': 'int',
    'int64': 'int',
    'uint8': 'int',
    'uint16': 'int',
    'uint32': 'int',
    'uint64': 'int',
    'float16': 'float',
    'float32': 'float',
    'float64': 'float',
    'bool': 'i'
}
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=True,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """Thin wrapper around the core `pandas_input_fn`.

  This input_fn differs from the core version only in its default for
  `shuffle`; every argument is forwarded unchanged.
  """
  return core_pandas_input_fn(
      x=x,
      y=y,
      batch_size=batch_size,
      num_epochs=num_epochs,
      shuffle=shuffle,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      target_column=target_column)
def extract_pandas_data(data):
  """Extract data from pandas.DataFrame for predictors.

  Given a DataFrame, will extract the values and cast them to float. The
  DataFrame is expected to contain values of type int, float or bool.

  Args:
    data: `pandas.DataFrame` containing the data to be extracted.

  Returns:
    A numpy `ndarray` of the DataFrame's values as floats.

  Raises:
    ValueError: if data contains types other than int, float or bool.
  """
  # Anything that is not a DataFrame is passed through untouched.
  if not isinstance(data, pd.DataFrame):
    return data
  unsupported = [col for col in data
                 if data[col].dtype.name not in PANDAS_DTYPES]
  if unsupported:
    details = [("'" + str(col) + "' type='" + data[col].dtype.name + "'")
               for col in unsupported]
    raise ValueError('Data types for extracting pandas data must be int, '
                     'float, or bool. Found: ' + ', '.join(details))
  return data.values.astype('float')
def extract_pandas_matrix(data):
  """Extracts numpy matrix from pandas DataFrame.

  Args:
    data: `pandas.DataFrame` containing the data to be extracted.

  Returns:
    A numpy `ndarray` of the DataFrame's values.
  """
  if not isinstance(data, pd.DataFrame):
    return data
  # `DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in
  # pandas 1.0; `.values` is the long-supported equivalent and returns the
  # same ndarray.
  return data.values
def extract_pandas_labels(labels):
  """Extract data from pandas.DataFrame for labels.

  Args:
    labels: `pandas.DataFrame` or `pandas.Series` containing one column of
      labels to be extracted.

  Returns:
    A numpy `ndarray` of labels from the DataFrame.

  Raises:
    ValueError: if more than one column is found or type is not int, float or
      bool.
  """
  # Non-DataFrame inputs (including plain arrays) pass through untouched.
  if not isinstance(labels, pd.DataFrame):
    return labels
  if len(labels.columns) > 1:
    raise ValueError('Only one column for labels is allowed.')
  unsupported = [col for col in labels
                 if labels[col].dtype.name not in PANDAS_DTYPES]
  if unsupported:
    details = ["'" + str(col) + "' type=" + str(labels[col].dtype.name)
               for col in unsupported]
    raise ValueError('Data types for extracting labels must be int, '
                     'float, or bool. Found: ' + ', '.join(details))
  return labels.values
| apache-2.0 |
eickenberg/scikit-learn | sklearn/tests/test_naive_bayes.py | 16 | 12584 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits
from sklearn.cross_validation import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    """Gaussian Naive Bayes classification.

    Checks that GaussianNB implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    model = GaussianNB().fit(X, y)
    assert_array_equal(model.predict(X), y)
    # predict_log_proba must agree with the log of predict_proba.
    proba = model.predict_proba(X)
    log_proba = model.predict_log_proba(X)
    assert_array_almost_equal(np.log(proba), log_proba, 8)
def test_gnb_prior():
"""Test whether class priors are properly set. """
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_discrete_prior():
"""Test whether class priors are properly set. """
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
"""Test Multinomial Naive Bayes classification.
This checks that MultinomialNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    """One-shot fit, a single partial_fit, and incremental partial_fit
    must all yield identical class and feature counts."""
    reference = cls().fit([[0, 1], [1, 0]], [0, 1])

    single = cls()
    single.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])

    incremental = cls()
    incremental.partial_fit([[0, 1]], [0], classes=[0, 1])
    incremental.partial_fit([[1, 0]], [1])

    for other in (single, incremental):
        assert_array_equal(reference.class_count_, other.class_count_)
        assert_array_equal(reference.feature_count_, other.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
"""Test picklability of discrete naive Bayes classifiers"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
"""Test input checks for the fit method"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
"""Test discrete NB classes' probability scores"""
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
"""Test whether discrete NB classes fit a uniform prior
when fit_prior=False and class_prior=None"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
"""Test whether discrete NB classes use provided prior"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    """coef_ and intercept_ should have shapes as in other linear models.

    Non-regression test for issue #2127.
    """
    features = [[1, 0, 0], [1, 1, 1]]
    targets = [1, 2]  # binary classification
    for estimator in (MultinomialNB(), BernoulliNB()):
        estimator.fit(features, targets)
        assert_equal(estimator.coef_.shape, (1, 3))
        assert_equal(estimator.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
| bsd-3-clause |
jjx02230808/project0223 | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    """Boundary behaviour of a sampler: invalid sizes raise ValueError,
    degenerate but valid sizes yield correctly-shaped results."""
    # Requesting more samples than the population, or any negative size,
    # must raise ValueError.
    for n_population, n_samples in [(0, 1), (1, 2), (-1, 5), (5, -1)]:
        assert_raises(ValueError, sample_without_replacement,
                      n_population, n_samples)
    # Degenerate but valid requests return arrays of the requested length
    # (n_population == n_samples and n_population >= n_samples cases).
    for n_population, n_samples in [(0, 0), (1, 1), (5, 0), (5, 1)]:
        sampled = sample_without_replacement(n_population, n_samples)
        assert_equal(sampled.shape, (n_samples, ))
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# the number of permutations. However, it works with sampling algorithm
# that does not provide a random permutation of the subset of integer.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
bjlittle/iris | docs/gallery_code/general/plot_inset.py | 1 | 2280 | """
Test Data Showing Inset Plots
=============================
This example demonstrates the use of a single 3D data cube with time, latitude
and longitude dimensions to plot a temperature series for a single latitude
coordinate, with an inset plot of the data region.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
    """Build the figure: a 1-D meridional-mean temperature line plot with an
    inset Mollweide map showing the data region (last time slice of the cube).
    """
    cube1 = iris.load_cube(iris.sample_data_path("ostia_monthly.nc"))
    # Slice into cube to retrieve data for the inset map showing the
    # data region
    region = cube1[-1, :, :]
    # Average over latitude to reduce cube to 1 dimension
    plot_line = region.collapsed("latitude", iris.analysis.MEAN)
    # Open a window for plotting
    fig = plt.figure()
    # Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
    ax_main = fig.add_subplot(1, 1, 1)
    # Produce a quick plot of the 1D cube
    qplt.plot(plot_line)
    # Set x limits to match the data
    ax_main.set_xlim(0, plot_line.coord("longitude").points.max())
    # Adjust the y limits so that the inset map won't clash with main plot
    # NOTE(review): (294, 310) looks hand-tuned for this sample dataset --
    # confirm if the data source changes.
    ax_main.set_ylim(294, 310)
    ax_main.set_title("Meridional Mean Temperature")
    # Add grid lines
    ax_main.grid()
    # Add a second set of axes specifying the fractional coordinates within
    # the figure with bottom left corner at x=0.55, y=0.58 with width
    # 0.3 and height 0.25.
    # Also specify the projection
    ax_sub = fig.add_axes(
        [0.55, 0.58, 0.3, 0.25],
        projection=ccrs.Mollweide(central_longitude=180),
    )
    # Use iris.plot (iplt) here so colour bar properties can be specified
    # Also use a sequential colour scheme to reduce confusion for those with
    # colour-blindness
    iplt.pcolormesh(region, cmap="Blues")
    # Manually set the orientation and tick marks on your colour bar
    ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
    plt.colorbar(orientation="horizontal", ticks=ticklist)
    ax_sub.set_title("Data Region")
    # Add coastlines
    ax_sub.coastlines()
    # request to show entire map, using the colour mesh on the data region only
    ax_sub.set_global()
    qplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
zhenv5/scikit-learn | sklearn/utils/fixes.py | 39 | 13318 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# Little dance to see if np.copy has an 'order' keyword argument
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
    from numpy import isclose
except ImportError:
    def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
        """
        Returns a boolean array where two arrays are element-wise equal within
        a tolerance.
        This function was added to numpy v1.7.0, and the version you are
        running has been backported from numpy v1.8.1. See its documentation
        for more details.
        """
        def within_tol(x, y, atol, rtol):
            # |x - y| <= atol + rtol * |y|; invalid-value warnings (NaN in the
            # comparison) are deliberately suppressed here.
            with np.errstate(invalid='ignore'):
                result = np.less_equal(abs(x - y), atol + rtol * abs(y))
            # Scalar inputs get a scalar bool back, matching numpy's isclose.
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result
        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if all(xfin) and all(yfin):
            # Fast path: everything finite, a single vectorized comparison.
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Since we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
if np_version < (1, 7):
    # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
    def frombuffer_empty(buf, dtype):
        # Explicitly build an empty array for a zero-length buffer; delegate
        # everything else to np.frombuffer.
        if len(buf) == 0:
            return np.empty(0, dtype=dtype)
        else:
            return np.frombuffer(buf, dtype=dtype)
else:
    frombuffer_empty = np.frombuffer
if np_version < (1, 8):
    def in1d(ar1, ar2, assume_unique=False, invert=False):
        # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
        # Ravel both arrays, behavior for the first array could be different
        ar1 = np.asarray(ar1).ravel()
        ar2 = np.asarray(ar2).ravel()
        # This code is significantly faster when the condition is satisfied.
        if len(ar2) < 10 * len(ar1) ** 0.145:
            # Small ar2: a direct masked scan beats the sort-based path.
            # (np.bool is just the builtin-bool alias on the old numpy
            # versions this branch targets.)
            if invert:
                mask = np.ones(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask &= (ar1 != a)
            else:
                mask = np.zeros(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask |= (ar1 == a)
            return mask
        # Otherwise use sorting
        if not assume_unique:
            ar1, rev_idx = np.unique(ar1, return_inverse=True)
            ar2 = np.unique(ar2)
        ar = np.concatenate((ar1, ar2))
        # We need this to be a stable sort, so always use 'mergesort'
        # here. The values from the first array should always come before
        # the values from the second array.
        order = ar.argsort(kind='mergesort')
        sar = ar[order]
        # Adjacent equal values in the sorted concatenation mean an ar1
        # element also occurs in ar2 (equality test flips when inverting).
        if invert:
            bool_ar = (sar[1:] != sar[:-1])
        else:
            bool_ar = (sar[1:] == sar[:-1])
        flag = np.concatenate((bool_ar, [invert]))
        # Undo the sort to map flags back to ar1's original positions.
        indx = order.argsort(kind='mergesort')[:len(ar1)]
        if assume_unique:
            return flag[indx]
        else:
            return flag[indx][rev_idx]
else:
    from numpy import in1d
if sp_version < (0, 15):
    # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
    # Old scipy lsqr mishandled some sparse inputs; use the bundled copy.
    from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
    from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
    # partial cannot be pickled in Python 2.6
    # http://bugs.python.org/issue1398
    class partial(object):
        """Picklable stand-in for functools.partial on Python 2.6."""
        def __init__(self, func, *args, **keywords):
            # Copy __name__/__doc__ etc. from the wrapped callable.
            functools.update_wrapper(self, func)
            self.func = func
            self.args = args
            self.keywords = keywords
        def __call__(self, *args, **keywords):
            # Call-time positionals append to the stored ones; call-time
            # keywords override the stored ones.
            args = self.args + args
            kwargs = self.keywords.copy()
            kwargs.update(keywords)
            return self.func(*args, **kwargs)
else:
    from functools import partial
if np_version < (1, 6, 2):
    # Allow bincount to accept empty arrays
    # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
    def bincount(x, weights=None, minlength=None):
        if len(x) > 0:
            return np.bincount(x, weights, minlength)
        else:
            # Old numpy raised on empty input; emulate the fixed behavior by
            # returning `minlength` zeros (empty array when minlength is None).
            if minlength is None:
                minlength = 0
            minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
            return np.zeros(minlength, dtype=np.intp)
else:
    from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
    # Python 3.2+: os.makedirs already supports exist_ok natively.
    makedirs = os.makedirs
else:
    def makedirs(name, mode=0o777, exist_ok=False):
        """makedirs(name [, mode=0o777][, exist_ok=False])
        Super-mkdir; create a leaf directory and all intermediate ones. Works
        like mkdir, except that any intermediate path segment (not just the
        rightmost) will be created if it does not exist. If the target
        directory already exists, raise an OSError if exist_ok is False.
        Otherwise no exception is raised. This is recursive.
        """
        try:
            os.makedirs(name, mode=mode)
        except OSError as e:
            # Only swallow EEXIST for an already-existing *directory* when
            # exist_ok was requested; any other failure propagates.
            if (not exist_ok or e.errno != errno.EEXIST
                    or not os.path.isdir(name)):
                raise
if np_version < (1, 8, 1):
    def array_equal(a1, a2):
        """Return True if two arrays have the same shape and elements.

        Backport of numpy.array_equal from numpy 1.8.1; inputs that cannot
        be coerced to arrays compare unequal rather than raising.
        """
        try:
            a1, a2 = np.asarray(a1), np.asarray(a2)
        except Exception:
            # Narrowed from the original bare `except:` so that SystemExit /
            # KeyboardInterrupt are not silently swallowed; asarray failures
            # still mean "not comparable", hence not equal.
            return False
        if a1.shape != a2.shape:
            return False
        return bool(np.asarray(a1 == a2).all())
else:
    from numpy import array_equal
| bsd-3-clause |
robin-lai/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier
on the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset.
The datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target
h = .02  # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
craigcitro/pydatalab | datalab/bigquery/_table.py | 4 | 35527 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Table, and related Table BigQuery APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from builtins import str
from past.utils import old_div
from builtins import object
import calendar
import codecs
import csv
import datetime
import pandas
import time
import traceback
import uuid
import sys
import datalab.context
import datalab.utils
from . import _api
from . import _csv_options
from . import _job
from . import _parser
from . import _schema
from . import _utils
# import of Query is at end of module as we have a circular dependency of
# Query.execute().results -> Table and Table.sample() -> Query
class TableMetadata(object):
  """Represents metadata about a BigQuery table."""

  def __init__(self, table, info):
    """Initializes a TableMetadata instance.

    Args:
      table: the Table object this belongs to.
      info: The BigQuery information about this table as a Python dictionary.
    """
    self._table = table
    self._info = info

  @property
  def created_on(self):
    """The creation timestamp."""
    timestamp = self._info.get('creationTime')
    return _parser.Parser.parse_timestamp(timestamp)

  @property
  def description(self):
    """The description of the table if it exists."""
    return self._info.get('description', '')

  @property
  def expires_on(self):
    """The timestamp for when the table will expire, or None if unknown."""
    timestamp = self._info.get('expirationTime', None)
    if timestamp is None:
      return None
    return _parser.Parser.parse_timestamp(timestamp)

  @property
  def friendly_name(self):
    """The friendly name of the table if it exists."""
    return self._info.get('friendlyName', '')

  @property
  def modified_on(self):
    """The timestamp for when the table was last modified."""
    timestamp = self._info.get('lastModifiedTime')
    return _parser.Parser.parse_timestamp(timestamp)

  @property
  def rows(self):
    """The number of rows within the table, or -1 if unknown. """
    return int(self._info['numRows']) if 'numRows' in self._info else -1

  @property
  def size(self):
    """The size of the table in bytes, or -1 if unknown. """
    return int(self._info['numBytes']) if 'numBytes' in self._info else -1

  def refresh(self):
    """ Refresh the metadata. """
    # Bug fix: Table._load_info() caches into table._info and returns None,
    # so the previous `self._info = self._table._load_info()` wiped the
    # metadata. Force a reload and copy the refreshed dictionary instead.
    self._table._info = None
    self._table._load_info()
    self._info = self._table._info
class Table(object):
"""Represents a Table object referencing a BigQuery table. """
# Allowed characters in a BigQuery table column name
_VALID_COLUMN_NAME_CHARACTERS = '_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
# When fetching table contents, the max number of rows to fetch per HTTP request
_DEFAULT_PAGE_SIZE = 1024
# Milliseconds per week
_MSEC_PER_WEEK = 7 * 24 * 3600 * 1000
def __init__(self, name, context=None):
"""Initializes an instance of a Table object. The Table need not exist yet.
Args:
name: the name of the table either as a string or a 3-part tuple (projectid, datasetid, name).
If a string, it must have the form '<project>:<dataset>.<table>' or '<dataset>.<table>'.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
Raises:
Exception if the name is invalid.
"""
if context is None:
context = datalab.context.Context.default()
self._context = context
self._api = _api.Api(context)
self._name_parts = _utils.parse_table_name(name, self._api.project_id)
self._full_name = '%s:%s.%s%s' % self._name_parts
self._info = None
self._cached_page = None
self._cached_page_index = 0
self._schema = None
  @property
  def name(self):
    """The TableName named tuple (project_id, dataset_id, table_id, decorator) for the table."""
    return self._name_parts

  @property
  def job(self):
    """ For tables resulting from executing queries, the job that created the table.

    Default is None for a Table object; this is overridden by QueryResultsTable.
    """
    return None

  @property
  def is_temporary(self):
    """ Whether this is a short-lived table or not. Always False for non-QueryResultsTables. """
    return False
def _load_info(self):
"""Loads metadata about this table."""
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e
  @property
  def metadata(self):
    """Retrieves metadata about the table.

    Returns:
      A TableMetadata object.
    Raises
      Exception if the request could not be executed or the response was malformed.
    """
    # _load_info is a no-op if the metadata has already been fetched.
    self._load_info()
    return TableMetadata(self, self._info)
def exists(self):
"""Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table.
"""
try:
info = self._api.tables_get(self._name_parts)
except datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
self._info = info
return True
def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists()
def create(self, schema, overwrite=False):
""" Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and truncate was False.
"""
if overwrite and self.exists():
self.delete()
if not isinstance(schema, _schema.Schema):
# Convert to a Schema object
schema = _schema.Schema(schema)
try:
response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
except Exception as e:
raise e
if 'selfLink' in response:
self._schema = schema
return self
raise Exception("Table %s could not be created as it already exists" % self._full_name)
def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
sql = self._repr_sql_()
return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields,
sampling=sampling).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier)
@staticmethod
def _encode_dict_as_row(record, column_name_map):
""" Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary.
"""
for k in list(record.keys()):
v = record[k]
# If the column is a date, convert to ISO string.
if isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime):
v = record[k] = record[k].isoformat()
# If k has invalid characters clean it up
if k not in column_name_map:
column_name_map[k] = ''.join(c for c in k if c in Table._VALID_COLUMN_NAME_CHARACTERS)
new_k = column_name_map[k]
if k != new_k:
record[new_k] = v
del record[k]
return record
  def insert_data(self, data, include_index=False, index_name=None):
    """ Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.

    The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
    second, as BigQuery has some limits on streaming rates.

    Args:
      data: the DataFrame or list to insert.
      include_index: whether to include the DataFrame or list index as a column in the BQ table.
      index_name: for a list, if include_index is True, this should be the name for the index.
          If not specified, 'Index' will be used.
    Returns:
      The table.
    Raises:
      Exception if the table doesn't exist, the table's schema differs from the data's schema,
      or the insert failed.
    """
    # TODO(gram): we could create the Table here is it doesn't exist using a schema derived
    # from the data. IIRC we decided not to but doing so seems less unwieldy that having to
    # create it first and then validate the schema against it itself.
    # There are BigQuery limits on the streaming API:
    #
    # max_rows_per_post = 500
    # max_bytes_per_row = 20000
    # max_rows_per_second = 10000
    # max_bytes_per_post = 1000000
    # max_bytes_per_second = 10000000
    #
    # It is non-trivial to enforce these here, and the max bytes per row is not something we
    # can really control. As an approximation we enforce the 500 row limit
    # with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).
    max_rows_per_post = 500
    post_interval = 0.05
    # TODO(gram): add different exception types for each failure case.
    if not self.exists():
      raise Exception('Table %s does not exist.' % self._full_name)
    data_schema = _schema.Schema.from_data(data)
    if isinstance(data, list):
      if include_index:
        if not index_name:
          index_name = 'Index'
        data_schema._add_field(index_name, 'INTEGER')
    table_schema = self.schema
    # Do some validation of the two schema to make sure they are compatible.
    for data_field in data_schema:
      name = data_field.name
      table_field = table_schema[name]
      if table_field is None:
        raise Exception('Table does not contain field %s' % name)
      data_type = data_field.data_type
      table_type = table_field.data_type
      if table_type != data_type:
        raise Exception('Field %s in data has type %s but in table has type %s' %
                        (name, data_type, table_type))
    total_rows = len(data)
    total_pushed = 0
    # A fresh job id prefixes every row's insertId so retried POSTs are
    # de-duplicated by BigQuery.
    job_id = uuid.uuid4().hex
    rows = []
    column_name_map = {}
    is_dataframe = isinstance(data, pandas.DataFrame)
    if is_dataframe:
      # reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)
      # drops the original index and uses an integer range.
      gen = data.reset_index(drop=not include_index).iterrows()
    else:
      gen = enumerate(data)
    for index, row in gen:
      if is_dataframe:
        row = row.to_dict()
      elif include_index:
        row[index_name] = index
      rows.append({
          'json': self._encode_dict_as_row(row, column_name_map),
          'insertId': job_id + str(index)
      })
      total_pushed += 1
      # Flush a batch when it is full, or when this was the last row.
      if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
        try:
          response = self._api.tabledata_insert_all(self._name_parts, rows)
        except Exception as e:
          raise e
        if 'insertErrors' in response:
          raise Exception('insertAll failed: %s' % response['insertErrors'])
        time.sleep(post_interval)  # Streaming API is rate-limited
        rows = []
    # Block until data is ready
    while True:
      self._info = self._api.tables_get(self._name_parts)
      if 'streamingBuffer' not in self._info or \
         'estimatedRows' not in self._info['streamingBuffer'] or \
         int(self._info['streamingBuffer']['estimatedRows']) > 0:
        break
      time.sleep(2)
    return self
def _init_job_from_response(self, response):
""" Helper function to create a Job instance from a response. """
job = None
if response and 'jobReference' in response:
job = _job.Job(job_id=response['jobReference']['jobId'], context=self._context)
return job
def extract_async(self, destination, format='csv', csv_delimiter=',', csv_header=True,
compress=False):
"""Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None.
"""
format = format.upper()
if format == 'JSON':
format = 'NEWLINE_DELIMITED_JSON'
try:
response = self._api.table_extract(self._name_parts, destination, format, compress,
csv_delimiter, csv_header)
return self._init_job_from_response(response)
except Exception as e:
raise datalab.utils.JobError(location=traceback.format_exc(), message=str(e),
reason=str(type(e)))
def extract(self, destination, format='csv', csv_delimiter=',', csv_header=True, compress=False):
"""Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None.
"""
job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
csv_header=csv_header, compress=compress)
if job is not None:
job.wait()
return job
def load_async(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Starts importing a table from GCS and return a Future.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied.
"""
if source_format == 'csv':
source_format = 'CSV'
elif source_format == 'json':
source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
if not(mode == 'create' or mode == 'append' or mode == 'overwrite'):
raise Exception("Invalid mode %s" % mode)
if csv_options is None:
csv_options = _csv_options.CSVOptions()
try:
response = self._api.jobs_insert_load(source, self._name_parts,
append=(mode == 'append'),
overwrite=(mode == 'overwrite'),
create=(mode == 'create'),
source_format=source_format,
field_delimiter=csv_options.delimiter,
allow_jagged_rows=csv_options.allow_jagged_rows,
allow_quoted_newlines=csv_options.allow_quoted_newlines,
encoding=csv_options.encoding.upper(),
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
quote=csv_options.quote,
skip_leading_rows=csv_options.skip_leading_rows)
except Exception as e:
raise e
return self._init_job_from_response(response)
def load(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source,
mode=mode,
source_format=source_format,
csv_options=csv_options,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records)
if job is not None:
job.wait()
return job
  def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
    """ Get a function that can retrieve a page of rows.

    The function returned is a closure so that it can have a signature suitable for use
    by Iterator.

    Args:
      start_row: the row to start fetching from; default 0.
      max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
          is None which means no limit.
      page_size: the maximum number of results to fetch per page; default 1024.
    Returns:
      A function that can be called repeatedly with a page token and running count, and that
      will return an array of rows and a next page token; when the returned page token is None
      the fetch is complete.
    """
    if not start_row:
      start_row = 0
    elif start_row < 0:  # We are measuring from the table end
      if self.length >= 0:
        start_row += self.length
      else:
        raise Exception('Cannot use negative indices for table of unknown length')
    schema = self.schema._bq_schema
    name_parts = self._name_parts
    def _retrieve_rows(page_token, count):
      # Closure over schema/name_parts/start_row. `count` is the number of
      # rows fetched so far; returning a None token signals completion.
      page_rows = []
      if max_rows and count >= max_rows:
        page_token = None
      else:
        # Clamp the page size so we never fetch past max_rows.
        if max_rows and page_size > (max_rows - count):
          max_results = max_rows - count
        else:
          max_results = page_size
        try:
          # start_index is only used for the first page; subsequent pages are
          # addressed by the token the API returned.
          if page_token:
            response = self._api.tabledata_list(name_parts, page_token=page_token,
                                                max_results=max_results)
          else:
            response = self._api.tabledata_list(name_parts, start_index=start_row,
                                                max_results=max_results)
        except Exception as e:
          raise e
        page_token = response['pageToken'] if 'pageToken' in response else None
        if 'rows' in response:
          page_rows = response['rows']
      rows = []
      for row_dict in page_rows:
        rows.append(_parser.Parser.parse_row(schema, row_dict))
      return rows, page_token
    return _retrieve_rows
def range(self, start_row=0, max_rows=None):
""" Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
return iter(datalab.utils.Iterator(fetcher))
def to_dataframe(self, start_row=0, max_rows=None):
""" Exports the table to a Pandas dataframe.
Args:
start_row: the row of the table at which to start the export (default 0)
max_rows: an upper limit on the number of rows to export (default None)
Returns:
A Pandas dataframe containing the table data.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
count = 0
page_token = None
df = None
while True:
page_rows, page_token = fetcher(page_token, count)
if len(page_rows):
count += len(page_rows)
if df is None:
df = pandas.DataFrame.from_records(page_rows)
else:
df = df.append(page_rows, ignore_index=True)
if not page_token:
break
# Need to reorder the dataframe to preserve column ordering
ordered_fields = [field.name for field in self.schema]
return df[ordered_fields] if df is not None else pandas.DataFrame()
def to_file(self, destination, format='csv', csv_delimiter=',', csv_header=True):
"""Save the results to a local file in CSV format.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Raises:
An Exception if the operation failed.
"""
f = codecs.open(destination, 'w', 'utf-8')
fieldnames = []
for column in self.schema:
fieldnames.append(column.name)
if sys.version_info[0] == 2:
csv_delimiter = csv_delimiter.encode('unicode_escape')
writer = csv.DictWriter(f, fieldnames=fieldnames, delimiter=csv_delimiter)
if csv_header:
writer.writeheader()
for row in self:
writer.writerow(row)
f.close()
  @datalab.utils.async_method
  def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
    """Start saving the results to a local file in CSV format and return a Job for completion.

    Args:
      destination: path on the local filesystem for the saved results.
      format: the format to use for the exported data; currently only 'csv' is supported.
      csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
      csv_header: for CSV exports, whether to include an initial header line. Default true.
    Returns:
      A Job for the async save operation.
    Raises:
      An Exception if the operation failed.
    """
    # NOTE(review): the async_method decorator presumably runs this body off
    # the caller's thread and wraps it in a Job — confirm in datalab.utils.
    # The synchronous to_file does the actual work.
    self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header)
  @property
  def schema(self):
    """Retrieves the schema of the table.

    Returns:
      A Schema object containing a list of schema fields and associated metadata.
    Raises
      Exception if the request could not be executed or the response was malformed.
    """
    # The schema is fetched at most once and cached on the instance.
    if not self._schema:
      try:
        self._load_info()
        # KeyError here means tables.get returned no 'schema'/'fields' entry.
        self._schema = _schema.Schema(self._info['schema']['fields'])
      except KeyError:
        raise Exception('Unexpected table response: missing schema')
    return self._schema
def update(self, friendly_name=None, description=None, expiry=None, schema=None):
""" Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
"""
self._load_info()
if friendly_name is not None:
self._info['friendlyName'] = friendly_name
if description is not None:
self._info['description'] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info['expirationTime'] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info['schema'] = {'fields': schema}
try:
self._api.table_update(self._name_parts, self._info)
except datalab.utils.RequestException:
# The cached metadata is out of sync now; abandon it.
self._info = None
except Exception as e:
raise e
  def _repr_sql_(self):
    """Returns a representation of the table for embedding into a SQL statement.

    Returns:
      A formatted table name for use within SQL statements.
    """
    # Legacy-SQL style bracket quoting.
    return '[' + self._full_name + ']'

  def __repr__(self):
    """Returns a representation for the table for showing in the notebook.
    """
    return 'Table %s' % self._full_name

  def __str__(self):
    """Returns a string representation of the table using its specified name.

    Returns:
      The string representation of this object.
    """
    return self._full_name

  @property
  def length(self):
    """ Get the length of the table (number of rows). We don't use __len__ as this may
    return -1 for 'unknown'.
    """
    return self.metadata.rows

  def __iter__(self):
    """ Get an iterator for the table.
    """
    return self.range(start_row=0)
  def __getitem__(self, item):
    """ Get an item or a slice of items from the table. This uses a small cache
    to reduce the number of calls to tabledata.list.

    Note: this is a useful function to have, and supports some current usage like
    query.results()[0], but should be used with care.
    """
    if isinstance(item, slice):
      # Just treat this as a set of calls to __getitem__(int)
      result = []
      i = item.start
      step = item.step if item.step else 1
      while i < item.stop:
        result.append(self[i])
        i += step
      return result
    # Handle the integer index case.
    if item < 0:
      # Negative indices count from the end, which requires a known length.
      if self.length >= 0:
        item += self.length
      else:
        raise Exception('Cannot use negative indices for table of unknown length')
    # Fetch a fresh page only when the requested row falls outside the one
    # currently cached.
    if not self._cached_page \
        or self._cached_page_index > item \
        or self._cached_page_index + len(self._cached_page) <= item:
      # cache a new page. To get the start row we round to the nearest multiple of the page
      # size.
      first = old_div(item, self._DEFAULT_PAGE_SIZE) * self._DEFAULT_PAGE_SIZE
      count = self._DEFAULT_PAGE_SIZE
      if self.length >= 0:
        # Don't request past the end of the table when its size is known.
        remaining = self.length - first
        if count > remaining:
          count = remaining
      fetcher = self._get_row_fetcher(start_row=first, max_rows=count, page_size=count)
      self._cached_page_index = first
      self._cached_page, _ = fetcher(None, 0)
    return self._cached_page[item - self._cached_page_index]
  @staticmethod
  def _convert_decorator_time(when):
    """Convert a snapshot/window time argument to a decorator value in milliseconds.

    Args:
      when: a datetime (absolute point in time) or timedelta (relative to now;
        must be zero or negative, i.e. in the past).
    Returns:
      An integer millisecond value; positive values are absolute epoch offsets,
      non-positive values are offsets relative to query execution time.
    Raises:
      Exception if the argument has the wrong type, a relative time points to
      the future, or the time falls outside the seven day window.
    """
    if isinstance(when, datetime.datetime):
      # Absolute time: milliseconds since the Unix epoch.
      value = 1000 * (when - datetime.datetime.utcfromtimestamp(0)).total_seconds()
    elif isinstance(when, datetime.timedelta):
      # Relative time: must be non-positive (pointing into the past).
      value = when.total_seconds() * 1000
      if value > 0:
        raise Exception("Invalid snapshot relative when argument: %s" % str(when))
    else:
      raise Exception("Invalid snapshot when argument type: %s" % str(when))
    # Relative offsets may reach at most 7 days back.
    if value < -Table._MSEC_PER_WEEK:
      raise Exception("Invalid snapshot relative when argument: must be within 7 days: %s"
                      % str(when))
    if value > 0:
      now = 1000 * (datetime.datetime.utcnow() -
                    datetime.datetime.utcfromtimestamp(0)).total_seconds()
      # Check that an abs value is not more than 7 days in the past and is
      # not in the future
      if not ((now - Table._MSEC_PER_WEEK) < value < now):
        raise Exception("Invalid snapshot absolute when argument: %s" % str(when))
    return int(value)
  def snapshot(self, at):
    """ Return a new Table which is a snapshot of this table at the specified time.
    Args:
      at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
        (relative to current time). The result must be after the table was created and no more
        than seven days in the past. Passing None will get a reference the oldest snapshot.
        Note that using a datetime will get a snapshot at an absolute point in time, while
        a timedelta will provide a varying snapshot; any queries issued against such a Table
        will be done against a snapshot that has an age relative to the execution time of the
        query.
    Returns:
      A new Table object referencing the snapshot.
    Raises:
      An exception if this Table is already decorated, or if the time specified is invalid.
    """
    # A table reference may carry at most one @decorator; refuse to stack them.
    if self._name_parts.decorator != '':
      raise Exception("Cannot use snapshot() on an already decorated table")
    value = Table._convert_decorator_time(at)
    # BigQuery snapshot decorator syntax: table@<milliseconds>.
    return Table("%s@%s" % (self._full_name, str(value)), context=self._context)
  def window(self, begin, end=None):
    """ Return a new Table limited to the rows added to this Table during the specified time range.
    Args:
      begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
        (relative to current time). The result must be after the table was created and no more
        than seven days in the past.
        Note that using a relative value will provide a varying snapshot, not a fixed
        snapshot; any queries issued against such a Table will be done against a snapshot
        that has an age relative to the execution time of the query.
      end: the end time of the snapshot; if None, then the current time is used. The types and
        interpretation of values is as for start.
    Returns:
      A new Table object referencing the window.
    Raises:
      An exception if this Table is already decorated, or if the time specified is invalid.
    """
    # A table reference may carry at most one @decorator; refuse to stack them.
    if self._name_parts.decorator != '':
      raise Exception("Cannot use window() on an already decorated table")
    start = Table._convert_decorator_time(begin)
    if end is None:
      # Default the end to "now", expressed in the same mode (relative vs
      # absolute) as the begin argument so the sign check below can succeed.
      if isinstance(begin, datetime.timedelta):
        end = datetime.timedelta(0)
      else:
        end = datetime.datetime.utcnow()
    stop = Table._convert_decorator_time(end)
    # Both values must have the same sign
    if (start > 0 >= stop) or (stop > 0 >= start):
      raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
                      (str(begin), str(end)))
    # start must be less than stop
    if start > stop:
      raise Exception("window: Between arguments: begin must be before end: %s, %s" %
                      (str(begin), str(end)))
    # BigQuery range decorator syntax: table@<start>-<stop>.
    return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context)
def to_query(self, fields=None):
""" Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context)
| apache-2.0 |
yask123/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset: 100 noisy samples of sin(x) + sin(6x) on [0, 6].
# The fixed RandomState seed makes the example reproducible.
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model: one depth-4 tree vs. 300 boosted depth-4 trees
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                           n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict on the training grid to visualize the fitted functions
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results: training points plus both fitted curves
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
hjanime/bcbio-nextgen | bcbio/pipeline/qcsummary.py | 1 | 44083 | """Quality control and summary metrics for next-gen alignments and analysis.
"""
import collections
import contextlib
import csv
import os
import shutil
import subprocess
import pandas as pd
import lxml.html
import yaml
from datetime import datetime
# allow graceful during upgrades
try:
import matplotlib
matplotlib.use('Agg', force=True)
import matplotlib.pyplot as plt
plt.ioff()
except ImportError:
plt = None
try:
from fadapa import Fadapa
except ImportError:
Fadapa = None
import pybedtools
import pysam
import toolz as tz
import toolz.dicttoolz as dtz
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils, run_info
from bcbio.install import _get_data_dir
from bcbio.provenance import do
import bcbio.rnaseq.qc
from bcbio.rnaseq.coverage import plot_gene_coverage
import bcbio.pipeline.datadict as dd
from bcbio.variation import bedutils
from bcbio import broad
# ## High level functions to generate summary
def generate_parallel(samples, run_parallel):
    """Provide parallel preparation of summary information for alignment and variant calling.

    Args:
        samples: list of single-item lists, each wrapping a sample dictionary.
        run_parallel: dispatcher that runs a named pipeline step in parallel.

    Returns:
        Updated samples with per-sample "summary" entries pointing at the
        project summary file and, when present, the qsignature mixup check.
    """
    sum_samples = run_parallel("pipeline_summary", samples)
    qsign_info = run_parallel("qsignature_summary", [sum_samples])
    summary_file = write_project_summary(sum_samples, qsign_info)
    samples = []
    for data in sum_samples:
        if "summary" not in data[0]:
            data[0]["summary"] = {}
        data[0]["summary"]["project"] = summary_file
        if qsign_info:
            data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
        samples.append(data)
    # Optionally add per-researcher TSV summaries for LIMS-organized projects.
    samples = _add_researcher_summary(samples, summary_file)
    return samples
def pipeline_summary(data):
    """Provide summary information on processing sample.

    Runs the QC tool battery on the aligned BAM (when present) and stores the
    result under data["summary"]; returns the sample re-wrapped for the
    parallel runner.
    """
    work_bam = data.get("work_bam")
    # Only run QC when we have a reference and an aligned BAM to inspect.
    if data["sam_ref"] is not None and work_bam and work_bam.endswith(".bam"):
        logger.info("Generating summary files: %s" % str(data["name"]))
        data["summary"] = _run_qc_tools(work_bam, data)
    return [[data]]
def prep_pdf(qc_dir, config):
    """Create PDF from HTML summary outputs in QC directory.
    Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1
    Thanks to: https://www.biostars.org/p/16991/
    Works around issues with CSS conversion on CentOS by adjusting CSS.

    Returns the PDF path, or None when wkhtmltopdf or the HTML report
    is unavailable.
    """
    html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
    html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
    try:
        topdf = config_utils.get_program("wkhtmltopdf", config)
    except config_utils.CmdNotFound:
        # wkhtmltopdf is optional; silently skip PDF generation if missing.
        topdf = None
    if topdf and utils.file_exists(html_file):
        out_file = "%s.pdf" % os.path.splitext(html_file)[0]
        if not utils.file_exists(out_file):
            # Rename the summary/main CSS classes that break wkhtmltopdf rendering.
            cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
                   % (html_file, html_fixed))
            do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
            cmd = [topdf, html_fixed, out_file]
            do.run(cmd, "Convert QC HTML to PDF")
        return out_file
def _run_qc_tools(bam_file, data):
    """Run a set of third party quality control tools, returning QC directory and metrics.

    Fixes over the previous version: removes a duplicate dead ``metrics = {}``
    initialization and uses tuples consistently for ``to_run`` entries.

    :param bam_file: alignments in bam format
    :param data: dict with all configuration information
    :returns: dict with output of different tools
    """
    to_run = []
    if "fastqc" not in tz.get_in(("config", "algorithm", "tools_off"), data, []):
        to_run.append(("fastqc", _run_fastqc))
    if data["analysis"].lower().startswith("rna-seq"):
        # to_run.append(("rnaseqc", bcbio.rnaseq.qc.sample_summary))
        # to_run.append(("coverage", _run_gene_coverage))
        # to_run.append(("complexity", _run_complexity))
        to_run.append(("qualimap", _rnaseq_qualimap))
    elif data["analysis"].lower().startswith("chip-seq"):
        to_run.append(("bamtools", _run_bamtools_stats))
    else:
        to_run += [("bamtools", _run_bamtools_stats), ("gemini", _run_gemini_stats)]
    if data["analysis"].lower().startswith(("standard", "variant2")):
        to_run.append(("qsignature", _run_qsignature_generator))
    if "qualimap" in tz.get_in(("config", "algorithm", "tools_on"), data, []):
        to_run.append(("qualimap", _run_qualimap))
    qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
    metrics = {}
    # Each tool writes into its own subdirectory and contributes metrics.
    for program_name, qc_fn in to_run:
        cur_qc_dir = os.path.join(qc_dir, program_name)
        cur_metrics = qc_fn(bam_file, data, cur_qc_dir)
        metrics.update(cur_metrics)
    ratio = bam.get_aligned_reads(bam_file, data)
    # if (ratio < 0.60 and data['config']["algorithm"].get("kraken", None) and
    #         (data["analysis"].lower().startswith("rna-seq") or
    #          data["analysis"].lower().startswith("standard"))):
    if data['config']["algorithm"].get("kraken", None):
        cur_metrics = _run_kraken(data, ratio)
        metrics.update(cur_metrics)
    # Clean up any downsampled BAM produced by individual QC steps.
    bam.remove("%s-downsample%s" % os.path.splitext(bam_file))
    metrics["Name"] = data["name"][-1]
    metrics["Quality format"] = utils.get_in(data,
                                             ("config", "algorithm",
                                              "quality_format"),
                                             "standard").lower()
    return {"qc": qc_dir, "metrics": metrics}
# ## Generate project level QC summary for quickly assessing large projects
def write_project_summary(samples, qsign_info=None):
    """Write project summary information on the provided samples.
    write out dirs, genome resources,

    Args:
        samples: processed sample bundles ([[data], ...]).
        qsign_info: optional qsignature mixup-check results.
    Returns:
        Path to project-summary.yaml written in the work directory.
    """
    work_dir = samples[0][0]["dirs"]["work"]
    out_file = os.path.join(work_dir, "project-summary.yaml")
    upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
                  if "dir" in samples[0][0]["upload"] else "")
    test_run = samples[0][0].get("test_run", False)
    date = str(datetime.now())
    # Keep samples produced by earlier pipeline runs this run does not replace.
    prev_samples = _other_pipeline_samples(out_file, samples)
    with open(out_file, "w") as out_handle:
        # Dump each section separately so optional sections are easy to skip.
        yaml.safe_dump({"date": date}, out_handle,
                       default_flow_style=False, allow_unicode=False)
        if test_run:
            yaml.safe_dump({"test_run": True}, out_handle, default_flow_style=False,
                           allow_unicode=False)
        if qsign_info:
            qsign_out = utils.deepish_copy(qsign_info[0])
            # Drop the local output directory; only metrics belong in the summary.
            qsign_out.pop("out_dir", None)
            yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False,
                           allow_unicode=False)
        yaml.safe_dump({"upload": upload_dir}, out_handle,
                       default_flow_style=False, allow_unicode=False)
        yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
                       default_flow_style=False, allow_unicode=False)
        yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
                       default_flow_style=False, allow_unicode=False)
    return out_file
def _other_pipeline_samples(summary_file, cur_samples):
    """Retrieve samples produced previously by another pipeline in the summary output.

    Args:
        summary_file: path to an existing project-summary.yaml (may not exist yet).
        cur_samples: samples from the current run; entries with matching
            descriptions are excluded so fresh results replace stale ones.
    Returns:
        List of sample dicts from previous runs not superseded by this run.
    """
    cur_descriptions = set([s[0]["description"] for s in cur_samples])
    out = []
    if os.path.exists(summary_file):
        with open(summary_file) as in_handle:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary objects; the file is pipeline-generated, but safe_load
            # would be preferable -- confirm no custom tags are required.
            for s in yaml.load(in_handle).get("samples", []):
                if s["description"] not in cur_descriptions:
                    out.append(s)
    return out
def _save_fields(sample):
to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata",
"description"]
saved = {k: sample[k] for k in to_save if k in sample}
if "summary" in sample:
saved["summary"] = {"metrics": sample["summary"]["metrics"]}
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
saved["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
saved["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
saved["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return saved
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
# ## Generate researcher specific summaries
def _add_researcher_summary(samples, summary_yaml):
    """Generate summary files per researcher if organized via a LIMS.

    Groups samples by the upload "researcher" field, writes one TSV summary per
    researcher and records its path under each sample's summary.
    """
    by_researcher = collections.defaultdict(list)
    for data in (x[0] for x in samples):
        researcher = utils.get_in(data, ("upload", "researcher"))
        if researcher:
            by_researcher[researcher].append(data["description"])
    out_by_researcher = {}
    # One TSV per researcher, restricted to that researcher's descriptions.
    for researcher, descrs in by_researcher.items():
        out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher,
                                                                   set(descrs), samples[0][0])
    out = []
    for data in (x[0] for x in samples):
        researcher = utils.get_in(data, ("upload", "researcher"))
        if researcher:
            data["summary"]["researcher"] = out_by_researcher[researcher]
        out.append([data])
    return out
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data):
    """Generate a CSV file with summary information for a researcher on this project.

    Args:
        summary_yaml: path to the project summary YAML to read metrics from.
        researcher: researcher name used in the output filename.
        descrs: set of sample descriptions belonging to this researcher.
        data: one sample dict, used to locate the work directory.
    Returns:
        Path to the written tab-separated summary file.
    """
    out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")),
                            "%s-summary.tsv" % run_info.clean_name(researcher))
    metrics = ["Total reads", "Mapped reads", "Mapped reads pct", "Duplicates", "Duplicates pct"]
    with open(summary_yaml) as in_handle:
        with open(out_file, "w") as out_handle:
            writer = csv.writer(out_handle, dialect="excel-tab")
            writer.writerow(["Name"] + metrics)
            for sample in yaml.safe_load(in_handle)["samples"]:
                if sample["description"] in descrs:
                    # Missing metrics are written as empty cells.
                    row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "")
                                                     for x in metrics]
                    writer.writerow(row)
    return out_file
# ## Run and parse read information from FastQC
class FastQCParser:
    """Parse metrics and per-module tables from a FastQC output directory."""

    def __init__(self, base_dir, sample=None):
        # base_dir: directory containing fastqc_data.txt; sample: label added
        # to per-module tables.
        self._dir = base_dir
        self.sample = sample

    def get_fastqc_summary(self):
        """Return Basic Statistics metrics, skipping file-identity fields."""
        ignore = set(["Total Sequences", "Filtered Sequences",
                      "Filename", "File type", "Encoding"])
        stats = {}
        # [1:] skips the "#Measure Value" header row of the section.
        for stat_line in self._fastqc_data_section("Basic Statistics")[1:]:
            k, v = stat_line.split("\t")[:2]
            if k not in ignore:
                stats[k] = v
        return stats

    def _fastqc_data_section(self, section_name):
        """Return the raw lines of the named section from fastqc_data.txt."""
        out = []
        in_section = False
        data_file = os.path.join(self._dir, "fastqc_data.txt")
        if os.path.exists(data_file):
            with open(data_file) as in_handle:
                for line in in_handle:
                    if line.startswith(">>%s" % section_name):
                        in_section = True
                    elif in_section:
                        if line.startswith(">>END"):
                            break
                        out.append(line.rstrip("\r\n"))
        return out

    def save_sections_into_file(self):
        """Write selected FastQC modules as per-sample TSV files (needs fadapa)."""
        data_file = os.path.join(self._dir, "fastqc_data.txt")
        if os.path.exists(data_file) and Fadapa:
            parser = Fadapa(data_file)
            module = [m[1] for m in parser.summary()][2:9]
            for m in module:
                out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv")
                dt = self._get_module(parser, m)
                dt.to_csv(out_file, sep="\t", index=False)

    def _get_module(self, parser, module):
        """
        Get module using fadapa package, expanding position ranges and
        tagging each row with the sample name.
        """
        dt = []
        lines = parser.clean_data(module)
        header = lines[0]
        for data in lines[1:]:
            if data[0].startswith("#"):  # some modules have two headers
                header = data
                continue
            if data[0].find("-") > -1:  # expand positions 1-3 to 1, 2, 3
                f, s = map(int, data[0].split("-"))
                # inclusive upper bound: the range 1-3 covers positions 1, 2 AND 3
                for pos in range(f, s + 1):
                    dt.append([str(pos)] + data[1:])
            else:
                dt.append(data)
        dt = pd.DataFrame(dt)
        dt.columns = [h.replace(" ", "_") for h in header]
        dt['sample'] = self.sample
        return dt
def _run_gene_coverage(bam_file, data, out_dir):
    """Plot per-gene coverage for an RNA-seq BAM, caching the PDF output.

    Previously the cached branch returned the bare file path while the fresh
    branch returned a dict; callers merge the result with dict.update(), so a
    string return would iterate character-by-character. Always return the dict.

    :returns: dict mapping "gene_coverage" to the plot path.
    """
    out_file = os.path.join(out_dir, "gene_coverage.pdf")
    ref_file = utils.get_in(data, ("genome_resources", "rnaseq", "transcripts"))
    count_file = data["count_file"]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            plot_gene_coverage(bam_file, ref_file, count_file, tx_out_file)
    return {"gene_coverage": out_file}
def _run_kraken(data, ratio):
    """Run kraken, generating report in specified directory and parsing metrics.
    Using only first paired reads.

    Args:
        data: sample configuration dictionary.
        ratio: fraction of aligned reads, logged for context.
    Returns:
        dict of kraken metrics, or {"kraken_report": "null"} when kraken
        cannot be run (missing database or BAM-only input).
    """
    logger.info("Number of aligned reads < than 0.60 in %s: %s" % (str(data["name"]), ratio))
    logger.info("Running kraken to determine contaminant: %s" % str(data["name"]))
    qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
    kraken_out = os.path.join(qc_dir, "kraken")
    out = out_stats = None
    db = data['config']["algorithm"]["kraken"]
    kraken_cmd = config_utils.get_program("kraken", data["config"])
    if db == "minikraken":
        # Bundled database installed alongside bcbio data.
        db = os.path.join(_get_data_dir(), "genomes", "kraken", "minikraken")
    else:
        if not os.path.exists(db):
            logger.info("kraken: no database found %s, skipping" % db)
            return {"kraken_report": "null"}
    if not os.path.exists(os.path.join(kraken_out, "kraken_out")):
        work_dir = os.path.dirname(kraken_out)
        utils.safe_makedir(work_dir)
        num_cores = data["config"]["algorithm"].get("num_cores", 1)
        # Only the first fastq file is classified.
        fn_file = data["files"][0]
        if fn_file.endswith("bam"):
            logger.info("kraken: need fasta files as input")
            return {"kraken_report": "null"}
        with tx_tmpdir(data, work_dir) as tx_tmp_dir:
            with utils.chdir(tx_tmp_dir):
                out = os.path.join(tx_tmp_dir, "kraken_out")
                out_stats = os.path.join(tx_tmp_dir, "kraken_stats")
                cat = "zcat" if fn_file.endswith(".gz") else "cat"
                cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick "
                      "--preload --min-hits 2 "
                      "--threads {num_cores} "
                      "--out {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals())
                do.run(cl, "kraken: %s" % data["name"][-1])
                # Replace any stale output with the freshly-generated directory.
                if os.path.exists(kraken_out):
                    shutil.rmtree(kraken_out)
                shutil.move(tx_tmp_dir, kraken_out)
    metrics = _parse_kraken_output(kraken_out, db, data)
    return metrics
def _parse_kraken_output(out_dir, db, data):
    """Parse kraken stat info comming from stderr,
    generating report with kraken-report

    Args:
        out_dir: kraken output directory with kraken_out/kraken_stats files.
        db: kraken database path passed to kraken-report.
        data: sample configuration dictionary.
    Returns:
        dict with classified/unclassified percentages and species-level summary.
    """
    in_file = os.path.join(out_dir, "kraken_out")
    stat_file = os.path.join(out_dir, "kraken_stats")
    out_file = os.path.join(out_dir, "kraken_summary")
    kraken_cmd = config_utils.get_program("kraken-report", data["config"])
    classify = unclassify = None
    with open(stat_file, 'r') as handle:
        for line in handle:
            # Percentages appear in parentheses on the classified/unclassified lines.
            if line.find(" classified") > -1:
                classify = line[line.find("(") + 1:line.find(")")]
            if line.find(" unclassified") > -1:
                unclassify = line[line.find("(") + 1:line.find(")")]
    if os.path.getsize(in_file) > 0 and not os.path.exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals())
            do.run(cl, "kraken report: %s" % data["name"][-1])
    kraken = {"kraken_clas": classify, "kraken_unclas": unclassify}
    # Guard: when kraken classified nothing, the summary report is never
    # written; previously this crashed trying to open the missing file.
    if os.path.exists(out_file):
        kraken.update(_summarize_kraken(out_file))
    return kraken
def _summarize_kraken(fn):
"""get the value at species level"""
kraken = {}
list_sp, list_value = [], []
with open(fn) as handle:
for line in handle:
cols = line.strip().split("\t")
sp = cols[5].strip()
if len(sp.split(" ")) > 1 and not sp.startswith("cellular"):
list_sp.append(sp)
list_value.append(cols[0])
kraken = {"kraken_sp": list_sp, "kraken_value": list_value}
return kraken
def _run_fastqc(bam_file, data, fastqc_out):
    """Run fastqc, generating report in specified directory and parsing metrics.
    Downsamples to 10 million reads to avoid excessive processing times with large
    files, unless we're running a Standard/QC pipeline.
    Handles fastqc 0.11+, which use a single HTML file and older versions that use
    a directory of files + images. The goal is to eventually move to only 0.11+
    """
    # The HTML report acts as a sentinel marking a completed previous run.
    sentry_file = os.path.join(fastqc_out, "fastqc_report.html")
    if not os.path.exists(sentry_file):
        work_dir = os.path.dirname(fastqc_out)
        utils.safe_makedir(work_dir)
        ds_bam = (bam.downsample(bam_file, data, 1e7)
                  if data.get("analysis", "").lower() not in ["standard"]
                  else None)
        bam_file = ds_bam if ds_bam else bam_file
        fastqc_name = os.path.splitext(os.path.basename(bam_file))[0]
        num_cores = data["config"]["algorithm"].get("num_cores", 1)
        with tx_tmpdir(data, work_dir) as tx_tmp_dir:
            with utils.chdir(tx_tmp_dir):
                cl = [config_utils.get_program("fastqc", data["config"]),
                      "-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", "bam", bam_file]
                do.run(cl, "FastQC: %s" % data["name"][-1])
                tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name)
                tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name)
                if os.path.exists("%s.zip" % tx_fastqc_out):
                    os.remove("%s.zip" % tx_fastqc_out)
                # fastqc 0.11+ layout: a combined HTML report plus data file.
                if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file):
                    utils.safe_makedir(fastqc_out)
                    shutil.move(os.path.join(tx_fastqc_out, "fastqc_data.txt"), fastqc_out)
                    shutil.move(tx_combo_file, sentry_file)
                # older fastqc layout: move the whole output directory in place.
                elif not os.path.exists(sentry_file):
                    if os.path.exists(fastqc_out):
                        shutil.rmtree(fastqc_out)
                    shutil.move(tx_fastqc_out, fastqc_out)
    parser = FastQCParser(fastqc_out, data["name"][-1])
    stats = parser.get_fastqc_summary()
    parser.save_sections_into_file()
    return stats
def _run_complexity(bam_file, data, out_dir):
    """Estimate library complexity from unique start positions, saving a plot.

    Note: uses Python 2 print statements, consistent with this module.
    Returns {"Unique Starts Per Read": "NA"} when pandas/statsmodels are
    unavailable.
    """
    try:
        import pandas as pd
        import statsmodels.formula.api as sm
    except ImportError:
        return {"Unique Starts Per Read": "NA"}
    SAMPLE_SIZE = 1000000
    base, _ = os.path.splitext(os.path.basename(bam_file))
    utils.safe_makedir(out_dir)
    out_file = os.path.join(out_dir, base + ".pdf")
    df = bcbio.rnaseq.qc.starts_by_depth(bam_file, data["config"], SAMPLE_SIZE)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tmp_out_file:
            df.plot(x='reads', y='starts', title=bam_file + " complexity")
            fig = plt.gcf()
            fig.savefig(tmp_out_file)
            print "file saved as", out_file
            print "out_dir is", out_dir
    return bcbio.rnaseq.qc.estimate_library_complexity(df)
# ## Qualimap
def _parse_num_pct(k, v):
num, pct = v.split(" / ")
return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()}
def _parse_qualimap_globals(table):
    """Retrieve metrics of interest from globals table.

    Pulls mapped read counts (count / percent pairs) and the duplication
    rate out of the qualimap "Globals" HTML table.
    """
    want = {"Mapped reads": _parse_num_pct,
            "Duplication rate": lambda k, v: {k: v}}
    out = {}
    for row in table.xpath("table/tr"):
        name, value = [cell.text for cell in row.xpath("td")]
        handler = want.get(name)
        if handler is not None:
            out.update(handler(name, value))
    return out
def _parse_qualimap_globals_inregion(table):
    """Retrieve metrics from the global targeted region table.

    Only the mapped read counts are extracted, suffixed with "(in regions)"
    to distinguish them from whole-genome values.
    """
    out = {}
    for row in table.xpath("table/tr"):
        name, value = [cell.text for cell in row.xpath("td")]
        if name == "Mapped reads":
            out.update(_parse_num_pct("%s (in regions)" % name, value))
    return out
def _parse_qualimap_coverage(table):
    """Parse summary qualimap coverage metrics.

    Extracts the mean coverage value from the coverage summary table.
    """
    out = {}
    for row in table.xpath("table/tr"):
        name, value = [cell.text for cell in row.xpath("td")]
        if name == "Mean":
            out["Coverage (Mean)"] = value
    return out
def _parse_qualimap_insertsize(table):
    """Parse insert size metrics.

    Extracts the median insert size from the insert size summary table.
    """
    out = {}
    for row in table.xpath("table/tr"):
        name, value = [cell.text for cell in row.xpath("td")]
        if name == "Median":
            out["Insert size (Median)"] = value
    return out
def _parse_qualimap_metrics(report_file):
    """Extract useful metrics from the qualimap HTML report file.

    Each summary table in the report is handed to a dedicated parser; metric
    names are suffixed to flag that they are estimates from the downsampled
    (1e7 read) BAM used for qualimap.
    """
    parsers = {"Globals": _parse_qualimap_globals,
               "Globals (inside of regions)": _parse_qualimap_globals_inregion,
               "Coverage": _parse_qualimap_coverage,
               "Coverage (inside of regions)": _parse_qualimap_coverage,
               "Insert size": _parse_qualimap_insertsize,
               "Insert size (inside of regions)": _parse_qualimap_insertsize}
    parsed = {}
    root = lxml.html.parse(report_file).getroot()
    for table in root.xpath("//div[@class='table-summary']"):
        header = table.xpath("h3")[0].text
        if header in parsers:
            parsed.update(parsers[header](table))
    # Suffix keys in a single pass instead of zipping parallel key/value lists.
    return dict(("%s_qualimap_1e7reads_est" % name, val)
                for name, val in parsed.items())
def _bed_to_bed6(orig_file, out_dir):
    """Convert bed to required bed6 inputs.

    Missing name/score/strand columns are padded with the region index,
    "1.0" and "+" respectively so qualimap accepts the file.
    """
    base, ext = os.path.splitext(os.path.basename(orig_file))
    bed6_file = os.path.join(out_dir, "%s-bed6%s" % (base, ext))
    if not utils.file_exists(bed6_file):
        with open(bed6_file, "w") as out_handle:
            for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
                fields = [x for x in list(region) if x]
                fillers = [str(i), "1.0", "+"]
                padded = fields + fillers[:6 - len(fields)]
                out_handle.write("\t".join(padded) + "\n")
    return bed6_file
def _run_qualimap(bam_file, data, out_dir):
    """Run qualimap to assess alignment quality metrics.

    Downsamples the BAM to 1e7 reads, optionally restricts analysis to merged
    variant regions and parses the HTML report into a metrics dict.
    """
    report_file = os.path.join(out_dir, "qualimapReport.html")
    if not os.path.exists(report_file):
        ds_bam = bam.downsample(bam_file, data, 1e7)
        bam_file = ds_bam if ds_bam else bam_file
        utils.safe_makedir(out_dir)
        num_cores = data["config"]["algorithm"].get("num_cores", 1)
        qualimap = config_utils.get_program("qualimap", data["config"])
        resources = config_utils.get_resources("qualimap", data["config"])
        max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
                                             num_cores)
        # "unset DISPLAY" keeps qualimap in headless mode on servers.
        cmd = ("unset DISPLAY && {qualimap} bamqc -bam {bam_file} -outdir {out_dir} "
               "-nt {num_cores} --java-mem-size={max_mem}")
        species = data["genome_resources"]["aliases"].get("ensembl", "").upper()
        if species in ["HUMAN", "MOUSE"]:
            # genome GC distribution comparison is only bundled for human/mouse
            cmd += " -gd {species}"
        regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data)
        if regions:
            bed6_regions = _bed_to_bed6(regions, out_dir)
            cmd += " -gff {bed6_regions}"
        do.run(cmd.format(**locals()), "Qualimap: %s" % data["name"][-1])
    return _parse_qualimap_metrics(report_file)
# ## RNAseq Qualimap
def _parse_metrics(metrics):
    """Normalize raw RNA-seq QC metrics into summary rates and renamed keys.

    Computes mapping and rRNA rates from raw read counts and renames or
    rescales a fixed set of metric keys.
    """
    # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
    metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)
    missing = set(["Genes Detected", "Transcripts Detected",
                   "Mean Per Base Cov."])
    correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
    # Mapping of keys to handle: 1 = copy through as float, 0 = drop,
    # string = rename to that output key.
    to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
                      "Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate",
                      "Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0,
                      "No feature assigned": 0, "Duplication Rate of Mapped": 1,
                      "Fragment Length Mean": 1,
                      "rRNA": 1, "Ambiguou alignment": 0})
    total = ["Not aligned", "Aligned to genes", "No feature assigned"]
    out = {}
    total_reads = sum([int(metrics[name]) for name in total])
    out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads
    out['Mapped'] = sum([int(metrics[name]) for name in total[1:]])
    out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads
    # Metrics not produced by this tool chain are reported as zero.
    [out.update({name: 0}) for name in missing]
    # Percentages arrive on a 0-100 scale; convert to 0-1 fractions first.
    [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct]
    for name in to_change:
        if not to_change[name]:
            continue
        if to_change[name] == 1:
            out.update({name: float(metrics[name])})
        else:
            out.update({to_change[name]: float(metrics[name])})
    return out
def _detect_duplicates(bam_file, out_dir, config):
    """
    Detect duplicates metrics with Picard

    Runs MarkDuplicates (caching the metrics file) and returns the
    PERCENT_DUPLICATION value from the Picard metrics table.
    """
    out_file = os.path.join(out_dir, "dup_metrics")
    if not utils.file_exists(out_file):
        broad_runner = broad.runner_from_config(config)
        (dup_align_bam, metrics_file) = broad_runner.run_fn("picard_mark_duplicates", bam_file, remove_dups=True)
        shutil.move(metrics_file, out_file)
    metrics = []
    with open(out_file) as in_handle:
        reader = csv.reader(in_handle, dialect="excel-tab")
        for line in reader:
            if line and not line[0].startswith("#"):
                metrics.append(line)
    # First non-comment row is the header, second row holds the values.
    metrics = dict(zip(metrics[0], metrics[1]))
    return {"Duplication Rate of Mapped": metrics["PERCENT_DUPLICATION"]}
def _transform_browser_coor(rRNA_interval, rRNA_coor):
"""
transform interval format to browser coord: chr:start-end
"""
with open(rRNA_coor, 'w') as out_handle:
with open(rRNA_interval, 'r') as in_handle:
for line in in_handle:
c, bio, source, s, e = line.split("\t")[:5]
if bio.startswith("rRNA"):
out_handle.write(("{0}:{1}-{2}\n").format(c, s, e))
def _detect_rRNA(config, bam_file, rRNA_file, ref_file, out_dir, single_end):
    """
    Calculate rRNA with gatk-framework

    Counts reads falling in rRNA intervals; returns {'rRNA': 0} when no
    interval file is available.
    """
    if not utils.file_exists(rRNA_file):
        return {'rRNA': 0}
    out_file = os.path.join(out_dir, "rRNA.counts")
    if not utils.file_exists(out_file):
        out_file = _count_rRNA_reads(bam_file, out_file, ref_file, rRNA_file, single_end, config)
    # Default to 0 so a log file missing the expected "CountReads counted"
    # summary line does not raise NameError.
    rRNA_reads = 0
    with open(out_file) as in_handle:
        for line in in_handle:
            if line.find("CountReads counted") > -1:
                rRNA_reads = line.split()[6]
                break
    return {'rRNA': rRNA_reads}
def _count_rRNA_reads(in_bam, out_file, ref_file, rRNA_interval, single_end, config):
    """Use GATK counter to count reads in rRNA genes

    Converts the interval file to browser coordinates and runs CountReads,
    logging results into out_file.
    """
    bam.index(in_bam, config)
    if not utils.file_exists(out_file):
        # NOTE(review): other callers in this module pass data/config as the
        # first argument to file_transaction; confirm this positional form
        # is supported by the transaction helper.
        with file_transaction(out_file) as tx_out_file:
            rRNA_coor = os.path.join(os.path.dirname(out_file), "rRNA.list")
            _transform_browser_coor(rRNA_interval, rRNA_coor)
            params = ["-T", "CountReads",
                      "-R", ref_file,
                      "-I", in_bam,
                      "-log", tx_out_file,
                      "-L", rRNA_coor,
                      "--filter_reads_with_N_cigar",
                      "-allowPotentiallyMisencodedQuals"]
            jvm_opts = broad.get_gatk_framework_opts(config)
            cmd = [config_utils.get_program("gatk-framework", config)] + jvm_opts + params
            do.run(cmd, "counts rRNA for %s" % in_bam)
    return out_file
def _parse_qualimap_rnaseq(table):
    """
    Retrieve metrics of interest from globals table.

    Cleans metric names (drops colons) and values (drops thousands commas);
    "count / percent" pairs are split into two metrics.
    """
    out = {}
    for row in table.xpath("table/tr"):
        name, value = [cell.text for cell in row.xpath("td")]
        name = name.replace(":", "").strip()
        value = value.replace(",", "")
        if value.find("/") > -1:
            out.update(_parse_num_pct(name, value.replace("%", "")))
        else:
            out[name] = value
    return out
def _parse_rnaseq_qualimap_metrics(report_file):
    """Extract useful metrics from the qualimap HTML report file.

    Only the alignment, genomic origin and transcript coverage summary
    tables are parsed.
    """
    wanted = set(["Reads alignment", "Reads genomic origin", "Transcript coverage profile"])
    out = {}
    root = lxml.html.parse(report_file).getroot()
    for table in root.xpath("//div[@class='table-summary']"):
        if table.xpath("h3")[0].text in wanted:
            out.update(_parse_qualimap_rnaseq(table))
    return out
def _rnaseq_qualimap(bam_file, data, out_dir):
    """
    Run qualimap for a rnaseq bam file and parse results

    Combines qualimap rnaseq output with Picard duplicate detection,
    GATK-based rRNA counts and an estimated fragment length, then
    normalizes everything through _parse_metrics.
    """
    report_file = os.path.join(out_dir, "qualimapReport.html")
    config = data["config"]
    gtf_file = dd.get_gtf_file(data)
    ref_file = dd.get_ref_file(data)
    single_end = not bam.is_paired(bam_file)
    if not utils.file_exists(report_file):
        utils.safe_makedir(out_dir)
        bam.index(bam_file, config)
        cmd = _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file, single_end)
        do.run(cmd, "Qualimap for {}".format(data["name"][-1]))
    metrics = _parse_rnaseq_qualimap_metrics(report_file)
    metrics.update(_detect_duplicates(bam_file, out_dir, config))
    metrics.update(_detect_rRNA(config, bam_file, gtf_file, ref_file, out_dir, single_end))
    metrics.update({"Fragment Length Mean": bam.estimate_fragment_size(bam_file)})
    metrics = _parse_metrics(metrics)
    return metrics
def _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file=None, single_end=None):
    """Build the shell command line for a `qualimap rnaseq` run.

    Memory is taken from the qualimap resources section and adjusted for
    the configured core count. `single_end` is accepted for interface
    compatibility but is not used in the generated command.
    """
    qualimap = config_utils.get_program("qualimap", config)
    resources = config_utils.get_resources("qualimap", config)
    num_cores = resources.get("cores", 1)
    max_mem = config_utils.adjust_memory(resources.get("memory", "4G"), num_cores)
    # DISPLAY is unset so qualimap's Java GUI code runs headless.
    return ("unset DISPLAY && {qualimap} rnaseq -outdir {out_dir} -a proportional "
            "-bam {bam_file} -gtf {gtf_file} --java-mem-size={max_mem}").format(**locals())
# ## Lightweight QC approaches
def _parse_bamtools_stats(stats_file):
    """Parse `bamtools stats` text output into a metric -> value dictionary.

    Only a fixed set of metrics is kept. A value reported as "count (pct)"
    also produces a "<metric> pct" entry with the parentheses stripped;
    all values are kept as strings, exactly as they appear in the file.
    """
    wanted = set(["Total reads", "Mapped reads", "Duplicates", "Median insert size"])
    parsed = {}
    with open(stats_file) as in_handle:
        for line in in_handle:
            pieces = line.split(":")
            if len(pieces) != 2:
                continue
            label, value_str = pieces
            # Drop any parenthesized qualifier from the label, e.g. "Duplicates (x)".
            label = label.split("(")[0].strip()
            if label not in wanted:
                continue
            fields = value_str.split()
            if len(fields) == 2:
                count, pct = fields
                pct = pct.replace("(", "").replace(")", "")
            else:
                count = fields[0]
                pct = None
            parsed[label] = count
            if pct:
                parsed["%s pct" % label] = pct
    return parsed
def _parse_offtargets(bam_file):
    """
    Add off-target read metrics if a stats file exists.

    Looks for a `<base>-offtarget-stats.yaml` file next to the BAM and
    returns its parsed contents, or an empty dict when no stats are present.
    """
    off_target = bam_file.replace(".bam", "-offtarget-stats.yaml")
    if os.path.exists(off_target):
        # Use a context manager so the handle is closed (the original
        # `yaml.load(open(...))` leaked it), and safe_load to avoid
        # executing arbitrary YAML tags; the stats files are written with
        # yaml.safe_dump elsewhere, so safe_load round-trips them.
        with open(off_target) as in_handle:
            return yaml.safe_load(in_handle)
    return {}
def _run_bamtools_stats(bam_file, data, out_dir):
    """Run bamtools stats with reports on mapped reads, duplicates and insert sizes.
    """
    stats_file = os.path.join(out_dir, "bamtools_stats.txt")
    if not utils.file_exists(stats_file):
        utils.safe_makedir(out_dir)
        bamtools = config_utils.get_program("bamtools", data["config"])
        with file_transaction(data, stats_file) as tx_out_file:
            # Insert-size statistics are only meaningful for paired reads.
            pieces = ["{bamtools} stats -in {bam_file}"]
            if bam.is_paired(bam_file):
                pieces.append(" -insert")
            pieces.append(" > {tx_out_file}")
            cmd = "".join(pieces)
            do.run(cmd.format(**locals()), "bamtools stats", data)
    stats = _parse_bamtools_stats(stats_file)
    stats.update(_parse_offtargets(bam_file))
    return stats
## Variant statistics from gemini
def _run_gemini_stats(bam_file, data, out_dir):
    """Retrieve high level variant statistics from Gemini.

    Uses the first gemini population database attached to the sample's
    variants. Results are cached in a `<db>-stats.yaml` file next to the
    database and only recomputed when the database is newer than the cache.
    Returns a flat dict of top-level metrics plus the per-sample metrics
    for this sample's name.

    NOTE(review): this is Python 2 style code -- `out.iteritems()` below and
    treating `subprocess.check_output` results as str would both need
    changes for Python 3.
    """
    out = {}
    # Collect gemini database paths from each variant calling result,
    # dropping entries without a population db.
    gemini_dbs = [d for d in
                  [tz.get_in(["population", "db"], x) for x in data.get("variants", [])] if d]
    if len(gemini_dbs) > 0:
        gemini_db = gemini_dbs[0]
        gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0]
        if not utils.file_uptodate(gemini_stat_file, gemini_db):
            gemini = config_utils.get_program("gemini", data["config"])
            tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db])
            gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db])
            dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q",
                                                   "SELECT count(*) FROM variants WHERE in_dbsnp==1"])
            # Ts/Tv ratio is the last column of the second output line.
            out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1]
            # Expected columns: sample, hom_ref, het, hom_var, unknown, total.
            for line in gt_counts.split("\n"):
                parts = line.rstrip().split()
                if len(parts) > 0 and parts[0] != "sample":
                    name, hom_ref, het, hom_var, _, total = parts
                    out[name] = {}
                    out[name]["Variations (heterozygous)"] = int(het)
                    out[name]["Variations (homozygous)"] = int(hom_var)
                    # same total variations for all samples, keep that top level as well.
                    out["Variations (total)"] = int(total)
            out["Variations (in dbSNP)"] = int(dbsnp_count.strip())
            if out.get("Variations (total)") > 0:
                out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] /
                                                               float(out["Variations (total)"]) * 100.0)
            # Cache the computed stats so later runs can skip the gemini calls.
            with open(gemini_stat_file, "w") as out_handle:
                yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
        else:
            with open(gemini_stat_file) as in_handle:
                out = yaml.safe_load(in_handle)
    # Flatten: keep scalar top-level metrics, and merge in the per-sample
    # dict matching this sample's name.
    res = {}
    for k, v in out.iteritems():
        if not isinstance(v, dict):
            res.update({k: v})
        if k == data['name'][-1]:
            res.update(v)
    return res
## qsignature
def _run_qsignature_generator(bam_file, data, out_dir):
    """ Run SignatureGenerator to create a normalized vcf that later will be the input of qsignature_summary

    :param bam_file: (str) path of the bam_file
    :param data: (dict) sample information dictionary
    :param out_dir: (str) path of the output directory
    :returns: (dict) dict with the normalized vcf file under 'qsig_vcf',
        or an empty dict when qsignature is not configured/applicable
    """
    position = dd.get_qsig_file(data)
    mixup_check = dd.get_mixup_check(data)
    # Only run when a qsignature-style mixup check is requested.
    if mixup_check and mixup_check.startswith("qsignature"):
        if not position:
            logger.info("There is no qsignature for this species: %s"
                        % tz.get_in(['genome_build'], data))
            return {}
        # Defaults for the fast check on a chr22 slice; "qsignature_full"
        # uses the whole BAM with more memory and a higher read cap.
        jvm_opts = "-Xms750m -Xmx2g"
        limit_reads = 20000000
        if mixup_check == "qsignature_full":
            slice_bam = bam_file
            jvm_opts = "-Xms750m -Xmx8g"
            limit_reads = 100000000
        else:
            slice_bam = _slice_chr22(bam_file, data)
        qsig = config_utils.get_program("qsignature", data["config"])
        if not qsig:
            return {}
        utils.safe_makedir(out_dir)
        out_name = os.path.basename(slice_bam).replace("bam", "qsig.vcf")
        out_file = os.path.join(out_dir, out_name)
        log_file = os.path.join(out_dir, "qsig.log")
        cores = dd.get_cores(data)
        base_cmd = ("{qsig} {jvm_opts} "
                    "org.qcmg.sig.SignatureGenerator "
                    "--noOfThreads {cores} "
                    "-log {log_file} -i {position} "
                    "-i {down_file} ")
        if not os.path.exists(out_file):
            # Downsample to limit_reads; fall back to the (sliced) BAM when
            # downsampling is not needed/possible.
            down_file = bam.downsample(slice_bam, data, limit_reads)
            if not down_file:
                down_file = slice_bam
            # SignatureGenerator writes its output next to the input BAM.
            file_qsign_out = "{0}.qsig.vcf".format(down_file)
            do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % data["name"][-1])
            if os.path.exists(file_qsign_out):
                with file_transaction(data, out_file) as file_txt_out:
                    shutil.move(file_qsign_out, file_txt_out)
            else:
                raise IOError("File doesn't exist %s" % file_qsign_out)
        return {'qsig_vcf': out_file}
    return {}
def qsignature_summary(*samples):
    """Run SignatureCompareRelatedSimple module from qsignature tool.

    Creates a matrix of pairwise comparison among samples. The
    function will not run if the output exists.

    :param samples: list with only one element containing all samples information
    :returns: (list) one summary dict with counts and the output directory,
        or [] when no sample has a qsignature vcf, or [[]] when the
        qsignature program is not configured
    """
    warnings, similar = [], []
    qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
    if not qsig:
        return [[]]
    jvm_opts = "-Xms750m -Xmx8g"
    work_dir = samples[0][0]["dirs"]["work"]
    count = 0
    # Symlink each sample's qsignature vcf into a shared directory so the
    # comparison tool can scan them all at once.
    for data in samples:
        data = data[0]
        vcf = tz.get_in(["summary", "metrics", "qsig_vcf"], data)
        if vcf:
            count += 1
            vcf_name = data["name"][-1] + ".qsig.vcf"
            out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
            if not os.path.lexists(os.path.join(out_dir, vcf_name)):
                os.symlink(vcf, os.path.join(out_dir, vcf_name))
    if count > 0:
        qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
        out_file = os.path.join(qc_out_dir, "qsignature.xml")
        out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
        out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
        log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
        if not os.path.exists(out_file):
            with file_transaction(samples[0][0], out_file) as file_txt_out:
                base_cmd = ("{qsig} {jvm_opts} "
                            "org.qcmg.sig.SignatureCompareRelatedSimple "
                            "-log {log} -dir {out_dir} "
                            "-o {file_txt_out} ")
                do.run(base_cmd.format(**locals()), "qsignature score calculation")
        # Parse regardless of whether the xml was just created or pre-existing.
        error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
                                                            out_warn_file, samples[0][0])
        return [{'total samples': count,
                 'similar samples pairs': len(similar),
                 'warnings samples pairs': len(warnings),
                 'error samples': list(error),
                 'out_dir': qc_out_dir}]
    else:
        return []
def _parse_qsignature_output(in_file, out_file, warning_file, data):
    """ Parse the xml file produced by qsignature.

    Writes a tab-separated score matrix to out_file and human-readable
    messages to warning_file, classifying each sample pair by its score.

    :param in_file: (str) path to the qsignature xml file
    :param out_file: (str) path to the output score file
    :param warning_file: (str) path to the warnings file
    :returns: (tuple of sets) (error, warnings, similar) pair names:
        identical samples, likely replicates, and related samples
    """
    name = {}
    error, warnings, similar = set(), set(), set()
    # Score thresholds: == same -> identical, < replicate -> replicate,
    # < related -> related. Full-genome mode uses tighter cutoffs.
    same, replicate, related = 0, 0.1, 0.18
    mixup_check = dd.get_mixup_check(data)
    if mixup_check == "qsignature_full":
        same, replicate, related = 0, 0.01, 0.061
    with open(in_file, 'r') as in_handle:
        with file_transaction(data, out_file) as out_tx_file:
            with file_transaction(data, warning_file) as warn_tx_file:
                with open(out_tx_file, 'w') as out_handle:
                    with open(warn_tx_file, 'w') as warn_handle:
                        et = lxml.etree.parse(in_handle)
                        # Map qsignature file ids to sample names.
                        for i in list(et.iter('file')):
                            name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
                        for i in list(et.iter('comparison')):
                            msg = None
                            pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
                            out_handle.write("%s\t%s\t%s\n" %
                                             (name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
                            if float(i.attrib['score']) == same:
                                msg = 'qsignature ERROR: read same samples:%s\n'
                                error.add(pair)
                            elif float(i.attrib['score']) < replicate:
                                msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
                                warnings.add(pair)
                            elif float(i.attrib['score']) < related:
                                msg = 'qsignature NOTE: read relative samples:%s\n'
                                similar.add(pair)
                            if msg:
                                logger.info(msg % pair)
                                warn_handle.write(msg % pair)
    return error, warnings, similar
def _slice_chr22(in_bam, data):
    """
    Return a BAM file containing only chromosome 22 reads.

    Handles both "22" and "chr22" contig naming by inspecting the BAM
    header. NOTE(review): the output name formats splitext's (root, ext)
    pair, producing e.g. `sample-chr.bam` -- the chromosome number is not
    part of the name; confirm this is intended before renaming.
    """
    sambamba = config_utils.get_program("sambamba", data["config"])
    out_file = "%s-chr%s" % os.path.splitext(in_bam)
    if not utils.file_exists(out_file):
        # sambamba slice requires an index.
        bam.index(in_bam, data['config'])
        with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
            bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
        chromosome = "22"
        if "chr22" in bam_contigs:
            chromosome = "chr22"
        with file_transaction(data, out_file) as tx_out_file:
            cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
            # Command output is not used; run for its side effect.
            out = subprocess.check_output(cmd, shell=True)
    return out_file
| mit |
prasadtalasila/MailingListParser | lib/input/mbox/keyword_clustering.py | 1 | 7021 | import json
import mailbox
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from lib.analysis.author import ranking
from lib.util import custom_stopwords
from lib.util.read import *
def get_top_authors(top_n, json_filename):
    """
    Gets top n authors based on the ranking generated from generate_author_ranking in analysis.author.ranking

    :param top_n: Number of top authors to be returned.
    :param json_filename: The JSON file from which author scores are generated.
    :return: (set of author emails, dict mapping email -> 1-based rank)
    """
    author_scores = ranking.get(json_filename, output_filename=None, active_score=2, passive_score=1, write_to_file=False)
    top_authors = set()
    top_authors_index = dict()
    # author_scores is assumed sorted best-first; keep the first top_n.
    for rank, (email_addr, author_score) in enumerate(author_scores, start=1):
        top_authors.add(email_addr)
        top_authors_index[email_addr] = rank
        if rank == top_n:
            break
    return top_authors, top_authors_index
def save_sparse_csr(filename, array):
    """
    Write a CSR sparse matrix to disk as a compressed numpy archive.

    :param filename: The file to store the matrix (numpy appends .npz if missing).
    :param array: The CSR matrix; its data/indices/indptr/shape arrays are saved.
    """
    np.savez(filename,
             data=array.data,
             indices=array.indices,
             indptr=array.indptr,
             shape=array.shape)
def get_message_body(message):
    """
    Gets the cleaned message body of an email message.

    Quoted lines, diff-style lines ('>', '+', '-', '@' prefixes, plus the
    line preceding a quote), patch sign-off/metadata headers, and anything
    after a '---' separator line are removed.

    :param message: The message whose body is to be extracted.
    :return: Message body as a single newline-joined string.
    """
    body = None
    if message.is_multipart():
        # Keep the payload of the last (sub)part walked, as the original did.
        for part in message.walk():
            if part.is_multipart():
                for subpart in part.walk():
                    body = subpart.get_payload(decode=False)
            else:
                body = part.get_payload(decode=False)
    else:
        body = message.get_payload(decode=False)
    lines = body.splitlines()
    boilerplate = ("Cc:", "Signed-off-by:", "Acked-by:", "From:", "Tested-by:",
                   "Reported-by:", "Reviewed-by:", "Link:", "Suggested-by:")
    for num, line in enumerate(lines):
        if not line:
            continue
        if line == "---":
            # Everything after the signature/patch separator is dropped.
            lines = lines[:num]
            break
        if line[0] in ('>', '+', '-', '@'):
            lines[num] = ""
            if num > 0:
                # Also drop the attribution line preceding a quote block.
                lines[num - 1] = ""
        elif line.startswith(boilerplate):
            lines[num] = ""
    cleaned = [entry.strip() for entry in lines]
    cleaned = [entry for entry in cleaned if entry]
    return '\n'.join(cleaned)
def generate_kmeans_clustering(mbox_filename, output_filename, author_uid_filename, json_filename, top_n = None):
    """
    From the .MBOX file, this function extracts the email content is extracted using two predefined classes
    available in the Python Standard Library: Mailbox and Message. Feature vectors are created for all the authors
    by obtaining meaningful words from the mail content, after removing the stop words, using NLTK libraries.
    The words obtained are transformed using stemming or lemmatization before adding these words to the word list of
    the corresponding authors. A matrix is created out of these word lists such that row set is the union of terms of
    all the authors and the column set contains the authors. If a term does not appear in a document, the corresponding
    matrix entry would be zero. The resulting matrix is called term-document matrix. Then tf-idf analysis is performed
    on the term-document matrix. Finally K-means clustering (k=8) groups the authors and the cluster assignments are
    written to output_filename as JSON.

    :param mbox_filename: Contains the absolute or relative address of the MBOX file to be opened.
    :param output_filename: JSON file the cluster-label -> author-list mapping is written to.
    :param author_uid_filename: JSON file mapping author emails to UIDs.
    :param json_filename: JSON file used to compute author rankings.
    :param top_n: Number of top-ranked authors to cluster.
        NOTE(review): the default of None would raise a TypeError at the
        `min(len(author_uid_map), top_n)` call below, so in practice an
        integer must always be passed; the later `top_n is None` branches
        appear unreachable -- confirm against callers.
    """
    english_stopwords = set(stopwords.words('english')) | custom_stopwords.common_words | custom_stopwords.custom_words
    email_re = re.compile(r'[\w\.-]+@[\w\.-]+')
    wnl = WordNetLemmatizer()
    print("Reading messages from MBOX file...")
    mailbox_obj = mailbox.mbox(mbox_filename)
    with open(author_uid_filename, 'r') as map_file:
        author_uid_map = json.load(map_file)
        # NOTE(review): redundant -- the `with` block already closes the file.
        map_file.close()
    top_n = min(len(author_uid_map), top_n)
    top_authors, top_authors_index = get_top_authors(top_n, json_filename)
    # Ranks from get_top_authors are 1-based, so slot 0 stays unused.
    keywords_list = [list() for x in range(top_n+1)]
    i = 0 # Number of emails processed
    for message in mailbox_obj:
        # Extract a bare email address from the From header when possible.
        temp = email_re.search(str(message['From']))
        from_addr = temp.group(0) if temp is not None else message['From']
        if top_n is not None and from_addr not in top_authors:
            continue
        if top_n is None and from_addr not in author_uid_map.keys():
            continue
        msg_body = get_message_body(message)
        if from_addr is None:
            from_addr = message['From']
        # Tokenize: lowercase words of length 3..29 with punctuation stripped.
        msg_tokens = [x.lower() for x in re.sub('\W+', ' ', msg_body).split() if 2 < len(x) < 30]
        # Toggle comment below if numbers and underscores should also be removed.
        # msg_tokens = [x for x in re.sub('[^a-zA-Z]+', ' ', msg_body).split() if 2 < len(x) < 30]
        # Lemmatize and drop digits, substrings of the sender address, and stopwords.
        msg_tokens = [wnl.lemmatize(x) for x in msg_tokens if not x.isdigit() and x not in from_addr]
        msg_tokens = [x for x in msg_tokens if x not in english_stopwords]
        keywords_list[top_authors_index[from_addr]].extend(msg_tokens)
        i += 1
        if not i % 10000:
            print(i, "of", len(mailbox_obj), "messages processed.")
    # Join each author's tokens into one pseudo-document for vectorization.
    for num in range(len(keywords_list)):
        keywords_list[num] = " ".join(keywords_list[num])
    print("Performing tf-idf analysis on the term-document matrix...")
    vectorizer = TfidfVectorizer(analyzer='word', stop_words=english_stopwords, max_features=200000,
                                 use_idf=True, ngram_range=(1, 4))
    tfidf_matrix = vectorizer.fit_transform(keywords_list).toarray()
    # with open("author_top_index.json", 'w') as json_file:
    # json.dump(top_authors_index, json_file)
    # print(feature_names)
    kmeans_classifier = KMeans(n_clusters=8, n_init=4)
    labels = kmeans_classifier.fit_predict(tfidf_matrix)
    # Invert author_uid_map per label index to recover author emails.
    clustering = dict()
    for i in range(len(labels)):
        x = None
        for k, v in author_uid_map.items():
            if v == i:
                x = k
        if clustering.get(str(labels[i]), None) is None:
            clustering[str(labels[i])] = [x]
        else:
            clustering[str(labels[i])].append(x)
    with open(output_filename, 'w') as out_file:
        json.dump(clustering, out_file)
        # NOTE(review): redundant -- the `with` block already closes the file.
        out_file.close()
| gpl-3.0 |
gviejo/ThalamusPhysio | python/main_test_final_classification_XGB_KL.py | 1 | 15477 | import ternary
import numpy as np
import pandas as pd
from functions import *
import sys
from functools import reduce
from sklearn.manifold import *
from sklearn.cluster import *
from sklearn.linear_model import *
from sklearn.ensemble import *
from pylab import *
import _pickle as cPickle
from skimage.filters import gaussian
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
import xgboost as xgb
from scipy.stats import entropy
def xgb_decodage(Xr, Yr, Xt, n_class):
    """Train a gradient-boosted multiclass classifier and predict test probabilities.

    :param Xr: training feature matrix
    :param Yr: training labels (integers in [0, n_class))
    :param Xt: test feature matrix
    :param n_class: number of target classes
    :return: (n_test, n_class) array of softmax class probabilities
    """
    params = {'objective': "multi:softprob",
              'eval_metric': "mlogloss", #loglikelihood loss
              'seed': np.random.randint(1, 10000), #for reproducibility
              'silent': 1,
              'learning_rate': 0.01,
              'min_child_weight': 2,
              'n_estimators': 100,
              # 'subsample': 0.5,
              'max_depth': 5,
              'gamma': 0.5,
              'num_class': n_class}
    train_matrix = xgb.DMatrix(Xr, label=Yr)
    test_matrix = xgb.DMatrix(Xt)
    booster = xgb.train(params, train_matrix, 1000)
    return booster.predict(test_matrix)
def fit_cv(X, Y, n_cv=10, verbose=1, shuffle=False):
    """Cross-validated decoding: predict class probabilities for every sample.

    Splits the data into n_cv folds (fixed seed 42), trains xgb_decodage on
    each training fold and fills in predictions for the held-out fold.
    Optionally shuffles the training labels to build a null distribution.
    `verbose` is accepted for interface compatibility but unused.

    :return: (n_samples, n_class) array of predicted class probabilities
    """
    if np.ndim(X) == 1:
        # Promote a 1-D feature vector to a single-column matrix.
        X = np.transpose(np.atleast_2d(X))
    n_class = len(np.unique(Y))
    predictions = np.full((len(Y), n_class), np.nan)
    folds = KFold(n_splits=n_cv, shuffle=True, random_state=42)
    for train_idx, test_idx in folds.split(X):
        X_train = np.copy(X[train_idx, :])
        y_train = np.copy(Y[train_idx])
        X_test = np.copy(X[test_idx, :])
        y_test = np.copy(Y[test_idx])
        if shuffle:
            # Permuting training labels yields chance-level predictions.
            np.random.shuffle(y_train)
        predictions[test_idx] = xgb_decodage(X_train, y_train, X_test, n_class)
    return predictions
############################################################################################################
# LOADING DATA
############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
burstiness = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
lambdaa = pd.read_hdf("/mnt/DataGuillaume/MergedData/LAMBDA_AUTOCORR.h5")[('rem', 'b')]
lambdaa = lambdaa[np.logical_and(lambdaa>0.0,lambdaa<30.0)]
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
# rippower = pd.read_hdf("../figures/figures_articles/figure2/power_ripples_2.h5")
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
swr_phase = pd.read_hdf("/mnt/DataGuillaume/MergedData/SWR_PHASE.h5")
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:150]
autocorr_rem = autocorr_rem[2:150]
autocorr_sws = autocorr_sws[2:150]
# HISTOGRAM THETA
theta_hist = pd.read_hdf("/mnt/DataGuillaume/MergedData/THETA_THAL_HISTOGRAM_2.h5")
theta_hist = theta_hist.rolling(window = 5, win_type='gaussian', center = True, min_periods=1).mean(std=1.0)
theta_wak = theta_hist.xs(('wak'), 1, 1)
theta_rem = theta_hist.xs(('rem'), 1, 1)
# AUTOCORR LONG
store_autocorr2 = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_LONG.h5")
autocorr2_wak = store_autocorr2['wak'].loc[0.5:]
autocorr2_rem = store_autocorr2['rem'].loc[0.5:]
autocorr2_sws = store_autocorr2['sws'].loc[0.5:]
autocorr2_wak = autocorr2_wak.rolling(window = 100, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 10.0)
autocorr2_rem = autocorr2_rem.rolling(window = 100, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 10.0)
autocorr2_sws = autocorr2_sws.rolling(window = 100, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 10.0)
autocorr2_wak = autocorr2_wak[2:2000]
autocorr2_rem = autocorr2_rem[2:2000]
autocorr2_sws = autocorr2_sws[2:2000]
############################################################################################################
# WHICH NEURONS
############################################################################################################
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
# neurons = reduce(np.intersect1d, (burstiness.index.values, theta.index.values, rippower.index.values, fr_index))
# neurons = reduce(np.intersect1d, (fr_index, autocorr_sws.columns, autocorr2_rem.columns, theta_rem.columns, swr.columns, lambdaa.index.values))
neurons = reduce(np.intersect1d, (fr_index, autocorr_sws.columns, autocorr_rem.columns, autocorr_wak.columns, swr.columns))
# neurons = np.array([n for n in neurons if 'Mouse17' in n])
# nucleus = ['AD', 'AM', 'AVd', 'AVv', 'VA', 'LDvl', 'CM']
# neurons = np.intersect1d(neurons, mappings.index[mappings['nucleus'].isin(nucleus)])
count_nucl = pd.DataFrame(columns = ['12', '17','20', '32'])
for m in ['12', '17','20', '32']:
subspace = pd.read_hdf("/mnt/DataGuillaume/MergedData/subspace_Mouse"+m+".hdf5")
nucleus = np.unique(subspace['nucleus'])
total = [np.sum(subspace['nucleus'] == n) for n in nucleus]
count_nucl[m] = pd.Series(index = nucleus, data = total)
nucleus = list(count_nucl.dropna().index.values)
allnucleus = list(np.unique(mappings.loc[neurons,'nucleus']))
tokeep = np.array([n for n in neurons if mappings.loc[n,'nucleus'] in nucleus])
############################################################################################################
# STACKING DIMENSIONS
############################################################################################################
# pc_short_rem = PCA(n_components=10).fit_transform(autocorr_rem[neurons].values.T)
# pc_short_wak = PCA(n_components=10).fit_transform(autocorr_wak[neurons].values.T)
# pc_short_sws = PCA(n_components=10).fit_transform(autocorr_sws[neurons].values.T)
# pc_short_rem = np.log((pc_short_rem - pc_short_rem.min(axis = 0))+1)
# pc_short_wak = np.log((pc_short_wak - pc_short_wak.min(axis = 0))+1)
# pc_short_sws = np.log((pc_short_sws - pc_short_sws.min(axis = 0))+1)
# pc_long = PCA(n_components=1).fit_transform(autocorr2_rem[neurons].values.T)
# pc_long = np.log((pc_long - pc_long.min(axis=0))+1)
# # pc_long = np.log(lambdaa.loc[neurons].values[:,np.newaxis])
# # pc_theta = np.hstack([np.cos(theta.loc[neurons,'phase']).values[:,np.newaxis],np.sin(theta.loc[neurons,'phase']).values[:,np.newaxis],np.log(theta.loc[neurons,'kappa'].values[:,np.newaxis])])
# pc_theta = np.hstack([np.log(theta.loc[neurons,'kappa'].values[:,np.newaxis])])
# pc_swr = np.hstack([np.log(rippower.loc[neurons].values[:,np.newaxis])])
# pc_theta = PCA(n_components=3).fit_transform(theta_rem[neurons].values.T)
# pc_theta = np.log((pc_theta - pc_theta.min(axis = 0))+1)
# pc_swr = PCA(n_components=3).fit_transform(swr[neurons].values.T)
# pc_swr = np.log((pc_swr - pc_swr.min(axis = 0))+1)
# pc_theta -= pc_theta.min(axis = 0)
# pc_swr -= pc_swr.min(axis = 0)
# pc_theta = np.log(pc_theta+1)
# pc_swr = np.log(pc_swr+1)
# data = []
# for tmp in [autocorr_sws[neurons].values.T,autocorr2_rem[neurons].values.T,theta_rem[neurons].values.T,swr[neurons].values.T]:
# tmp = tmp - tmp.min()
# tmp = tmp / tmp.max()
# data.append(tmp)
# data = np.hstack([pc_short_rem, pc_short_sws, pc_long, pc_short_wak, pc_long, pc_theta, pc_swr])
# data = np.hstack([pc_short_rem, pc_short_sws, pc_short_wak])
# data = np.hstack([pc_theta, pc_swr])
# data = np.vstack([ autocorr_wak[neurons].values,autocorr_rem[neurons].values,autocorr_sws[neurons].values]).T
data = np.vstack([ autocorr_wak[tokeep].values,autocorr_rem[tokeep].values,autocorr_sws[tokeep].values,
autocorr2_wak[tokeep].values,autocorr2_rem[tokeep].values,autocorr2_sws[tokeep].values,
theta_hist.xs(('wak'),1,1)[tokeep].values,theta_hist.xs(('rem'),1,1)[tokeep].values,
swr[tokeep].values]).T
labels = np.array([nucleus.index(mappings.loc[n,'nucleus']) for n in tokeep])
##########################################################################################################
# XGB
##########################################################################################################
# alldata = [ np.vstack([autocorr_wak[tokeep].values,autocorr_rem[tokeep].values,autocorr_sws[tokeep].values]),
# np.vstack([autocorr2_wak[tokeep].values,autocorr2_rem[tokeep].values,autocorr2_sws[tokeep].values]),
# np.vstack([theta_hist.xs(('wak'),1,1)[tokeep].values,theta_hist.xs(('rem'),1,1)[tokeep].values]),
# swr[tokeep].values
# ]
alldata = [ np.vstack([autocorr_wak[tokeep].values,autocorr_rem[tokeep].values,autocorr_sws[tokeep].values]).T,
swr[tokeep].values.T
]
# kl = pd.DataFrame(index = nucleus ,columns=pd.MultiIndex.from_product([['score', 'shuffle'],['auto','swr'], ['mean', 'sem']]))
# cols = np.unique(mean_score.columns.get_level_values(1))
# Monte-Carlo repetitions and number of cross-validation folds used below.
n_repeat = 1000
n_cv = 10
# Normalization constant so the Hellinger distance lies in [0, 1].
_SQRT2 = np.sqrt(2)

def hellinger(p, q):
    """Return the Hellinger distance between two discrete distributions."""
    delta = np.sqrt(p) - np.sqrt(q)
    return np.sqrt((delta ** 2).sum()) / _SQRT2
####################################
# for the three exemple of figure 6
# nucleus2 = nucleus + ['CM']
# tokeep2 = np.array([n for n in neurons if mappings.loc[n,'nucleus'] in nucleus2])
# neurontoplot = ['Mouse12-120806_18', 'Mouse17-130202_24', 'Mouse12-120819_16']
# idx = [np.where(tokeep2 == n)[0][0] for n in neurontoplot]
# alldata2 = [ np.vstack([autocorr_wak[tokeep2].values,autocorr_rem[tokeep2].values,autocorr_sws[tokeep2].values]).T,
# swr[tokeep2].values.T
# ]
# labels2 = np.array([nucleus2.index(mappings.loc[n,'nucleus']) for n in tokeep2])
# proba_aut = fit_cv(alldata2[0], labels2, n_cv, verbose = 0)
# proba_swr = fit_cv(alldata2[1], labels2, n_cv, verbose = 0)
# store = pd.HDFStore("../figures/figures_articles/figure6/example_proba.h5", 'w')
# store.put("proba_aut", pd.DataFrame(data = proba_aut[idx].T, columns = neurontoplot, index = nucleus2))
# store.put("proba_swr", pd.DataFrame(data = proba_swr[idx].T, columns = neurontoplot, index = nucleus2))
# store.close()
###################################
proba_aut = fit_cv(alldata[0], labels, n_cv, verbose = 0)
proba_swr = fit_cv(alldata[1], labels, n_cv, verbose = 0)
HL = pd.Series(index = tokeep, data = np.array([hellinger(proba_swr[i],proba_aut[i]) for i in range(len(tokeep))]))
KL = pd.Series(index = tokeep, data = np.array([entropy(proba_swr[i],proba_aut[i]) for i in range(len(tokeep))]))
HLS = pd.DataFrame(index = tokeep, columns = np.arange(n_repeat))
KLS = pd.DataFrame(index = tokeep, columns = np.arange(n_repeat))
for i in range(n_repeat):
print(i)
proba_aut = fit_cv(alldata[0], labels, n_cv, verbose = 0, shuffle = False)
proba_swr = fit_cv(alldata[1], labels, n_cv, verbose = 0, shuffle = True)
tmp = pd.Series(index = tokeep, data = np.array([hellinger(proba_swr[i],proba_aut[i]) for i in range(len(tokeep))]))
HLS[i] = tmp
tmp = pd.Series(index = tokeep, data = np.array([entropy(proba_swr[i],proba_aut[i]) for i in range(len(tokeep))]))
KLS[i] = tmp
data_directory = '/mnt/DataGuillaume/MergedData/'
# store = pd.HDFStore("../figures/figures_articles/figure6/score_hellinger.h5", 'w')
store = pd.HDFStore(data_directory+'score_hellinger.h5', 'w')
store.put('HL', HL)
store.put('HLS', HLS)
store.put('KL', KL)
store.put('KLS', KLS)
store.close()
sys.exit()
# for i, m in enumerate(cols):
# data = alldata[i].T
# test_score = pd.DataFrame(index = np.arange(n_repeat), columns = pd.MultiIndex.from_product([['test','shuffle'], nucleus]))
# for j in range(n_repeat):
# test = fit_cv(data, labels, 10, verbose = 0)
# rand = fit_cv(data, labels, 10, verbose = 0, shuffle = True)
# print(i,j)
# for k, n in enumerate(nucleus):
# idx = labels == nucleus.index(n)
# test_score.loc[j,('test',n)] = np.sum(test[idx] == nucleus.index(n))/np.sum(labels == nucleus.index(n))
# test_score.loc[j,('shuffle',n)] = np.sum(rand[idx] == nucleus.index(n))/np.sum(labels == nucleus.index(n))
# mean_score[('score',m,'mean')] = test_score['test'].mean(0)
# mean_score[('score',m,'sem')] = test_score['test'].sem(0)
# mean_score[('shuffle',m,'mean')] = test_score['shuffle'].mean(0)
# mean_score[('shuffle',m,'sem')] = test_score['shuffle'].sem(0)
# mean_score = mean_score.sort_values(('score','auto', 'mean'))
# mean_score.to_hdf(data_directory+'SCORE_XGB.h5', 'mean_score')
##########################################################################################################
# KL DIVERGENCE
##########################################################################################################
###########################################################################################################
# LOOKING AT SPLITS
###########################################################################################################
# data = np.vstack(alldata).T
# dtrain = xgb.DMatrix(data, label=labels)
# params = {'objective': "multi:softprob",
# 'eval_metric': "mlogloss", #loglikelihood loss
# 'seed': 2925, #for reproducibility
# 'silent': 1,
# 'learning_rate': 0.05,
# 'min_child_weight': 2,
# 'n_estimators': 100,
# # 'subsample': 0.5,
# 'max_depth': 5,
# 'gamma': 0.5,
# 'num_class':len(nucleus)}
# num_round = 100
# bst = xgb.train(params, dtrain, num_round)
# splits = extract_tree_threshold(bst)
# features_id = np.hstack([np.ones(alldata[i].shape[0])*i for i in range(4)])
# features = np.zeros(data.shape[1])
# for k in splits: features[int(k[1:])] = len(splits[k])
figure()
ct = 0
for i, c in enumerate(cols):
bar(np.arange(len(nucleus))+ct, mean_score[('score',c, 'mean')].values.flatten(), 0.2)
bar(np.arange(len(nucleus))+ct, mean_score[('shuffle',c, 'mean')].values.flatten(), 0.2, alpha = 0.5)
xticks(np.arange(len(nucleus)), mean_score.index.values)
ct += 0.2
show()
# tmp = mean_score['score'] - mean_score['shuffle']
# tmp = tmp.sort_values('auto')
# figure()
# ct = 0
# for i, c in enumerate(cols):
# bar(np.arange(len(nucleus))+ct, tmp[c].values.flatten(), 0.2)
# xticks(np.arange(len(nucleus)), mean_score.index.values)
# ct += 0.2
# show()
# # mean_score = pd.read_hdf("../figures/figures_articles/figure6/mean_score.h5")
# # mean_score.to_hdf("../figures/figures_articles/figure6/mean_score.h5", 'xgb')
# figure()
# ct = 0
# for i, c in enumerate(cols):
# bar(np.arange(len(nucleus))+ct, mean_score[('score',c )].values.flatten(), 0.2)
# bar(np.arange(len(nucleus))+ct, mean_score[('shuffle',c)].values.flatten(), 0.2, alpha = 0.5)
# xticks(np.arange(len(nucleus)), mean_score.index.values)
# ct += 0.2
# show()
| gpl-3.0 |
quantumlib/Cirq | cirq-core/cirq/experiments/cross_entropy_benchmarking_test.py | 1 | 5377 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import pytest
import cirq
from cirq.experiments import (
CrossEntropyResult,
CrossEntropyResultDict,
cross_entropy_benchmarking,
build_entangling_layers,
)
from cirq.experiments.cross_entropy_benchmarking import CrossEntropyPair, SpecklePurityPair
def test_cross_entropy_benchmarking():
    """End-to-end XEB on 4 qubits: all fidelity estimates should be near 1."""
    # Check that the fidelities returned from a four-qubit XEB simulation are
    # close to 1 (deviations from 1 is expected due to finite number of
    # measurements).
    simulator = cirq.Simulator()
    qubits = cirq.GridQubit.square(2)

    # Build a sequence of CZ gates.
    interleaved_ops = build_entangling_layers(qubits, cirq.CZ ** 0.91)

    # Specify a set of single-qubit rotations. Pick prime numbers for the
    # exponent to avoid evolving the system into a basis state.
    single_qubit_rots = [
        [cirq.X ** 0.37],
        [cirq.Y ** 0.73, cirq.X ** 0.53],
        [cirq.Z ** 0.61, cirq.X ** 0.43],
        [cirq.Y ** 0.19],
    ]

    # Simulate XEB using the default single-qubit gate set without two-qubit
    # gates, XEB using the specified single-qubit gate set without two-qubit
    # gates, and XEB using the specified single-qubit gate set with two-qubit
    # gate. Check that the fidelities are close to 1.0 in all cases. Also,
    # check that a single XEB fidelity is returned if a single cycle number
    # is specified.
    results_0 = cross_entropy_benchmarking(
        simulator, qubits, num_circuits=3, repetitions=1000, cycles=range(4, 20, 5)
    )
    results_1 = cross_entropy_benchmarking(
        simulator,
        qubits,
        num_circuits=3,
        repetitions=1000,
        cycles=[4, 8, 12],
        scrambling_gates_per_cycle=single_qubit_rots,
    )
    results_2 = cross_entropy_benchmarking(
        simulator,
        qubits,
        benchmark_ops=interleaved_ops,
        num_circuits=3,
        repetitions=1000,
        cycles=[4, 8, 12],
        scrambling_gates_per_cycle=single_qubit_rots,
    )
    # A scalar `cycles` should produce exactly one data point.
    results_3 = cross_entropy_benchmarking(
        simulator,
        qubits,
        benchmark_ops=interleaved_ops,
        num_circuits=3,
        repetitions=1000,
        cycles=15,
        scrambling_gates_per_cycle=single_qubit_rots,
    )
    fidelities_0 = [datum.xeb_fidelity for datum in results_0.data]
    fidelities_1 = [datum.xeb_fidelity for datum in results_1.data]
    fidelities_2 = [datum.xeb_fidelity for datum in results_2.data]
    fidelities_3 = [datum.xeb_fidelity for datum in results_3.data]
    assert np.isclose(np.mean(fidelities_0), 1.0, atol=0.1)
    assert np.isclose(np.mean(fidelities_1), 1.0, atol=0.1)
    assert np.isclose(np.mean(fidelities_2), 1.0, atol=0.1)
    assert len(fidelities_3) == 1

    # Sanity test that plot runs.
    ax = plt.subplot()
    results_1.plot(ax)
def test_cross_entropy_result_depolarizing_models():
    """Fits to noisy S * p**d data should recover the generating parameters."""
    rng = np.random.RandomState(59566)
    spam = 0.8
    decay = 0.99
    depths = range(10, 211, 20)
    # Draw order matters for reproducibility: XEB noise first, purity second.
    xeb_pairs = []
    for depth in depths:
        fidelity = spam * decay ** depth + rng.normal(scale=0.01)
        xeb_pairs.append(CrossEntropyPair(num_cycle=depth, xeb_fidelity=fidelity))
    purity_pairs = []
    for depth in depths:
        purity = spam * decay ** (2 * depth) + rng.normal(scale=0.01)
        purity_pairs.append(SpecklePurityPair(num_cycle=depth, purity=purity))
    result = CrossEntropyResult(
        data=xeb_pairs, repetitions=1000, purity_data=purity_pairs
    )
    fit = result.depolarizing_model()
    purity_fit = result.purity_depolarizing_model()
    np.testing.assert_allclose(fit.spam_depolarization, spam, atol=1e-2)
    np.testing.assert_allclose(fit.cycle_depolarization, decay, atol=1e-2)
    np.testing.assert_allclose(purity_fit.purity, decay ** 2, atol=1e-2)
def test_cross_entropy_result_repr():
    """repr of CrossEntropyResult round-trips, with and without purity data."""
    plain = CrossEntropyResult(
        data=[CrossEntropyPair(2, 0.9), CrossEntropyPair(5, 0.5)], repetitions=1000
    )
    cirq.testing.assert_equivalent_repr(plain)
    with_purity = CrossEntropyResult(
        data=[CrossEntropyPair(2, 0.9), CrossEntropyPair(5, 0.5)],
        repetitions=1000,
        purity_data=[SpecklePurityPair(2, 0.8), SpecklePurityPair(5, 0.3)],
    )
    cirq.testing.assert_equivalent_repr(with_purity)
def test_cross_entropy_result_dict_repr():
    """repr of a CrossEntropyResultDict keyed by a qubit pair round-trips."""
    qubit_pair = tuple(cirq.LineQubit.range(2))
    entry = CrossEntropyResult(
        data=[CrossEntropyPair(2, 0.9), CrossEntropyPair(5, 0.5)], repetitions=1000
    )
    wrapped = CrossEntropyResultDict(results={qubit_pair: entry})
    cirq.testing.assert_equivalent_repr(wrapped)
def test_cross_entropy_result_purity_model_fails_with_no_data():
    """Requesting a purity model without purity data must raise ValueError."""
    result = CrossEntropyResult(
        data=[
            CrossEntropyPair(num_cycle=2, xeb_fidelity=0.9),
            CrossEntropyPair(num_cycle=4, xeb_fidelity=0.8),
        ],
        repetitions=1000,
    )
    with pytest.raises(ValueError):
        result.purity_depolarizing_model()
| apache-2.0 |
shijieice/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
    """Background thread that fetches one raw batch from the data provider,
    decodes its JPEGs into a float matrix via the native library, and builds
    the label targets. The finished dict is appended to ``list_out``."""
    def __init__(self, dp, batch_num, label_offset, list_out):
        Thread.__init__(self)
        self.list_out = list_out
        self.label_offset = label_offset
        self.dp = dp
        self.batch_num = batch_num

    @staticmethod
    def load_jpeg_batch(rawdics, dp, label_offset):
        # Accept either a single raw batch dict or a list of them.
        if type(rawdics) != list:
            rawdics = [rawdics]
        nc_total = sum(len(r['data']) for r in rawdics)
        jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
        labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
        # The native decoder fills img_mat in place; it also handles cropping
        # and multiview replication (hence the dp.data_mult row factor).
        img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
        lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
        dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
        # labvec: one randomly chosen label per case (-1 when unlabeled);
        # labmat: multi-hot indicator over all labels of the case.
        lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
        for c in xrange(nc_total):
            lab_mat[c, [z + label_offset for z in labels[c]]] = 1
        lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
        return {'data': img_mat[:nc_total * dp.data_mult,:],
                'labvec': lab_vec[:nc_total * dp.data_mult,:],
                'labmat': lab_mat[:nc_total * dp.data_mult,:]}

    def run(self):
        # Fetch, decode, and hand the result back through the shared list.
        rawdics = self.dp.get_batch(self.batch_num)
        p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
                                                  self.dp,
                                                  self.label_offset)
        self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
    """Background worker that samples PCA-aligned color noise.

    Draws ``num_noise`` standard-normal samples per color channel, scales them
    by the per-component PCA standard deviations, projects them back through
    the PCA basis, and appends the (num_noise, 3) result to ``list_out``.
    """
    def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
        Thread.__init__(self)
        self.pca_stdevs = pca_stdevs
        self.pca_vecs = pca_vecs
        self.num_noise = num_noise
        self.list_out = list_out

    def run(self):
        gaussians = nr.randn(self.num_noise, 3).astype(n.single)
        scaled = gaussians * self.pca_stdevs.T
        self.list_out.append(n.dot(scaled, self.pca_vecs.T))
class ImageDataProvider(LabeledDataProvider):
    """Serves cropped, mean-subtracted JPEG image batches.

    Decoding happens on a background JPEGBatchLoaderThread, double-buffered
    so batch N+1 is decoded while the model trains on batch N. Optional PCA
    color noise is likewise generated on its own thread.
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.data_mean = self.batch_meta['data_mean'].astype(n.single)
        # color_pca: (stdevs, eigenvectors) used for Krizhevsky-style color noise.
        self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
        self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
        self.color_noise_coeff = dp_params['color_noise']
        self.num_colors = 3
        self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
        self.mini = dp_params['minibatch_size']
        # inner_size is the crop size; 0/negative means "no cropping".
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
        self.inner_pixels = self.inner_size **2
        self.border_size = (self.img_size - self.inner_size) / 2
        self.multiview = dp_params['multiview_test'] and test
        # 5 crops (4 corners + center) x 2 reflections at multiview test time.
        self.num_views = 5*2
        self.data_mult = self.num_views if self.multiview else 1
        self.batch_size = self.batch_meta['batch_size']
        self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
        self.scalar_mean = dp_params['scalar_mean']
        # Maintain pointers to previously-returned data matrices so they don't get garbage collected.
        self.data = [None, None] # These are pointers to previously-returned data matrices
        self.loader_thread, self.color_noise_thread = None, None
        self.convnet = dp_params['convnet']
        self.num_noise = self.batch_size
        self.batches_generated, self.loaders_started = 0, 0
        # Crop the stored full-image mean down to the inner region.
        self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
        if self.scalar_mean >= 0:
            # A non-negative scalar_mean overrides the per-pixel mean entirely.
            self.data_mean_crop = self.scalar_mean

    def showimg(self, img):
        # Debug helper: display one flattened (3*size*size,) image.
        from matplotlib import pylab as pl
        pixels = img.shape[0] / 3
        size = int(sqrt(pixels))
        img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
        pl.imshow(img, interpolation='nearest')
        pl.show()

    def get_data_dims(self, idx=0):
        # idx 0: image data, idx 2: label matrix, otherwise scalar label vector.
        if idx == 0:
            return self.inner_size**2 * 3
        if idx == 2:
            return self.get_num_classes()
        return 1

    def start_loader(self, batch_idx):
        # Kick off background decoding of the given batch.
        self.load_data = []
        self.loader_thread = JPEGBatchLoaderThread(self,
                                                   self.batch_range[batch_idx],
                                                   self.label_offset,
                                                   self.load_data)
        self.loader_thread.start()

    def start_color_noise_maker(self):
        color_noise_list = []
        self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
        self.color_noise_thread.start()
        return color_noise_list

    def set_labels(self, datadic):
        pass

    def get_data_from_loader(self):
        # First call: start a loader, wait for it, then prefetch the next batch.
        if self.loader_thread is None:
            self.start_loader(self.batch_idx)
            self.loader_thread.join()
            self.data[self.d_idx] = self.load_data[0]
            self.start_loader(self.get_next_batch_idx())
        else:
            # Set the argument to join to 0 to re-enable batch reuse
            self.loader_thread.join()
            if not self.loader_thread.is_alive():
                self.data[self.d_idx] = self.load_data[0]
                self.start_loader(self.get_next_batch_idx())
            #else:
            #    print "Re-using batch"
        self.advance_batch()

    def add_color_noise(self):
        # At this point the data already has 0 mean.
        # So I'm going to add noise to it, but I'm also going to scale down
        # the original data. This is so that the overall scale of the training
        # data doesn't become too different from the test data.
        s = self.data[self.d_idx]['data'].shape
        cropped_size = self.get_data_dims(0) / 3
        ncases = s[0]

        if self.color_noise_thread is None:
            self.color_noise_list = self.start_color_noise_maker()
            self.color_noise_thread.join()
            self.color_noise = self.color_noise_list[0]
            self.color_noise_list = self.start_color_noise_maker()
        else:
            self.color_noise_thread.join(0)
            if not self.color_noise_thread.is_alive():
                self.color_noise = self.color_noise_list[0]
                self.color_noise_list = self.start_color_noise_maker()

        # Reshape so each color plane gets its own per-case noise value.
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
        self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
        self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
        self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.

    def get_next_batch(self):
        self.d_idx = self.batches_generated % 2
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.get_data_from_loader()
        # Subtract mean
        self.data[self.d_idx]['data'] -= self.data_mean_crop
        if self.color_noise_coeff > 0 and not self.test:
            self.add_color_noise()
        self.batches_generated += 1
        # Returned matrices are transposed to (dims, cases) layout.
        return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]

    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data, add_mean=True):
        # NOTE(review): both branches of this conditional are identical —
        # likely vestigial; confirm against upstream before simplifying.
        mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
        return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
    """Provider for 32x32 CIFAR batches held fully in memory.

    Crops each image to inner_size (random position + random horizontal flip
    at train time; center crop or 9 fixed views at test time) and subtracts
    the cropped data mean.
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.img_size = 32
        self.num_colors = 3
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
        self.border_size = (self.img_size - self.inner_size) / 2
        self.multiview = dp_params['multiview_test'] and test
        # 3x3 grid of crop positions at multiview test time.
        self.num_views = 9
        self.scalar_mean = dp_params['scalar_mean']
        self.data_mult = self.num_views if self.multiview else 1
        self.data_dic = []
        for i in batch_range:
            # Load every batch up front; labels are tiled to match data_mult.
            self.data_dic += [unpickle(self.get_data_file_name(i))]
            self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
            self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
            self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
        # Double-buffered crop output, reused on alternate calls.
        self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]

        self.batches_generated = 0
        # Per-pixel mean restricted to the inner crop region.
        self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))

    def get_next_batch(self):
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        bidx = batchnum - self.batch_range[0]

        cropped = self.cropped_data[self.batches_generated % 2]

        self.__trim_borders(self.data_dic[bidx]['data'], cropped)
        cropped -= self.data_mean
        self.batches_generated += 1
        return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]

    def get_data_dims(self, idx=0):
        # idx 0: image data dims; otherwise scalar labels.
        return self.inner_size**2 * self.num_colors if idx == 0 else 1

    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data):
        return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)

    def __trim_borders(self, x, target):
        # View the flat batch as (colors, height, width, cases).
        y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])

        if self.test: # don't need to loop over cases
            if self.multiview:
                # 9 deterministic crop origins arranged on a 3x3 grid.
                start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
                                   (self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
                                   (self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
                end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
                for i in xrange(self.num_views):
                    target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
            else:
                pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
                target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
        else:
            for c in xrange(x.shape[1]): # loop over cases
                startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
                endY, endX = startY + self.inner_size, startX + self.inner_size
                pic = y[:,startY:endY,startX:endX, c]
                if nr.randint(2) == 0: # also flip the image with 50% probability
                    pic = pic[:,:,::-1]
                target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
    """Random-data provider used to smoke-test a convnet + logreg pipeline."""
    def __init__(self, data_dim):
        LabeledDummyDataProvider.__init__(self, data_dim)
        # Assumes 3 color channels; data_dim must be 3 * img_size**2.
        self.img_size = int(sqrt(data_dim/3))

    def get_next_batch(self):
        epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
        dic = {'data': dic[0], 'labels': dic[1]}
        print dic['data'].shape, dic['labels'].shape
        return epoch, batchnum, [dic['data'], dic['labels']]

    # Returns the dimensionality of the two data matrices returned by get_next_batch
    def get_data_dims(self, idx=0):
        return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
poidl/yassy | doc/python/frei_appendix_B1.py | 1 | 1579 | #!/bin/python
# pylint: disable=C0103
"""Python translation of Frei Appendix B1."""
# Frei, B.: Digital sound generation. Institute for Computer Music and
# Sound Technology (ICST) Zurich University of the Arts.
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# parameters
fs = 48000      # sampling rate in Hz
fc = 18300      # cutoff frequency of the sinc in Hz
rlen = 10       # impulse length in sampling intervals (half-width)
ppiv = 100      # points per sampling interval (oversampling of the table)
beta = 9        # Kaiser window shape parameter
apof = 0.9      # apodization strength
apobeta = 0.7   # Kaiser shape parameter of the apodization window
pts = ppiv * rlen + 1
x1 = np.arange(pts)
# Symmetric time axis in sampling intervals; the tiny offset avoids 0/0 in sinc.
x2 = rlen * 2 * (x1 - (pts - 1) / 2 + 0.00001) / (pts - 1)
x3 = np.pi * fc / fs * x2
h = np.sin(x3) / x3          # bandlimited impulse: sinc at the cutoff frequency
w = np.kaiser(pts, beta)
g = w * h                    # windowed impulse
# apodization and normalization
aw = 1 - apof * np.kaiser(pts, apobeta)
g = aw * g
g = g / max(g)

# diagrams
figname = 'frei_appendixB1a.svg'
fig = plt.figure()
plt.plot(x2 / 2, g)
plt.xlim(-rlen / 2, rlen / 2)
plt.ylim(- 0.2, 1.0001)
plt.xlabel('Time in Sampling Intervals')
plt.title('Bandlimited Impulse')
plt.grid()
fig.savefig('../figures/' + figname)

# Zero-pad before the FFT to interpolate the spectrum.
zpad = 20
g2 = np.concatenate([g, np.zeros((zpad - 1) * pts)])
wspec = np.abs(np.fft.rfft(g2, norm="ortho"))
wspec = wspec / max(wspec)
# cut = 0.00001
# wspec[wspec > cut] = cut
fmax = 60000    # highest frequency shown, in Hz
rng = round(rlen * zpad * fmax / fs)
xidx = np.arange(rng + 1)
figname = 'frei_appendixB1b.svg'
fig = plt.figure()
plt.semilogy((fmax / 1000) * xidx / rng, wspec[: (rng + 1)])
plt.ylim(1e-5, 1)
plt.xlabel('Frequency in kHz')
plt.title('Amplitude Spectrum')
plt.grid()
# markers at 20 kHz, fs - 20 kHz and fs
plt.axvline(20, color="g")
plt.axvline(fs / 1000 - 20, color="r")
plt.axvline(fs / 1000, color="r")
fig.savefig('../figures/' + figname)
numenta-archive/htmresearch | projects/vehicle-control/agent/run_sm.py | 6 | 7819 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from nupic.encoders.coordinate import CoordinateEncoder
from nupic.encoders.scalar import ScalarEncoder
from nupic.algorithms.monitor_mixin.trace import CountsTrace
from sensorimotor.extended_temporal_memory import ApicalTiebreakPairMemory
from htmresearch.support.apical_tm_pair_monitor_mixin import (
ApicalTMPairMonitorMixin)
class MonitoredApicalTiebreakPairMemory(
ApicalTMPairMonitorMixin, ApicalTiebreakPairMemory): pass
SCALE = 5
RADIUS = 10
class Agent(object):
def __init__(self):
self.encoder = CoordinateEncoder(n=1024,
w=21)
self.motorEncoder = ScalarEncoder(21, -1, 1,
n=1024)
self.tm = MonitoredApicalTiebreakPairMemory(
columnDimensions=[2048],
basalInputDimensions: (999999,) # Dodge input checking.
cellsPerColumn=1,
initialPermanence=0.5,
connectedPermanence=0.6,
permanenceIncrement=0.1,
permanenceDecrement=0.02,
minThreshold=35,
activationThreshold=35,
maxNewSynapseCount=40)
self.plotter = Plotter(self.tm, showOverlaps=False, showOverlapsValues=False)
self.lastState = None
self.lastAction = None
self.prevMotorPattern = ()
def sync(self, outputData):
if not ("location" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
reset = outputData.get("reset") or False
if reset:
print "Reset."
self.tm.reset()
location = outputData["location"]
steer = outputData["steer"]
x = int(location["x"] * SCALE)
z = int(location["z"] * SCALE)
coordinate = numpy.array([x, z])
encoding = self.encoder.encode((coordinate, RADIUS))
motorEncoding = self.motorEncoder.encode(steer)
sensorPattern = set(encoding.nonzero()[0])
motorPattern = set(motorEncoding.nonzero()[0])
self.tm.compute(sensorPattern,
activeCellsExternalBasal=motorPattern,
reinforceCandidatesExternalBasal=self.prevMotorPattern,
growthCandidatesExternalBasal=self.prevMotorPattern)
print self.tm.mmPrettyPrintMetrics(self.tm.mmGetDefaultMetrics())
self.plotter.update(encoding, reset)
if reset:
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
self.prevMotorPattern = motorPattern
class Plotter(object):
    """Accumulates per-step TM statistics and renders them to PNG files.

    ``update`` is called once per step; ``render`` (typically on reset) saves
    a metrics figure and, optionally, a pairwise-encoding-overlap figure.
    """
    def __init__(self, tm, showOverlaps=False, showOverlapsValues=False):
        self.tm = tm
        self.showOverlaps = showOverlaps
        self.showOverlapsValues = showOverlapsValues

        self.encodings = []
        self.resets = []
        self.numSegmentsPerCell = []
        self.numSynapsesPerSegment = []

        # Imported lazily so headless runs only pay for matplotlib when a
        # Plotter is actually constructed.
        import matplotlib.pyplot as plt
        self.plt = plt
        import matplotlib.cm as cm
        self.cm = cm

        from pylab import rcParams

        if self.showOverlaps and self.showOverlapsValues:
            rcParams.update({'figure.figsize': (20, 20)})
        else:
            rcParams.update({'figure.figsize': (6, 12)})

        rcParams.update({'figure.autolayout': True})
        rcParams.update({'figure.facecolor': 'white'})
        rcParams.update({'ytick.labelsize': 8})

    def update(self, encoding, reset):
        self.encodings.append(encoding)
        self.resets.append(reset)

        # TODO: Deal with empty segments / unconnected synapses
        numSegmentsPerCell = [len(segments) for segments in
                              self.tm.connections._segmentsForCell.values()]
        self.numSegmentsPerCell.append(numpy.array(numSegmentsPerCell))

        numSynapsesPerSegment = [len(synapses) for synapses in
                                 self.tm.connections._synapsesForSegment.values()]
        self.numSynapsesPerSegment.append(numpy.array(numSynapsesPerSegment))

    def render(self):
        # Figures are saved with a timestamp so successive renders don't clobber.
        timestamp = int(time.time())
        self.plt.figure(1)
        self.plt.clf()
        self._renderMetrics(timestamp)

        if self.showOverlaps:
            self.plt.figure(2)
            self.plt.clf()
            self._renderOverlaps(timestamp)

    def _renderMetrics(self, timestamp):
        traces = self.tm.mmGetDefaultTraces()
        traces = [trace for trace in traces if type(trace) is CountsTrace]

        t = len(traces)
        n = t + 2  # extra rows for the two distribution plots

        for i in xrange(t):
            trace = traces[i]
            self.plt.subplot(n, 1, i+1)
            self._plot(trace.data, trace.title)

        self.plt.subplot(n, 1, t+1)
        self._plotDistributions(self.numSegmentsPerCell, "# segments per cell")

        self.plt.subplot(n, 1, t+2)
        self._plotDistributions(self.numSynapsesPerSegment, "# synapses per segment")

        self.plt.draw()
        self.plt.savefig("sm-{0}_A.png".format(timestamp))

    def _renderOverlaps(self, timestamp):
        self.plt.subplot(1, 1, 1)

        overlaps = self._computeOverlaps()
        self._imshow(overlaps, "Overlaps", aspect=None)

        # Mark reset boundaries on both axes of the overlap matrix.
        for i in self._computeResetIndices():
            self.plt.axvline(i, color='black', alpha=0.5)
            self.plt.axhline(i, color='black', alpha=0.5)

        if self.showOverlapsValues:
            for i in range(len(overlaps)):
                for j in range(len(overlaps[i])):
                    overlap = "%.1f" % overlaps[i][j]
                    self.plt.annotate(overlap, xy=(i, j), fontsize=6, color='red', verticalalignment='center', horizontalalignment='center')

        self.plt.draw()
        self.plt.savefig("sm-{0}_B.png".format(timestamp))

    def _computeOverlaps(self):
        # Pairwise overlap of recorded encodings, normalized by the larger
        # of the two active-bit counts (symmetric, in [0, 1]).
        overlaps = []
        encodings = self.encodings

        for i in range(len(encodings)):
            row = []

            for j in range(len(encodings)):
                n = max(encodings[i].sum(), encodings[j].sum())
                overlap = (encodings[i] & encodings[j]).sum() / float(n)
                row.append(overlap)

            overlaps.append(row)

        return overlaps

    def _computeResetIndices(self):
        return numpy.array(self.resets).nonzero()[0]

    def _plot(self, data, title):
        self.plt.plot(range(len(data)), data)
        self._finishPlot(data, title)

    def _finishPlot(self, data, title):
        self.plt.title(title)
        self.plt.xlim(0, len(data))

        for i in self._computeResetIndices():
            self.plt.axvline(i, color='black', alpha=0.5)

    def _imshow(self, data, title, aspect='auto'):
        self.plt.title(title)
        self.plt.imshow(data,
                        cmap=self.cm.Greys,
                        interpolation="nearest",
                        aspect=aspect,
                        vmin=0,
                        vmax=1)

    def _plotDistributions(self, data, title):
        # Plot the mean and max of each recorded distribution over time.
        means = [numpy.mean(x) if len(x) else 0 for x in data]
        maxs = [numpy.max(x) if len(x) else 0 for x in data]
        self.plt.plot(range(len(data)), means, label='mean')
        self.plt.plot(range(len(data)), maxs, label='max')
        self.plt.legend(loc='lower right')
        self._finishPlot(data, title)
if __name__ == "__main__":
    # Start the Unity client server loop with a fresh agent; Server drives
    # agent.sync() with each frame of data.
    agent = Agent()
    Server(agent)
| agpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/contour.py | 10 | 68919 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib._contour as _contour
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
import matplotlib.mathtext as mathtext
import matplotlib.patches as mpatches
import matplotlib.texmanager as texmanager
import matplotlib.transforms as mtrans
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
    """
    Text subclass whose get_rotation treats the stored rotation as an angle
    in data coordinates (or whatever transform is set) and converts it to the
    pixel-space equivalent on every query, so labels stay aligned with their
    contours when the axes aspect changes.
    """
    def get_rotation(self):
        data_angle = text.Text.get_rotation(self)
        anchor = np.array([self.get_position()])
        pixel_angles = self.get_transform().transform_angles(
            np.array([data_angle]), anchor)
        return pixel_angles[0]
class ContourLabeler(object):
"""Mixin to provide labelling capability to ContourSet"""
    def clabel(self, *args, **kwargs):
        """
        Label a contour plot.

        Call signature::

          clabel(cs, **kwargs)

        Adds labels to line contours in *cs*, where *cs* is a
        :class:`~matplotlib.contour.ContourSet` object returned by
        contour.

        ::

          clabel(cs, v, **kwargs)

        only labels contours listed in *v*.

        Optional keyword arguments:

          *fontsize*:
            size in points or relative size e.g., 'smaller', 'x-large'

          *colors*:
            - if *None*, the color of each label matches the color of
              the corresponding contour

            - if one string color, e.g., *colors* = 'r' or *colors* =
              'red', all labels will be plotted in this color

            - if a tuple of matplotlib color args (string, float, rgb, etc),
              different labels will be plotted in different colors in the order
              specified

          *inline*:
            controls whether the underlying contour is removed or
            not. Default is *True*.

          *inline_spacing*:
            space in pixels to leave on each side of label when
            placing inline. Defaults to 5. This spacing will be
            exact for labels at locations where the contour is
            straight, less so for labels on curved contours.

          *fmt*:
            a format string for the label. Default is '%1.3f'
            Alternatively, this can be a dictionary matching contour
            levels with arbitrary strings to use for each contour level
            (i.e., fmt[level]=string), or it can be any callable, such
            as a :class:`~matplotlib.ticker.Formatter` instance, that
            returns a string when called with a numeric contour level.

          *manual*:
            if *True*, contour labels will be placed manually using
            mouse clicks. Click the first button near a contour to
            add a label, click the second button (or potentially both
            mouse buttons at once) to finish adding labels. The third
            button can be used to remove the last label added, but
            only if labels are not inline. Alternatively, the keyboard
            can be used to select label locations (enter to end label
            placement, delete or backspace act like the third mouse button,
            and any other key will select a label location).

            *manual* can be an iterable object of x,y tuples. Contour labels
            will be created as if mouse is clicked at each x,y positions.

          *rightside_up*:
            if *True* (default), label rotations will always be plus
            or minus 90 degrees from level.

          *use_clabeltext*:
            if *True* (default is False), ClabelText class (instead of
            matplotlib.Text) is used to create labels. ClabelText
            recalculates rotation angles of texts during the drawing time,
            therefore this can be used if aspect of the axes changes.

        .. plot:: mpl_examples/pylab_examples/contour_demo.py
        """

        """
        NOTES on how this all works:

        clabel basically takes the input arguments and uses them to
        add a list of "label specific" attributes to the ContourSet
        object.  These attributes are all of the form label* and names
        should be fairly self explanatory.

        Once these attributes are set, clabel passes control to the
        labels method (case of automatic label placement) or
        BlockingContourLabeler (case of manual label placement).
        """

        fontsize = kwargs.get('fontsize', None)
        inline = kwargs.get('inline', 1)
        inline_spacing = kwargs.get('inline_spacing', 5)
        self.labelFmt = kwargs.get('fmt', '%1.3f')
        _colors = kwargs.get('colors', None)

        self._use_clabeltext = kwargs.get('use_clabeltext', False)

        # Detect if manual selection is desired and remove from argument list
        self.labelManual = kwargs.get('manual', False)

        self.rightside_up = kwargs.get('rightside_up', True)

        # No positional args: label every level. One positional arg: label
        # only the listed levels, validating they all exist.
        if len(args) == 0:
            levels = self.levels
            indices = list(xrange(len(self.cvalues)))
        elif len(args) == 1:
            levlabs = list(args[0])
            indices, levels = [], []
            for i, lev in enumerate(self.levels):
                if lev in levlabs:
                    indices.append(i)
                    levels.append(lev)
            if len(levels) < len(levlabs):
                msg = "Specified levels " + str(levlabs)
                msg += "\n don't match available levels "
                msg += str(self.levels)
                raise ValueError(msg)
        else:
            raise TypeError("Illegal arguments to clabel, see help(clabel)")
        self.labelLevelList = levels
        self.labelIndiceList = indices

        self.labelFontProps = font_manager.FontProperties()
        self.labelFontProps.set_size(fontsize)
        font_size_pts = self.labelFontProps.get_size_in_points()
        self.labelFontSizeList = [font_size_pts] * len(levels)

        # Either color labels like their contours (labelMappable is self) or
        # build a dedicated colormap from the user-supplied color sequence.
        if _colors is None:
            self.labelMappable = self
            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
        else:
            cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
            self.labelCValueList = list(xrange(len(self.labelLevelList)))
            self.labelMappable = cm.ScalarMappable(cmap=cmap,
                                                   norm=colors.NoNorm())

        self.labelXYs = []

        if cbook.iterable(self.labelManual):
            # Treat manual as a pre-supplied sequence of click positions.
            for x, y in self.labelManual:
                self.add_label_near(x, y, inline,
                                    inline_spacing)
        elif self.labelManual:
            print('Select label locations manually using first mouse button.')
            print('End manual selection with second mouse button.')
            if not inline:
                print('Remove last label by clicking third mouse button.')

            blocking_contour_labeler = BlockingContourLabeler(self)
            blocking_contour_labeler(inline, inline_spacing)
        else:
            self.labels(inline, inline_spacing)

        # Hold on to some old attribute names.  These are deprecated and will
        # be removed in the near future (sometime after 2008-08-01), but
        # keeping for now for backwards compatibility
        self.cl = self.labelTexts
        self.cl_xy = self.labelXYs
        self.cl_cvalues = self.labelCValues

        self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
        return self.labelTextsList
def print_label(self, linecontour, labelwidth):
    "Return *False* if contours are too short for a label."
    # A contour with many points relative to the label size is always
    # long enough; otherwise require its bounding box to exceed the
    # label width (with 20% slack) in at least one direction.
    if len(linecontour) > 10 * labelwidth:
        return True
    extents = np.ptp(linecontour, axis=0)  # (x span, y span)
    return bool(np.any(extents > 1.2 * labelwidth))
def too_close(self, x, y, lw):
    "Return *True* if a label is already near this location."
    # A candidate is "too close" when any existing label center lies
    # within 1.2 label-widths of it.
    threshold = 1.2 * lw
    return any(np.hypot(x - lx, y - ly) < threshold
               for lx, ly in self.labelXYs)
def get_label_coords(self, distances, XX, YY, ysize, lw):
    """
    Return x, y, and the index of a label location.

    Candidate rows are tried in order of increasing deviation of the
    contour from a straight line; the first candidate that is not
    already crowded by an existing label wins.  If every candidate is
    rejected, the flattest (first-ranked) one is used anyway.
    """
    hysize = int(ysize / 2)
    ranked = np.argsort(distances)
    for candidate in ranked:
        xc = XX[candidate][hysize]
        yc = YY[candidate][hysize]
        if not self.too_close(xc, yc, lw):
            return xc, yc, candidate
    # All candidates rejected: fall back to the flattest segment.
    fallback = ranked[0]
    return XX[fallback][hysize], YY[fallback][hysize], fallback
def get_label_width(self, lev, fmt, fsize):
    """
    Return the width of the label in points.

    *lev* may be a preformatted string or a numeric level (formatted
    via *fmt*); *fsize* is the font size in points.  TeX and mathtext
    labels are measured with their respective renderers; plain text
    width is estimated from the character count.
    """
    if not cbook.is_string_like(lev):
        lev = self.get_text(lev, fmt)
    # Strip math delimiters and determine which renderer is needed.
    lev, ismath = text.Text.is_math_text(lev)
    if ismath == 'TeX':
        # Measure usetex labels with a (cached) TexManager.
        if not hasattr(self, '_TeX_manager'):
            self._TeX_manager = texmanager.TexManager()
        lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,
                                                                   fsize)
    elif ismath:
        # Render through the (cached) mathtext parser and use the
        # resulting image width.
        if not hasattr(self, '_mathtext_parser'):
            self._mathtext_parser = mathtext.MathTextParser('bitmap')
        img, _ = self._mathtext_parser.parse(lev, dpi=72,
                                             prop=self.labelFontProps)
        lw = img.get_width()  # at dpi=72, the units are PostScript points
    else:
        # width is much less than "font size"
        lw = (len(lev)) * fsize * 0.6
    return lw
def get_real_label_width(self, lev, fmt, fsize):
    """
    This computes actual onscreen label width.
    This uses some black magic to determine onscreen extent of non-drawn
    label. This magic may not be very robust.

    This method is not being used, and may be modified or removed.
    """
    # Find middle of axes
    xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)
    # Temporarily create text object, styled like a real label, but
    # never added to the axes.
    t = text.Text(xx[0], xx[1])
    self.set_label_props(t, self.get_text(lev, fmt), 'k')
    # Some black magic to get onscreen extent
    # NOTE: This will only work for already drawn figures, as the canvas
    # does not have a renderer otherwise. This is the reason this function
    # can't be integrated into the rest of the code.
    bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
    # difference in pixel extent of image
    lw = np.diff(bbox.corners()[0::2, 0])[0]
    return lw
def set_label_props(self, label, text, color):
    "set the label properties - color, fontsize, text"
    label.set_text(text)
    label.set_color(color)
    # All labels share the font properties stored on the labeler.
    label.set_fontproperties(self.labelFontProps)
    # Clip to the axes so labels do not spill outside the plot frame.
    label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
    "get the text of the label"
    # Preformatted strings pass straight through unchanged.
    if cbook.is_string_like(lev):
        return lev
    # Otherwise *fmt* may be a lookup table, a formatting callable,
    # or a %-style format string.
    if isinstance(fmt, dict):
        return fmt[lev]
    if six.callable(fmt):
        return fmt(lev)
    return fmt % lev
def locate_label(self, linecontour, labelwidth):
    """
    Find a good place to plot a label (relatively flat
    part of the contour).
    """
    nsize = len(linecontour)
    # Split the contour into xsize blocks of ysize consecutive points;
    # each block is one label-width-long candidate segment.
    if labelwidth > 1:
        xsize = int(np.ceil(nsize / labelwidth))
    else:
        xsize = 1
    if xsize == 1:
        ysize = nsize
    else:
        ysize = int(labelwidth)
    # np.resize repeats points as needed so the blocks tile evenly.
    XX = np.resize(linecontour[:, 0], (xsize, ysize))
    YY = np.resize(linecontour[:, 1], (xsize, ysize))
    # I might have fouled up the following:
    yfirst = YY[:, 0].reshape(xsize, 1)
    ylast = YY[:, -1].reshape(xsize, 1)
    xfirst = XX[:, 0].reshape(xsize, 1)
    xlast = XX[:, -1].reshape(xsize, 1)
    # Cross product: proportional to each point's distance from the
    # chord joining the segment's endpoints.
    s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
    # Chord lengths.  NOTE(review): a zero-length chord (segment whose
    # endpoints coincide) would divide by zero below -- presumably the
    # closed-polygon padding in labels() avoids this; confirm.
    L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
    # Total deviation of each candidate segment from a straight line.
    dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
    x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
    # There must be a more efficient way...
    # Map the chosen (x, y) back to its index in the original contour.
    lc = [tuple(l) for l in linecontour]
    dind = lc.index((x, y))
    return x, y, dind
def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
    """
    This function calculates the appropriate label rotation given
    the linecontour coordinates in screen units, the index of the
    label location and the label width.

    It will also break contour and calculate inlining if *lc* is
    not empty (lc defaults to the empty list if None). *spacing*
    is the space around the label in pixels to leave empty.

    Do both of these tasks at once to avoid calling mlab.path_length
    multiple times, which is relatively costly.

    The method used here involves calculating the path length
    along the contour in pixel coordinates and then looking
    approximately label width / 2 away from central point to
    determine rotation and then to break contour if desired.
    """
    if lc is None:
        lc = []
    # Half the label width
    hlw = lw / 2.0
    # Check if closed and, if so, rotate contour so label is at edge
    closed = mlab.is_closed_polygon(slc)
    if closed:
        slc = np.r_[slc[ind:-1], slc[:ind + 1]]
        if len(lc):  # Rotate lc also if not empty
            lc = np.r_[lc[ind:-1], lc[:ind + 1]]
        ind = 0
    # Path length in pixel space, measured from the label location.
    pl = mlab.path_length(slc)
    pl = pl - pl[ind]
    # Use linear interpolation to get points around label
    xi = np.array([-hlw, hlw])
    if closed:  # Look at end also for closed contours
        dp = np.array([pl[-1], 0])
    else:
        dp = np.zeros_like(xi)
    ll = mlab.less_simple_linear_interpolation(pl, slc, dp + xi,
                                               extrap=True)
    # get vector in pixel space coordinates from one point to other
    dd = np.diff(ll, axis=0).ravel()
    # Get angle of vector - must be calculated in pixel space for
    # text rotation to work correctly
    if np.all(dd == 0):  # Must deal with case of zero length label
        rotation = 0.0
    else:
        rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
    if self.rightside_up:
        # Fix angle so text is never upside-down
        if rotation > 90:
            rotation = rotation - 180.0
        if rotation < -90:
            rotation = 180.0 + rotation
    # Break contour if desired
    nlc = []
    if len(lc):
        # Expand range by spacing
        xi = dp + xi + np.array([-spacing, spacing])
        # Get indices near points of interest
        I = mlab.less_simple_linear_interpolation(
            pl, np.arange(len(pl)), xi, extrap=False)
        # If those indices aren't beyond contour edge, find x,y
        if (not np.isnan(I[0])) and int(I[0]) != I[0]:
            xy1 = mlab.less_simple_linear_interpolation(
                pl, lc, [xi[0]])
        if (not np.isnan(I[1])) and int(I[1]) != I[1]:
            xy2 = mlab.less_simple_linear_interpolation(
                pl, lc, [xi[1]])
        # Round to integer values but keep as float
        # To allow check against nan below
        I = [np.floor(I[0]), np.ceil(I[1])]
        # Actually break contours
        # NOTE(review): xy1/xy2 are only bound above when I[k] is
        # non-integral; if a break lands exactly on a vertex the
        # appends below would raise UnboundLocalError -- confirm
        # whether this can occur in practice.
        if closed:
            # This will remove contour if shorter than label
            if np.all(~np.isnan(I)):
                nlc.append(np.r_[xy2, lc[int(I[1]):int(I[0]) + 1], xy1])
        else:
            # These will remove pieces of contour if they have length zero
            if not np.isnan(I[0]):
                nlc.append(np.r_[lc[:int(I[0]) + 1], xy1])
            if not np.isnan(I[1]):
                nlc.append(np.r_[xy2, lc[int(I[1]):]])
        # The current implementation removes contours completely
        # covered by labels. Uncomment line below to keep
        # original contour if this is the preferred behavior.
        # if not len(nlc): nlc = [ lc ]
    return rotation, nlc
def _get_label_text(self, x, y, rotation):
    """Create a centered Text label at pixel position (x, y)."""
    # (x, y) is in display space; convert to data coordinates so the
    # artist tracks the axes.
    dx, dy = self.ax.transData.inverted().transform_point((x, y))
    t = text.Text(dx, dy, rotation=rotation,
                  horizontalalignment='center',
                  verticalalignment='center')
    return t
def _get_label_clabeltext(self, x, y, rotation):
    """Create a centered ClabelText label at pixel position (x, y)."""
    # x, y, rotation is given in pixel coordinates.  Convert them to
    # the data coordinate and create a label using the ClabelText
    # class.  This way, the rotation of the clabel stays aligned with
    # the contour line.
    transDataInv = self.ax.transData.inverted()
    dx, dy = transDataInv.transform_point((x, y))
    # Convert the screen-space angle into the equivalent data-space
    # angle at this location.
    drotation = transDataInv.transform_angles(np.array([rotation]),
                                              np.array([[x, y]]))
    t = ClabelText(dx, dy, rotation=drotation[0],
                   horizontalalignment='center',
                   verticalalignment='center')
    return t
def _add_label(self, t, x, y, lev, cvalue):
    """Style Text artist *t* as a label and register it on the axes."""
    color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
    _text = self.get_text(lev, self.labelFmt)
    self.set_label_props(t, _text, color)
    # Keep the bookkeeping lists (artists, cvalues, positions) in sync.
    self.labelTexts.append(t)
    self.labelCValues.append(cvalue)
    self.labelXYs.append((x, y))
    # Add label to plot here - useful for manual mode label selection
    self.ax.add_artist(t)
def add_label(self, x, y, rotation, lev, cvalue):
    """
    Add contour label using :class:`~matplotlib.text.Text` class.
    """
    # Build the Text artist and hand it to the shared registration
    # helper in one step.
    self._add_label(self._get_label_text(x, y, rotation),
                    x, y, lev, cvalue)
def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
    """
    Add contour label using :class:`ClabelText` class.
    """
    # The ClabelText factory converts the pixel-space (x, y, rotation)
    # into data coordinates so the label rotation follows the contour;
    # registration is shared with add_label().
    self._add_label(self._get_label_clabeltext(x, y, rotation),
                    x, y, lev, cvalue)
def add_label_near(self, x, y, inline=True, inline_spacing=5,
                   transform=None):
    """
    Add a label near the point (x, y). If transform is None
    (default), (x, y) is in data coordinates; if transform is
    False, (x, y) is in display coordinates; otherwise, the
    specified transform will be used to translate (x, y) into
    display coordinates.

    *inline*:
      controls whether the underlying contour is removed or
      not. Default is *True*.

    *inline_spacing*:
      space in pixels to leave on each side of label when
      placing inline. Defaults to 5. This spacing will be
      exact for labels at locations where the contour is
      straight, less so for labels on curved contours.
    """
    if transform is None:
        transform = self.ax.transData
    if transform:
        x, y = transform.transform_point((x, y))
    # find the nearest contour _in screen units_
    conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
        x, y, self.labelIndiceList)[:5]
    # The calc_label_rot_and_inline routine requires that (xmin, ymin)
    # be a vertex in the path. So, if it isn't, add a vertex here
    # grab the paths from the collections
    paths = self.collections[conmin].get_paths()
    # grab the correct segment
    active_path = paths[segmin]
    # grab its vertices
    lc = active_path.vertices
    # sort out where the new vertex should be added, in data units
    xcmin = self.ax.transData.inverted().transform_point([xmin, ymin])
    # if there isn't a vertex close enough
    if not np.allclose(xcmin, lc[imin]):
        # insert new data into the vertex list
        lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]
        # replace the path with the new one
        paths[segmin] = mpath.Path(lc)
    # Get index of nearest level in subset of levels used for labeling
    lmin = self.labelIndiceList.index(conmin)
    # Coordinates of contour (re-fetched: the path may have been
    # replaced above)
    paths = self.collections[conmin].get_paths()
    lc = paths[segmin].vertices
    # In pixel/screen space
    slc = self.ax.transData.transform(lc)
    # Get label width for rotating labels and breaking contours
    lw = self.get_label_width(self.labelLevelList[lmin],
                              self.labelFmt, self.labelFontSizeList[lmin])
    # Figure out label rotation.  Passing lc asks
    # calc_label_rot_and_inline to also break the contour.
    if inline:
        lcarg = lc
    else:
        lcarg = None
    rotation, nlc = self.calc_label_rot_and_inline(
        slc, imin, lw, lcarg,
        inline_spacing)
    self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
                   self.labelCValueList[lmin])
    if inline:
        # Remove old, not looping over paths so we can do this up front
        paths.pop(segmin)
        # Add paths if not empty or single point
        for n in nlc:
            if len(n) > 1:
                paths.append(mpath.Path(n))
def pop_label(self, index=-1):
    """Defaults to removing last label, but any index can be supplied"""
    # Keep the cvalue and text lists in lockstep, then detach the
    # artist from its figure.
    self.labelCValues.pop(index)
    removed = self.labelTexts.pop(index)
    removed.remove()
def labels(self, inline, inline_spacing):
    """Place a label on every contour level selected for labeling."""
    # ClabelText re-derives its rotation at draw time; plain Text
    # bakes the rotation in up front.
    if self._use_clabeltext:
        add_label = self.add_label_clabeltext
    else:
        add_label = self.add_label
    for icon, lev, fsize, cvalue in zip(
            self.labelIndiceList, self.labelLevelList,
            self.labelFontSizeList, self.labelCValueList):
        con = self.collections[icon]
        trans = con.get_transform()
        lw = self.get_label_width(lev, self.labelFmt, fsize)
        lw *= self.ax.figure.dpi / 72.0  # scale to screen coordinates
        additions = []
        paths = con.get_paths()
        for segNum, linepath in enumerate(paths):
            lc = linepath.vertices  # Line contour
            slc0 = trans.transform(lc)  # Line contour in screen coords
            # For closed polygons, add extra point to avoid division by
            # zero in print_label and locate_label. Other than these
            # functions, this is not necessary and should probably be
            # eventually removed.
            if mlab.is_closed_polygon(lc):
                slc = np.r_[slc0, slc0[1:2, :]]
            else:
                slc = slc0
            # Check if long enough for a label
            if self.print_label(slc, lw):
                x, y, ind = self.locate_label(slc, lw)
                # Passing lc asks calc_label_rot_and_inline to also
                # break the contour around the label.
                if inline:
                    lcarg = lc
                else:
                    lcarg = None
                rotation, new = self.calc_label_rot_and_inline(
                    slc0, ind, lw, lcarg,
                    inline_spacing)
                # Actually add the label
                add_label(x, y, rotation, lev, cvalue)
                # If inline, add new contours
                if inline:
                    for n in new:
                        # Add path if not empty or single point
                        if len(n) > 1:
                            additions.append(mpath.Path(n))
            else:  # If not adding label, keep old path
                additions.append(linepath)
        # After looping over all segments on a contour, remove old
        # paths and add new ones if inlining
        if inline:
            del paths[:]
            paths.extend(additions)
def _find_closest_point_on_leg(p1, p2, p0):
    """find closest point to p0 on line segment connecting p1 and p2"""
    # Degenerate segment: both endpoints coincide, so the answer is
    # just the distance to that point.
    if np.all(p2 == p1):
        return np.sum((p0 - p1) ** 2), p1
    seg = p2 - p1
    rel = p0 - p1
    # Scalar projection of rel onto seg, clamped to [0, 1] so the
    # result stays on the segment.
    t = np.dot(rel, seg) / np.dot(seg, seg)
    t = min(max(t, 0), 1)
    nearest = p1 + t * seg
    # Return (squared distance, closest point).
    return np.sum((nearest - p0) ** 2), nearest
def _find_closest_point_on_path(lc, point):
    """
    lc: coordinates of vertices
    point: coordinates of test point

    Returns (squared distance, closest point, (leg start, leg end)).
    """
    # The nearest vertex determines which one or two legs can contain
    # the overall closest point.
    sq_dists = np.sum((lc - point[None, :]) ** 2, 1)
    ivert = np.argmin(sq_dists)
    closed = mlab.is_closed_polygon(lc)
    n = len(lc)
    # Legs on either side of the nearest vertex, wrapping around for
    # closed polygons.
    legs = []
    if ivert > 0 or closed:
        legs.append(((ivert - 1) % n, ivert))
    if ivert < n - 1 or closed:
        legs.append((ivert, (ivert + 1) % n))
    best = (np.inf, None, (None, None))
    for leg in legs:
        d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)
        if d < best[0]:
            best = (d, xc, leg)
    return best
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
The axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See :meth:`_process_colors`.
"""
def __init__(self, ax, *args, **kwargs):
    """
    Draw contour lines or filled regions, depending on
    whether keyword arg 'filled' is *False* (default) or *True*.

    The first three arguments must be:

      *ax*: axes object.

      *levels*: [level0, level1, ..., leveln]
        A list of floating point numbers indicating the contour
        levels.

      *allsegs*: [level0segs, level1segs, ...]
        List of all the polygon segments for all the *levels*.
        For contour lines ``len(allsegs) == len(levels)``, and for
        filled contour regions ``len(allsegs) = len(levels)-1``.

        level0segs = [polygon0, polygon1, ...]

        polygon0 = array_like [[x0,y0], [x1,y1], ...]

      *allkinds*: *None* or [level0kinds, level1kinds, ...]
        Optional list of all the polygon vertex kinds (code types), as
        described and used in Path. This is used to allow multiply-
        connected paths such as holes within filled polygons.
        If not *None*, len(allkinds) == len(allsegs).

        level0kinds = [polygon0kinds, ...]

        polygon0kinds = [vertexcode0, vertexcode1, ...]

        If *allkinds* is not *None*, usually all polygons for a particular
        contour level are grouped together so that
        level0segs = [polygon0] and level0kinds = [polygon0kinds].

    Keyword arguments are as described in
    :attr:`matplotlib.contour.QuadContourSet.contour_doc`.

    **Examples:**

    .. plot:: mpl_examples/misc/contour_manual.py
    """
    self.ax = ax
    self.levels = kwargs.get('levels', None)
    self.filled = kwargs.get('filled', False)
    self.linewidths = kwargs.get('linewidths', None)
    self.linestyles = kwargs.get('linestyles', None)
    self.hatches = kwargs.get('hatches', [None])
    self.alpha = kwargs.get('alpha', None)
    self.origin = kwargs.get('origin', None)
    self.extent = kwargs.get('extent', None)
    cmap = kwargs.get('cmap', None)
    self.colors = kwargs.get('colors', None)
    norm = kwargs.get('norm', None)
    vmin = kwargs.get('vmin', None)
    vmax = kwargs.get('vmax', None)
    self.extend = kwargs.get('extend', 'neither')
    self.antialiased = kwargs.get('antialiased', None)
    if self.antialiased is None and self.filled:
        self.antialiased = False  # eliminate artifacts; we are not
                                  # stroking the boundaries.
        # The default for line contours will be taken from
        # the LineCollection default, which uses the
        # rcParams['lines.antialiased']
    self.nchunk = kwargs.get('nchunk', 0)
    self.locator = kwargs.get('locator', None)
    if (isinstance(norm, colors.LogNorm)
            or isinstance(self.locator, ticker.LogLocator)):
        self.logscale = True
        if norm is None:
            norm = colors.LogNorm()
        # BUG FIX: was ``self.extend is not 'neither'`` -- an identity
        # comparison against a str literal that only worked through
        # CPython interning and raises SyntaxWarning on modern Python.
        if self.extend != 'neither':
            raise ValueError('extend kwarg does not work yet with log '
                             ' scale')
    else:
        self.logscale = False
    # Validate the simple keyword arguments before doing any work.
    if self.origin not in [None, 'lower', 'upper', 'image']:
        raise ValueError("If given, *origin* must be one of [ 'lower' |"
                         " 'upper' | 'image']")
    if self.extent is not None and len(self.extent) != 4:
        raise ValueError("If given, *extent* must be '[ *None* |"
                         " (x0,x1,y0,y1) ]'")
    if self.colors is not None and cmap is not None:
        raise ValueError('Either colors or cmap must be None')
    if self.origin == 'image':
        self.origin = mpl.rcParams['image.origin']

    self._transform = kwargs.get('transform', None)

    # Subclass hook: computes levels/segments and sets zmin/zmax.
    self._process_args(*args, **kwargs)
    self._process_levels()

    if self.colors is not None:
        ncolors = len(self.levels)
        if self.filled:
            ncolors -= 1
        i0 = 0

        # Handle the case where colors are given for the extended
        # parts of the contour.
        extend_min = self.extend in ['min', 'both']
        extend_max = self.extend in ['max', 'both']
        use_set_under_over = False
        # if we are extending the lower end, and we've been given enough
        # colors then skip the first color in the resulting cmap. For the
        # extend_max case we don't need to worry about passing more colors
        # than ncolors as ListedColormap will clip.
        total_levels = ncolors + int(extend_min) + int(extend_max)
        if (len(self.colors) == total_levels and
                any([extend_min, extend_max])):
            use_set_under_over = True
            if extend_min:
                i0 = 1

        cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)

        if use_set_under_over:
            if extend_min:
                cmap.set_under(self.colors[0])
            if extend_max:
                cmap.set_over(self.colors[-1])

    if self.filled:
        self.collections = cbook.silent_list('mcoll.PathCollection')
    else:
        self.collections = cbook.silent_list('mcoll.LineCollection')
    # label lists must be initialized here
    self.labelTexts = []
    self.labelCValues = []

    kw = {'cmap': cmap}
    if norm is not None:
        kw['norm'] = norm
    # sets self.cmap, norm if needed;
    cm.ScalarMappable.__init__(self, **kw)
    if vmin is not None:
        self.norm.vmin = vmin
    if vmax is not None:
        self.norm.vmax = vmax
    self._process_colors()

    self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()

    if self.filled:
        if self.linewidths is not None:
            warnings.warn('linewidths is ignored by contourf')

        # Lower and upper contour levels.
        lowers, uppers = self._get_lowers_and_uppers()

        # Ensure allkinds can be zipped below.
        if self.allkinds is None:
            self.allkinds = [None] * len(self.allsegs)

        for level, level_upper, segs, kinds in \
                zip(lowers, uppers, self.allsegs, self.allkinds):
            paths = self._make_paths(segs, kinds)
            # Default zorder taken from Collection
            zorder = kwargs.get('zorder', 1)
            col = mcoll.PathCollection(
                paths,
                antialiaseds=(self.antialiased,),
                edgecolors='none',
                alpha=self.alpha,
                transform=self.get_transform(),
                zorder=zorder)
            self.ax.add_collection(col, autolim=False)
            self.collections.append(col)
    else:
        tlinewidths = self._process_linewidths()
        self.tlinewidths = tlinewidths
        tlinestyles = self._process_linestyles()
        aa = self.antialiased
        if aa is not None:
            aa = (self.antialiased,)
        for level, width, lstyle, segs in \
                zip(self.levels, tlinewidths, tlinestyles, self.allsegs):
            # Default zorder taken from LineCollection
            zorder = kwargs.get('zorder', 2)
            col = mcoll.LineCollection(
                segs,
                antialiaseds=aa,
                linewidths=width,
                linestyles=[lstyle],
                alpha=self.alpha,
                transform=self.get_transform(),
                zorder=zorder)
            col.set_label('_nolegend_')
            self.ax.add_collection(col, autolim=False)
            self.collections.append(col)

    # Make autoscaling honor the full extent of the contoured data.
    for col in self.collections:
        col.sticky_edges.x[:] = [self._mins[0], self._maxs[0]]
        col.sticky_edges.y[:] = [self._mins[1], self._maxs[1]]
    self.ax.update_datalim([self._mins, self._maxs])
    self.ax.autoscale_view(tight=True)

    self.changed()  # set the colors
def get_transform(self):
    """
    Return the :class:`~matplotlib.transforms.Transform`
    instance used by this ContourSet.
    """
    tr = self._transform
    if tr is None:
        # Default to data coordinates and remember the choice.
        tr = self.ax.transData
        self._transform = tr
    elif (not isinstance(tr, mtrans.Transform)
          and hasattr(tr, '_as_mpl_transform')):
        # Lazily resolve transform-like objects into real Transforms.
        tr = tr._as_mpl_transform(self.ax)
        self._transform = tr
    return tr
def __getstate__(self):
    """Return a picklable copy of the instance state."""
    state = dict(self.__dict__)
    # the C object _contour_generator cannot currently be pickled. This
    # isn't a big issue as it is not actually used once the contour has
    # been calculated.
    state['_contour_generator'] = None
    return state
def legend_elements(self, variable_name='x', str_format=str):
    """
    Return a list of artist and labels suitable for passing through
    to :func:`plt.legend` which represent this ContourSet.

    Args:

        *variable_name*: the string used inside the inequality used
          on the labels

        *str_format*: function used to format the numbers in the labels
    """
    artists = []
    labels = []
    if self.filled:
        lowers, uppers = self._get_lowers_and_uppers()
        n_levels = len(self.collections)
        for i, (collection, lower, upper) in enumerate(
                zip(self.collections, lowers, uppers)):
            # Proxy artist carrying the face color/hatch of this band.
            patch = mpatches.Rectangle(
                (0, 0), 1, 1,
                facecolor=collection.get_facecolor()[0],
                hatch=collection.get_hatch(),
                alpha=collection.get_alpha())
            artists.append(patch)
            lower = str_format(lower)
            upper = str_format(upper)
            # Extended end bands get one-sided inequalities.
            if i == 0 and self.extend in ('min', 'both'):
                labels.append(r'$%s \leq %s$' % (variable_name,
                                                 lower))
            elif i == n_levels - 1 and self.extend in ('max', 'both'):
                labels.append(r'$%s > %s$' % (variable_name,
                                              upper))
            else:
                labels.append(r'$%s < %s \leq %s$' % (lower,
                                                      variable_name,
                                                      upper))
    else:
        for collection, level in zip(self.collections, self.levels):
            # Proxy line styled like the level's LineCollection.
            patch = mcoll.LineCollection(None)
            patch.update_from(collection)
            artists.append(patch)
            # format the level for insertion into the labels
            level = str_format(level)
            labels.append(r'$%s = %s$' % (variable_name, level))
    return artists, labels
def _process_args(self, *args, **kwargs):
    """
    Process *args* and *kwargs*; override in derived classes.

    Must set self.levels, self.zmin and self.zmax, and update axes
    limits.
    """
    self.levels = args[0]
    self.allsegs = args[1]
    # BUG FIX: the old ``len(args) > 2 and args[2] or None`` and/or
    # idiom collapsed a falsy third argument (e.g. an empty kinds
    # list) to None; the conditional expression preserves it.
    self.allkinds = args[2] if len(args) > 2 else None
    self.zmax = np.amax(self.levels)
    self.zmin = np.amin(self.levels)
    self._auto = False

    # Check lengths of levels and allsegs.
    if self.filled:
        if len(self.allsegs) != len(self.levels) - 1:
            raise ValueError('must be one less number of segments as '
                             'levels')
    else:
        if len(self.allsegs) != len(self.levels):
            raise ValueError('must be same number of segments as levels')

    # Check length of allkinds.
    if (self.allkinds is not None and
            len(self.allkinds) != len(self.allsegs)):
        raise ValueError('allkinds has different length to allsegs')

    # Determine x, y bounds and update axes data limits.
    flatseglist = [s for seg in self.allsegs for s in seg]
    points = np.concatenate(flatseglist, axis=0)
    self._mins = points.min(axis=0)
    self._maxs = points.max(axis=0)
def _get_allsegs_and_allkinds(self):
    """
    Override in derived classes to create and return allsegs and allkinds.
    allkinds can be None.
    """
    # Base implementation: return the values stored by _process_args.
    return self.allsegs, self.allkinds
def _get_lowers_and_uppers(self):
    """
    Return (lowers,uppers) for filled contours.
    """
    lowers = self._levels[:-1]
    if self.zmin == lowers[0]:
        # Include minimum values in lowest interval
        lowers = lowers.copy()  # so we don't change self._levels
        if self.logscale:
            # Nudge down multiplicatively; subtracting could go <= 0.
            lowers[0] = 0.99 * self.zmin
        else:
            lowers[0] -= 1
    uppers = self._levels[1:]
    return (lowers, uppers)
def _make_paths(self, segs, kinds):
    """Build one Path per segment, attaching vertex codes if given."""
    if kinds is None:
        return [mpath.Path(seg) for seg in segs]
    return [mpath.Path(seg, codes=kind)
            for seg, kind in zip(segs, kinds)]
def changed(self):
    """Propagate colormap/norm changes to collections and labels."""
    tcolors = [(tuple(rgba),)
               for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
    self.tcolors = tcolors
    # Repeat the hatch list so zip() below covers every level.
    hatches = self.hatches * len(tcolors)
    for color, hatch, collection in zip(tcolors, hatches,
                                        self.collections):
        if self.filled:
            collection.set_facecolor(color)
            # update the collection's hatch (may be None)
            collection.set_hatch(hatch)
        else:
            collection.set_color(color)
    # Recolor any existing contour labels to match.
    for label, cv in zip(self.labelTexts, self.labelCValues):
        label.set_alpha(self.alpha)
        label.set_color(self.labelMappable.to_rgba(cv))
    # add label colors
    cm.ScalarMappable.changed(self)
def _autolev(self, N):
    """
    Select contour levels to span the data.

    We need two more levels for filled contours than for
    line contours, because for the latter we need to specify
    the lower and upper boundary of each range. For example,
    a single contour boundary, say at z = 0, requires only
    one contour line, but two filled regions, and therefore
    three levels to provide boundaries for both regions.
    """
    if self.locator is None:
        # Pick a locator matching the requested scaling.
        if self.logscale:
            self.locator = ticker.LogLocator()
        else:
            self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)
    zmax = self.zmax
    zmin = self.zmin
    lev = self.locator.tick_values(zmin, zmax)
    self._auto = True
    if self.filled:
        return lev
    # For line contours, drop levels outside the data range.
    return lev[(lev > zmin) & (lev < zmax)]
def _contour_level_args(self, z, args):
    """
    Determine the contour levels and store in self.levels.

    *args* may carry one trailing argument: an int (number of
    auto-chosen levels) or a sequence of explicit level values.
    Raises TypeError for an unusable levels argument and ValueError
    for too few or non-increasing levels.
    """
    if self.filled:
        fn = 'contourf'
    else:
        fn = 'contour'
    self._auto = False
    if self.levels is None:
        if len(args) == 0:
            lev = self._autolev(7)
        else:
            level_arg = args[0]
            try:
                # Exact int check (deliberately excludes bool/numpy
                # ints, which fall through to the array branch).
                if type(level_arg) == int:
                    lev = self._autolev(level_arg)
                else:
                    lev = np.asarray(level_arg).astype(np.float64)
            except Exception:
                # BUG FIX: was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit.
                raise TypeError(
                    "Last %s arg must give levels; see help(%s)" %
                    (fn, fn))
        self.levels = lev
    if self.filled and len(self.levels) < 2:
        raise ValueError("Filled contours require at least 2 levels.")

    if len(self.levels) > 1 and np.amin(np.diff(self.levels)) <= 0.0:
        if hasattr(self, '_corner_mask') and self._corner_mask == 'legacy':
            warnings.warn("Contour levels are not increasing")
        else:
            raise ValueError("Contour levels must be increasing")
def _process_levels(self):
    """
    Assign values to :attr:`layers` based on :attr:`levels`,
    adding extended layers as needed if contours are filled.

    For line contours, layers simply coincide with levels;
    a line is a thin layer.  No extended levels are needed
    with line contours.
    """
    # The following attributes are no longer needed, and
    # should be deprecated and removed to reduce confusion.
    self.vmin = np.amin(self.levels)
    self.vmax = np.amax(self.levels)

    # Make a private _levels to include extended regions; we
    # want to leave the original levels attribute unchanged.
    # (Colorbar needs this even for line contours.)
    self._levels = list(self.levels)

    # Sentinel levels one unit beyond the data for the extended ends.
    if self.extend in ('both', 'min'):
        self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
    if self.extend in ('both', 'max'):
        self._levels.append(max(self.levels[-1], self.zmax) + 1)
    self._levels = np.asarray(self._levels)

    if not self.filled:
        self.layers = self.levels
        return

    # layer values are mid-way between levels
    self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
    # ...except that extended layers must be outside the
    # normed range:
    if self.extend in ('both', 'min'):
        self.layers[0] = -np.inf
    if self.extend in ('both', 'max'):
        self.layers[-1] = np.inf
def _process_colors(self):
    """
    Color argument processing for contouring.

    Note that we base the color mapping on the contour
    levels and layers, not on the actual range of the Z
    values.  This means we don't have to worry about bad
    values in Z, and we always have the full dynamic range
    available for the selected levels.

    The color is based on the midpoint of the layer, except for
    extended end layers.  By default, the norm vmin and vmax
    are the extreme values of the non-extended levels.  Hence,
    the layer color extremes are not the extreme values of
    the colormap itself, but approach those values as the number
    of levels increases.  An advantage of this scheme is that
    line contours, when added to filled contours, take on
    colors that are consistent with those of the filled regions;
    for example, a contour line on the boundary between two
    regions will have a color intermediate between those
    of the regions.
    """
    self.monochrome = self.cmap.monochrome
    if self.colors is not None:
        # Generate integers for direct indexing.
        i0, i1 = 0, len(self.levels)
        if self.filled:
            i1 -= 1
        # Out of range indices for over and under:
        if self.extend in ('both', 'min'):
            i0 = -1
        if self.extend in ('both', 'max'):
            i1 += 1
        self.cvalues = list(range(i0, i1))
        # NoNorm lets the integer cvalues index the colormap directly.
        self.set_norm(colors.NoNorm())
    else:
        self.cvalues = self.layers
    self.set_array(self.levels)
    self.autoscale_None()
    if self.extend in ('both', 'max', 'min'):
        # Let out-of-range values map to the over/under colors.
        self.norm.clip = False

    # self.tcolors are set by the "changed" method
def _process_linewidths(self):
    """Broadcast the *linewidths* kwarg to one (width,) tuple per level."""
    widths = self.linewidths
    n_levels = len(self.levels)
    if widths is None:
        # Fall back to the rcParams line width for every level.
        return [(mpl.rcParams['lines.linewidth'],)] * n_levels
    if not cbook.iterable(widths):
        widths = [widths] * n_levels
    else:
        widths = list(widths)
        if len(widths) < n_levels:
            # Tile the sequence until it covers every level.
            nreps = int(np.ceil(n_levels / len(widths)))
            widths = widths * nreps
        if len(widths) > n_levels:
            widths = widths[:n_levels]
    return [(w,) for w in widths]
def _process_linestyles(self):
    """Broadcast the *linestyles* kwarg to one style per level."""
    linestyles = self.linestyles
    Nlev = len(self.levels)
    if linestyles is None:
        tlinestyles = ['solid'] * Nlev
        if self.monochrome:
            # With a single color, distinguish negative levels by
            # the configured negative linestyle (typically dashed).
            neg_ls = mpl.rcParams['contour.negative_linestyle']
            # Tolerance scaled to the data range so tiny float noise
            # around zero does not flip the style.
            eps = - (self.zmax - self.zmin) * 1e-15
            for i, lev in enumerate(self.levels):
                if lev < eps:
                    tlinestyles[i] = neg_ls
    else:
        if cbook.is_string_like(linestyles):
            tlinestyles = [linestyles] * Nlev
        elif cbook.iterable(linestyles):
            tlinestyles = list(linestyles)
            if len(tlinestyles) < Nlev:
                # Tile the sequence until it covers every level.
                nreps = int(np.ceil(Nlev / len(linestyles)))
                tlinestyles = tlinestyles * nreps
            if len(tlinestyles) > Nlev:
                tlinestyles = tlinestyles[:Nlev]
        else:
            raise ValueError("Unrecognized type for linestyles kwarg")
    return tlinestyles
def get_alpha(self):
    """returns alpha to be applied to all ContourSet artists"""
    return self.alpha
def set_alpha(self, alpha):
    """sets alpha for all ContourSet artists"""
    self.alpha = alpha
    # changed() pushes the new alpha into the collections and labels.
    self.changed()
def find_nearest_contour(self, x, y, indices=None, pixel=True):
    """
    Finds contour that is closest to a point.  Defaults to
    measuring distance in pixels (screen space - useful for manual
    contour labeling), but this can be controlled via a keyword
    argument.

    Returns a tuple containing the contour, segment, index of
    segment, x & y of segment point and distance to minimum point.

    Optional keyword arguments:

      *indices*:
        Indexes of contour levels to consider when looking for
        nearest point.  Defaults to using all levels.

      *pixel*:
        If *True*, measure distance in pixel space, if not, measure
        distance in axes space.  Defaults to *True*.
    """
    # This function uses a method that is probably quite
    # inefficient based on converting each contour segment to
    # pixel coordinates and then comparing the given point to
    # those coordinates for each contour. This will probably be
    # quite slow for complex contours, but for normal use it works
    # sufficiently well that the time is not noticeable.
    # Nonetheless, improvements could probably be made.
    if indices is None:
        indices = list(xrange(len(self.levels)))
    dmin = np.inf
    conmin = None
    segmin = None
    xmin = None
    ymin = None
    # NOTE(review): imin is only bound inside the ``d < dmin`` branch;
    # if every collection has zero paths the return below raises
    # UnboundLocalError -- confirm whether callers can hit that.
    point = np.array([x, y])
    for icon in indices:
        con = self.collections[icon]
        trans = con.get_transform()
        paths = con.get_paths()
        for segNum, linepath in enumerate(paths):
            lc = linepath.vertices
            # transfer all data points to screen coordinates if desired
            if pixel:
                lc = trans.transform(lc)
            d, xc, leg = _find_closest_point_on_path(lc, point)
            if d < dmin:
                # Keep the running best (contour, segment, vertex, point).
                dmin = d
                conmin = icon
                segmin = segNum
                imin = leg[1]
                xmin = xc[0]
                ymin = xc[1]
    return (conmin, segmin, imin, xmin, ymin, dmin)
class QuadContourSet(ContourSet):
    """
    Create and store a set of contour lines or filled regions.

    User-callable method: :meth:`clabel`

    Useful attributes:
      ax:
        The axes object in which the contours are drawn
      collections:
        A silent_list of LineCollections or PolyCollections
      levels:
        Contour levels
      layers:
        Same as levels for line contours; half-way between
        levels for filled contours.  See :meth:`_process_colors` method.
    """
    def _process_args(self, *args, **kwargs):
        """
        Process args and kwargs.

        Either re-uses the data of an existing `QuadContourSet` passed as
        the first positional argument, or parses (x, y, z) data and builds
        the C contour generator used by the other methods.
        """
        if isinstance(args[0], QuadContourSet):
            # Share the data of another contour set instead of recomputing.
            if self.levels is None:
                self.levels = args[0].levels
            self.zmin = args[0].zmin
            self.zmax = args[0].zmax
            self._corner_mask = args[0]._corner_mask
            if self._corner_mask == 'legacy':
                contour_generator = args[0].Cntr
            else:
                contour_generator = args[0]._contour_generator
            self._mins = args[0]._mins
            self._maxs = args[0]._maxs
        else:
            self._corner_mask = kwargs.get('corner_mask', None)
            if self._corner_mask is None:
                self._corner_mask = mpl.rcParams['contour.corner_mask']
            x, y, z = self._contour_args(args, kwargs)
            _mask = ma.getmask(z)
            # An all-False mask is equivalent to no mask at all.
            if _mask is ma.nomask or not _mask.any():
                _mask = None
            if self._corner_mask == 'legacy':
                cbook.warn_deprecated('1.5',
                                      name="corner_mask='legacy'",
                                      alternative='corner_mask=False or True')
                contour_generator = _cntr.Cntr(x, y, z.filled(), _mask)
            else:
                contour_generator = _contour.QuadContourGenerator(
                    x, y, z.filled(), _mask, self._corner_mask, self.nchunk)
            t = self.get_transform()
            # if the transform is not trans data, and some part of it
            # contains transData, transform the xs and ys to data coordinates
            # (NB: "seperately" is the actual, misspelled name of this
            # transform API method.)
            if (t != self.ax.transData and
                    any(t.contains_branch_seperately(self.ax.transData))):
                trans_to_data = t - self.ax.transData
                pts = (np.vstack([x.flat, y.flat]).T)
                transformed_pts = trans_to_data.transform(pts)
                x = transformed_pts[..., 0]
                y = transformed_pts[..., 1]
            self._mins = [ma.min(x), ma.min(y)]
            self._maxs = [ma.max(x), ma.max(y)]
        # Store the generator under the attribute name matching the
        # algorithm in use ('legacy' C code vs. the newer QuadContourGenerator).
        if self._corner_mask == 'legacy':
            self.Cntr = contour_generator
        else:
            self._contour_generator = contour_generator
    def _get_allsegs_and_allkinds(self):
        """
        Create and return allsegs and allkinds by calling underlying C code.
        """
        allsegs = []
        if self.filled:
            lowers, uppers = self._get_lowers_and_uppers()
            allkinds = []
            for level, level_upper in zip(lowers, uppers):
                if self._corner_mask == 'legacy':
                    # Legacy trace() returns vertices and kind codes in
                    # one flat list: first half vertices, second half kinds.
                    nlist = self.Cntr.trace(level, level_upper,
                                            nchunk=self.nchunk)
                    nseg = len(nlist) // 2
                    vertices = nlist[:nseg]
                    kinds = nlist[nseg:]
                else:
                    vertices, kinds = \
                        self._contour_generator.create_filled_contour(
                            level, level_upper)
                allsegs.append(vertices)
                allkinds.append(kinds)
        else:
            # Line contours carry no path-kind codes.
            allkinds = None
            for level in self.levels:
                if self._corner_mask == 'legacy':
                    nlist = self.Cntr.trace(level)
                    nseg = len(nlist) // 2
                    vertices = nlist[:nseg]
                else:
                    vertices = self._contour_generator.create_contour(level)
                allsegs.append(vertices)
        return allsegs, allkinds
    def _contour_args(self, args, kwargs):
        """
        Parse positional data arguments into (x, y, z) arrays, record
        ``self.zmin``/``self.zmax`` and forward any remaining arguments
        to ``_contour_level_args`` for level processing.
        """
        if self.filled:
            fn = 'contourf'
        else:
            fn = 'contour'
        Nargs = len(args)
        if Nargs <= 2:
            # Only Z (plus optional levels) given: synthesize X and Y.
            z = ma.asarray(args[0], dtype=np.float64)
            x, y = self._initialize_x_y(z)
            args = args[1:]
        elif Nargs <= 4:
            x, y, z = self._check_xyz(args[:3], kwargs)
            args = args[3:]
        else:
            raise TypeError("Too many arguments to %s; see help(%s)" %
                            (fn, fn))
        z = ma.masked_invalid(z, copy=False)
        self.zmax = float(z.max())
        self.zmin = float(z.min())
        if self.logscale and self.zmin <= 0:
            # Non-positive values cannot be drawn on a log scale.
            z = ma.masked_where(z <= 0, z)
            warnings.warn('Log scale: values of z <= 0 have been masked')
            self.zmin = float(z.min())
        self._contour_level_args(z, args)
        return (x, y, z)
    def _check_xyz(self, args, kwargs):
        """
        For functions like contour, check that the dimensions
        of the input arrays match; if x and y are 1D, convert
        them to 2D using meshgrid.

        Possible change: I think we should make and use an ArgumentError
        Exception class (here and elsewhere).
        """
        x, y = args[:2]
        # Let the axes apply unit conversion before shape validation.
        self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
        x = self.ax.convert_xunits(x)
        y = self.ax.convert_yunits(y)
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        z = ma.asarray(args[2], dtype=np.float64)
        if z.ndim != 2:
            raise TypeError("Input z must be a 2D array.")
        else:
            Ny, Nx = z.shape
        if x.ndim != y.ndim:
            raise TypeError("Number of dimensions of x and y should match.")
        if x.ndim == 1:
            nx, = x.shape
            ny, = y.shape
            if nx != Nx:
                raise TypeError("Length of x must be number of columns in z.")
            if ny != Ny:
                raise TypeError("Length of y must be number of rows in z.")
            x, y = np.meshgrid(x, y)
        elif x.ndim == 2:
            if x.shape != z.shape:
                raise TypeError("Shape of x does not match that of z: found "
                                "{0} instead of {1}.".format(x.shape, z.shape))
            if y.shape != z.shape:
                raise TypeError("Shape of y does not match that of z: found "
                                "{0} instead of {1}.".format(y.shape, z.shape))
        else:
            raise TypeError("Inputs x and y must be 1D or 2D.")
        return x, y, z
    def _initialize_x_y(self, z):
        """
        Return X, Y arrays such that contour(Z) will match imshow(Z)
        if origin is not None.

        The center of pixel Z[i,j] depends on origin:
        if origin is None, x = j, y = i;
        if origin is 'lower', x = j + 0.5, y = i + 0.5;
        if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5

        If extent is not None, x and y will be scaled to match,
        as in imshow.
        If origin is None and extent is not None, then extent
        will give the minimum and maximum values of x and y.
        """
        if z.ndim != 2:
            raise TypeError("Input must be a 2D array.")
        else:
            Ny, Nx = z.shape
        if self.origin is None:  # Not for image-matching.
            if self.extent is None:
                return np.meshgrid(np.arange(Nx), np.arange(Ny))
            else:
                x0, x1, y0, y1 = self.extent
                x = np.linspace(x0, x1, Nx)
                y = np.linspace(y0, y1, Ny)
                return np.meshgrid(x, y)
        # Match image behavior:
        if self.extent is None:
            x0, x1, y0, y1 = (0, Nx, 0, Ny)
        else:
            x0, x1, y0, y1 = self.extent
        # Pixel centers are offset by half a pixel from the extent edges.
        dx = float(x1 - x0) / Nx
        dy = float(y1 - y0) / Ny
        x = x0 + (np.arange(Nx) + 0.5) * dx
        y = y0 + (np.arange(Ny) + 0.5) * dy
        if self.origin == 'upper':
            # Flip rows so the first row of Z appears at the top, as in imshow.
            y = y[::-1]
        return np.meshgrid(x, y)
    contour_doc = """
        Plot contours.

        :func:`~matplotlib.pyplot.contour` and
        :func:`~matplotlib.pyplot.contourf` draw contour lines and
        filled contours, respectively.  Except as noted, function
        signatures and return values are the same for both versions.

        :func:`~matplotlib.pyplot.contourf` differs from the MATLAB
        version in that it does not draw the polygon edges.
        To draw edges, add line contours with
        calls to :func:`~matplotlib.pyplot.contour`.

        Call signatures::

          contour(Z)

        make a contour plot of an array *Z*. The level values are chosen
        automatically.

        ::

          contour(X,Y,Z)

        *X*, *Y* specify the (x, y) coordinates of the surface

        ::

          contour(Z,N)
          contour(X,Y,Z,N)

        contour up to *N* automatically-chosen levels.

        ::

          contour(Z,V)
          contour(X,Y,Z,V)

        draw contour lines at the values specified in sequence *V*,
        which must be in increasing order.

        ::

          contourf(..., V)

        fill the ``len(V)-1`` regions between the values in *V*,
        which must be in increasing order.

        ::

          contour(Z, **kwargs)

        Use keyword args to control colors, linewidth, origin, cmap ... see
        below for more details.

        *X* and *Y* must both be 2-D with the same shape as *Z*, or they
        must both be 1-D such that ``len(X)`` is the number of columns in
        *Z* and ``len(Y)`` is the number of rows in *Z*.

        ``C = contour(...)`` returns a
        :class:`~matplotlib.contour.QuadContourSet` object.

        Optional keyword arguments:

          *corner_mask*: [ *True* | *False* | 'legacy' ]
            Enable/disable corner masking, which only has an effect if *Z* is
            a masked array.  If *False*, any quad touching a masked point is
            masked out.  If *True*, only the triangular corners of quads
            nearest those points are always masked out, other triangular
            corners comprising three unmasked points are contoured as usual.
            If 'legacy', the old contouring algorithm is used, which is
            equivalent to *False* and is deprecated, only remaining whilst the
            new algorithm is tested fully.

            If not specified, the default is taken from
            rcParams['contour.corner_mask'], which is True unless it has
            been modified.

          *colors*: [ *None* | string | (mpl_colors) ]
            If *None*, the colormap specified by cmap will be used.

            If a string, like 'r' or 'red', all levels will be plotted in this
            color.

            If a tuple of matplotlib color args (string, float, rgb, etc),
            different levels will be plotted in different colors in the order
            specified.

          *alpha*: float
            The alpha blending value

          *cmap*: [ *None* | Colormap ]
            A cm :class:`~matplotlib.colors.Colormap` instance or
            *None*.  If *cmap* is *None* and *colors* is *None*, a
            default Colormap is used.

          *norm*: [ *None* | Normalize ]
            A :class:`matplotlib.colors.Normalize` instance for
            scaling data values to colors.  If *norm* is *None* and
            *colors* is *None*, the default linear scaling is used.

          *vmin*, *vmax*: [ *None* | scalar ]
            If not *None*, either or both of these values will be
            supplied to the :class:`matplotlib.colors.Normalize`
            instance, overriding the default color scaling based on
            *levels*.

          *levels*: [level0, level1, ..., leveln]
            A list of floating point numbers indicating the level
            curves to draw, in increasing order; e.g., to draw just
            the zero contour pass ``levels=[0]``

          *origin*: [ *None* | 'upper' | 'lower' | 'image' ]
            If *None*, the first value of *Z* will correspond to the
            lower left corner, location (0,0). If 'image', the rc
            value for ``image.origin`` will be used.

            This keyword is not active if *X* and *Y* are specified in
            the call to contour.

          *extent*: [ *None* | (x0,x1,y0,y1) ]
            If *origin* is not *None*, then *extent* is interpreted as
            in :func:`matplotlib.pyplot.imshow`: it gives the outer
            pixel boundaries. In this case, the position of Z[0,0]
            is the center of the pixel, not a corner. If *origin* is
            *None*, then (*x0*, *y0*) is the position of Z[0,0], and
            (*x1*, *y1*) is the position of Z[-1,-1].

            This keyword is not active if *X* and *Y* are specified in
            the call to contour.

          *locator*: [ *None* | ticker.Locator subclass ]
            If *locator* is *None*, the default
            :class:`~matplotlib.ticker.MaxNLocator` is used. The
            locator is used to determine the contour levels if they
            are not given explicitly via the *V* argument.

          *extend*: [ 'neither' | 'both' | 'min' | 'max' ]
            Unless this is 'neither', contour levels are automatically
            added to one or both ends of the range so that all data
            are included. These added ranges are then mapped to the
            special colormap values which default to the ends of the
            colormap range, but can be set via
            :meth:`matplotlib.colors.Colormap.set_under` and
            :meth:`matplotlib.colors.Colormap.set_over` methods.

          *xunits*, *yunits*: [ *None* | registered units ]
            Override axis units by specifying an instance of a
            :class:`matplotlib.units.ConversionInterface`.

          *antialiased*: [ *True* | *False* ]
            enable antialiasing, overriding the defaults.  For
            filled contours, the default is *True*.  For line contours,
            it is taken from rcParams['lines.antialiased'].

          *nchunk*: [ 0 | integer ]
            If 0, no subdivision of the domain.  Specify a positive integer to
            divide the domain into subdomains of *nchunk* by *nchunk* quads.
            Chunking reduces the maximum length of polygons generated by the
            contouring algorithm which reduces the rendering workload passed
            on to the backend and also requires slightly less RAM.  It can
            however introduce rendering artifacts at chunk boundaries depending
            on the backend, the *antialiased* flag and value of *alpha*.

        contour-only keyword arguments:

          *linewidths*: [ *None* | number | tuple of numbers ]
            If *linewidths* is *None*, the default width in
            ``lines.linewidth`` in ``matplotlibrc`` is used.

            If a number, all levels will be plotted with this linewidth.

            If a tuple, different levels will be plotted with different
            linewidths in the order specified.

          *linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
            If *linestyles* is *None*, the default is 'solid' unless
            the lines are monochrome.  In that case, negative
            contours will take their linestyle from the ``matplotlibrc``
            ``contour.negative_linestyle`` setting.

            *linestyles* can also be an iterable of the above strings
            specifying a set of linestyles to be used. If this
            iterable is shorter than the number of contour levels
            it will be repeated as necessary.

        contourf-only keyword arguments:

          *hatches*:
            A list of cross hatch patterns to use on the filled areas.
            If None, no hatching will be added to the contour.
            Hatching is supported in the PostScript, PDF, SVG and Agg
            backends only.

        Note: contourf fills intervals that are closed at the top; that
        is, for boundaries *z1* and *z2*, the filled region is::

            z1 < z <= z2

        There is one exception: if the lowest boundary coincides with
        the minimum value of the *z* array, then that minimum value
        will be included in the lowest interval.

        **Examples:**

        .. plot:: mpl_examples/pylab_examples/contour_demo.py

        .. plot:: mpl_examples/pylab_examples/contourf_demo.py

        .. plot:: mpl_examples/pylab_examples/contour_corner_mask.py
        """
| apache-2.0 |
kiwifb/numpy | numpy/lib/npyio.py | 1 | 73745 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
    """
    BagObj(obj)

    Redirect attribute access on this wrapper to item access on the
    wrapped object, i.e. ``bag.key`` behaves like ``obj['key']``.

    Parameters
    ----------
    obj : class instance
        Object whose ``__getitem__`` serves the attribute look-ups.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key):  # An instance of BagObj(BagDemo)
    ...                                  # will call this method when any
    ...                                  # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"
    """
    def __init__(self, obj):
        # Hold only a weak proxy so that wrapping an NpzFile does not keep
        # it alive (preserves refcount-based collection of the archive).
        self._obj = weakref.proxy(obj)
    def __getattribute__(self, key):
        # Fetch the wrapped object via object.__getattribute__ to bypass
        # this very override, then translate missing keys into the
        # AttributeError that attribute syntax demands.
        wrapped = object.__getattribute__(self, '_obj')
        try:
            return wrapped[key]
        except KeyError:
            raise AttributeError(key)
    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
    """
    Open a `zipfile.ZipFile` with Zip64 support enabled.

    `file` may be a file object, a path string, or a `pathlib.Path`;
    remaining positional and keyword arguments are forwarded verbatim to
    the `zipfile.ZipFile` constructor.
    """
    import zipfile
    # Normalize Path objects to plain strings for maximum compatibility.
    if is_pathlib_path(file):
        file = str(file)
    # Always permit archives larger than 2 GiB.
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.io.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    def __init__(self, fid, own_fid=False, allow_pickle=True,
                 pickle_kwargs=None):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                # Expose .npy members under their extension-less name.
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        # Keep the handle only when we are responsible for closing it.
        if own_fid:
            self.fid = fid
        else:
            self.fid = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def close(self):
        """
        Close the file.
        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle
    def __del__(self):
        self.close()
    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = 0
        if key in self._files:
            member = 1
        elif key in self.files:
            member = 1
            key += '.npy'
        if member:
            # Peek at the magic bytes to tell .npy arrays apart from
            # arbitrary (raw) archive members.
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)
    def __iter__(self):
        return iter(self.files)
    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).
        """
        return [(f, self[f]) for f in self.files]
    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for f in self.files:
            yield (f, self[f])
    def keys(self):
        """Return files in the archive with a ``.npy`` extension."""
        return self.files
    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()
    def __contains__(self, key):
        return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes).  A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray.  Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail.
        Default: True
    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files on Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but allow_pickle=False given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])
    """
    own_fid = False
    if isinstance(file, basestring):
        fid = open(file, "rb")
        own_fid = True
    elif is_pathlib_path(file):
        fid = file.open("rb")
        own_fid = True
    else:
        fid = file
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of Numpy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # Numpy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # result can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = {}
    try:
        # Code to distinguish from NumPy binary files and pickles.
        _ZIP_PREFIX = asbytes('PK\x03\x04')
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX):
            # zip-file (assume .npz)
            # Transfer file ownership to NpzFile
            tmp = own_fid
            own_fid = False
            return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("allow_pickle=False, but file does not contain "
                                 "non-pickled data")
            try:
                return pickle.load(fid, **pickle_kwargs)
            # Narrowed from a bare ``except`` so that KeyboardInterrupt and
            # SystemExit propagate instead of being re-raised as IOError.
            except Exception:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        if own_fid:
            fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved.  If file is a file-object,
        then the filename is unchanged.  If file is a string or Path, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for disallowing
        pickles include security (loading pickled data can execute arbitrary
        code) and portability (pickled objects may not be loadable on different
        Python installations, for example if the stored objects require libraries
        that are not available, and not all pickled data is compatible between
        Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.
    arr : array_like
        Array data to be saved.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see the module docstring
    of `numpy.lib.format` or the Numpy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> np.save(outfile, x)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    # Track whether we opened the handle, so caller-supplied file objects
    # are left open on exit.
    own_fid = False
    if isinstance(file, basestring):
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True
    elif is_pathlib_path(file):
        if not file.name.endswith('.npy'):
            file = file.parent / (file.name + '.npy')
        fid = file.open("wb")
        own_fid = True
    else:
        fid = file
    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = None
    try:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
    finally:
        # Only close what we opened ourselves.
        if own_fid:
            fid.close()
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    If arguments are passed in with no keywords, the corresponding variable
    names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
    arguments are given, the corresponding variable names, in the ``.npz``
    file will match the keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see `numpy.lib.format` or the
    Numpy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    # Delegate to the shared implementation; compress=False stores members.
    _savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into one compressed ``.npz`` archive.

    Positional arrays are stored under the automatically generated names
    ``arr_0``, ``arr_1``, ... while keyword arguments are stored under
    their keyword names.

    Parameters
    ----------
    file : str
        File name of ``.npz`` file.
    args : Arguments
        Arrays to store under generated names.
    kwds : Keyword arguments
        Arrays to store under the given names.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.
    """
    # compress=True selects deflate compression in the shared implementation.
    _savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    """
    Shared implementation of `savez` and `savez_compressed`.

    Parameters
    ----------
    file : str, file or pathlib.Path
        Target archive; a ``.npz`` suffix is appended to string/Path names.
    args : tuple
        Positional arrays, stored as ``arr_0``, ``arr_1``, ...
    kwds : dict
        Keyword arrays, stored under their keyword names.
    compress : bool
        If true, members are deflate-compressed; otherwise stored as-is.
    allow_pickle : bool, optional
        Forwarded to ``format.write_array`` for object arrays.
    pickle_kwargs : dict, optional
        Extra pickling keyword arguments forwarded to ``format.write_array``.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile
    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'
    elif is_pathlib_path(file):
        if not file.name.endswith('.npz'):
            file = file.parent / (file.name + '.npz')
    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            # A positional array would clash with an explicit keyword of
            # the same generated name.
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val
    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED
    zipf = zipfile_factory(file, mode="w", compression=compression)
    # Stage arrays in a temporary file on disk, before writing to zip.
    # Since target file might be big enough to exceed capacity of a global
    # temporary directory, create temp file side-by-side with the target file.
    file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
    fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.items():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val),
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
                # Close before handing the staged file to the zip writer;
                # fid = None marks it closed for the finally clause below.
                fid.close()
                fid = None
                zipf.write(tmpfile, arcname=fname)
            except IOError as exc:
                raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
            finally:
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)
    # NOTE(review): ``zipf`` is not closed if an exception escapes the
    # staging loop above -- confirm whether that handle leak matters.
    zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0):
    """
    Load data from a text file.
    Each row in the text file must have the same number of values.
    Parameters
    ----------
    fname : file, str, or pathlib.Path
        File, filename, or generator to read. If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3k.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence, optional
        The characters or list of characters used to indicate the start of a
        comment;
        default: '#'.
    delimiter : str, optional
        The string used to separate values. By default, this is any
        whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float. E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``. Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
        .. versionadded:: 1.11.0
        Also when a single column has to be read it is possible to use
        an integer instead of a tuple. E.g ``usecols = 3`` reads the
        fourth column the same way as `usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
        data-type, arrays are returned for each field. Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.
        .. versionadded:: 1.6.0
    Returns
    -------
    out : ndarray
        Data read from the text file.
    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files
    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.
    .. versionadded:: 1.10.0
    The strings produced by the Python float.hex method can be used as
    input for floats.
    Examples
    --------
    >>> from io import StringIO # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0., 1.],
    [ 2., 3.]])
    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ... 'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
    dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1., 3.])
    >>> y
    array([ 2., 4.])
    """
    # Type conversions for Py3 convenience
    # All line-level processing below works on bytes, so normalize the
    # comment markers and the delimiter to bytes up front.
    if comments is not None:
        if isinstance(comments, (basestring, bytes)):
            comments = [asbytes(comments)]
        else:
            comments = [asbytes(comment) for comment in comments]
        # Compile regex for comments beforehand
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile(asbytes('|').join(comments))
    user_converters = converters
    if delimiter is not None:
        delimiter = asbytes(delimiter)
    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints
        try:
            usecols_as_list = list(usecols)
        except TypeError:
            usecols_as_list = [usecols]
        for col_idx in usecols_as_list:
            try:
                opindex(col_idx)
            except TypeError as e:
                e.args = (
                    "usecols must be an int or a sequence of ints but "
                    "it contains at least one element of type %s" %
                    type(col_idx),
                    )
                raise
        # Fall back to existing code
        usecols = usecols_as_list
    # fown records whether this function opened the file handle itself
    # (and is therefore responsible for closing it in the finally below).
    fown = False
    try:
        if is_pathlib_path(fname):
            fname = str(fname)
        if _is_string_like(fname):
            fown = True
            if fname.endswith('.gz'):
                import gzip
                fh = iter(gzip.GzipFile(fname))
            elif fname.endswith('.bz2'):
                import bz2
                fh = iter(bz2.BZ2File(fname))
            elif sys.version_info[0] == 2:
                fh = iter(open(fname, 'U'))
            else:
                fh = iter(open(fname))
        else:
            # Anything else (open file, generator, list of lines) is
            # assumed to be iterable over lines.
            fh = iter(fname)
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')
    # Parsed rows are accumulated here before the final array conversion.
    X = []
    def flatten_dtype(dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    # Replicate the packing info for each element of the
                    # leading dimensions, innermost dimension first.
                    for dim in dt.shape[-2::-1]:
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = flatten_dtype(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if len(tp.shape) > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)
    def pack_items(items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing is None:
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(pack_items(items[start:start+length], subpacking))
                start += length
            return tuple(ret)
    def split_line(line):
        """Chop off comments, strip, and split at delimiter.
        Note that although the file is opened as text, this function
        returns bytes.
        """
        line = asbytes(line)
        if comments is not None:
            # Keep only the part of the line before the first comment marker.
            line = regex_comments.split(asbytes(line), maxsplit=1)[0]
        line = line.strip(asbytes('\r\n'))
        if line:
            return line.split(delimiter)
        else:
            return []
    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)
        # Skip the first `skiprows` lines
        for i in range(skiprows):
            next(fh)
        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = next(fh)
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname)
        # Number of columns actually read per row (selected or all).
        N = len(usecols or first_vals)
        dtype_types, packing = flatten_dtype(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in range(N)]
            if N > 1:
                packing = [(N, tuple)]
        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).items():
            if usecols:
                try:
                    # Converter keys refer to file columns; remap them to
                    # positions within the selected columns.
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            converters[i] = conv
        # Parse each line, including the first
        for i, line in enumerate(itertools.chain([first_line], fh)):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[i] for i in usecols]
            if len(vals) != N:
                line_num = i + skiprows + 1
                raise ValueError("Wrong number of columns at line %d"
                                 % line_num)
            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]
            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
    finally:
        if fown:
            fh.close()
    X = np.array(X, dtype)
    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)
    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T
    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# '):
    """
    Save an array to a text file.
    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:
        a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
        like `' (%s+%sj)' % (fmt, fmt)`
        b) a full string specifying every real and imaginary part, e.g.
        `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
        c) a list of specifiers, one per column - in this case, the real
        and imaginary part must have separate specifiers,
        e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.
        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.
        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.
        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.
        .. versionadded:: 1.7.0
    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive
    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):
    flags:
    ``-`` : left justify
    ``+`` : Forces to precede result with + or -.
    ``0`` : Left pad the number with zeros instead of space (see width).
    width:
    Minimum number of characters to be printed. The value is not truncated
    if it has more characters.
    precision:
    - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
    digits.
    - For ``e, E`` and ``f`` specifiers, the number of digits to print
    after the decimal point.
    - For ``g`` and ``G``, the maximum number of significant digits.
    - For ``s``, the maximum number of characters.
    specifiers:
    ``c`` : character
    ``d`` or ``i`` : signed decimal integer
    ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
    ``f`` : decimal floating point
    ``g,G`` : use the shorter of ``e,E`` or ``f``
    ``o`` : signed octal
    ``s`` : string of characters
    ``u`` : unsigned decimal integer
    ``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.
    References
    ----------
    .. [1] `Format Specification Mini-Language
    <http://docs.python.org/library/string.html#
    format-specification-mini-language>`_, Python Documentation.
    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',') # X is an array
    >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
    """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)
    # own_fh records whether we opened the handle here (closed in finally).
    own_fh = False
    if is_pathlib_path(fname):
        fname = str(fname)
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            # Binary mode on Py3, text mode on Py2; the writes below go
            # through asbytes either way.
            if sys.version_info[0] >= 3:
                fh = open(fname, 'wb')
            else:
                fh = open(fname, 'w')
    elif hasattr(fname, 'write'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]
        # Complex input writes two fields (real, imag) per column below.
        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                # Single specifier: replicate it across all columns.
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))
        if len(header) > 0:
            # Prefix every header line with the comment marker so that
            # loadtxt will skip it.
            header = header.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + header + newline))
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                fh.write(asbytes(format % tuple(row2) + newline))
        else:
            for row in X:
                try:
                    fh.write(asbytes(format % tuple(row) + newline))
                except TypeError:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format))
        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + footer + newline))
    finally:
        if own_fh:
            fh.close()
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.

    Every match of `regexp` in the file contributes one record to the
    returned array, which is always a structured array; each group of the
    pattern fills one field.

    Parameters
    ----------
    file : str or file
        File name or (readable) file object.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534 bar\\n444 qux")
    >>> f.close()
    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)
    """
    opened_here = False
    if not hasattr(file, "read"):
        # A path was given: open it ourselves and remember to close it.
        file = open(file, 'rb')
        opened_here = True
    try:
        if not hasattr(regexp, 'match'):
            regexp = re.compile(asbytes(regexp))
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        matches = regexp.findall(file.read())
        if matches and not isinstance(matches[0], tuple):
            # A single-group pattern yields plain strings, not tuples:
            # build the array with the lone field's scalar dtype first,
            # then re-interpret it as the one-field structured dtype.
            scalar_dtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(matches, dtype=scalar_dtype)
            output.dtype = dtype
        else:
            output = np.array(matches, dtype=dtype)
        return output
    finally:
        if opened_here:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Mote
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`,
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Thin wrapper around `genfromtxt` that forces ``usemask=False``, so the
    result is always a plain ndarray (never a masked array).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.
    """
    options = dict(kwargs, usemask=False)
    return genfromtxt(fname, **options)
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Thin wrapper around `genfromtxt` that forces ``usemask=True``, so the
    result is a masked array with missing entries masked.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.
    """
    options = dict(kwargs, usemask=True)
    return genfromtxt(fname, **options)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.
    """
    # Defaults tuned for csv import: first row holds (lower-cased) names,
    # fields are comma separated, dtype is inferred from the data.
    csv_defaults = {"case_sensitive": "lower",
                    "names": True,
                    "delimiter": ",",
                    "dtype": None}
    for option, value in csv_defaults.items():
        kwargs.setdefault(option, value)
    data = genfromtxt(fname, **kwargs)
    if kwargs.get("usemask", False):
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
| bsd-3-clause |
NICTA/dora | dora/regressors/gp/demo.py | 1 | 3125 | """ simple_regression.py
This demo shows how to construct a simple regression by composing a kernel
and optimising its hyperparameters.
"""
import numpy as np
import matplotlib.pyplot as pl
import dora.regressors.gp as gp
def main():
    """Run the GP regression demo: build a toy 1-D dataset, learn kernel
    hyperparameters, condition the GP, and plot the posterior and draws."""
    # Demo configuration: training size, query grid size, posterior draws.
    nTrain = 20
    nQuery = 100
    nDraws = 20
    nDims = 1
    seed = 100
    # Make test dataset:
    np.random.seed(seed)
    X = np.random.uniform(0, 30, size=(nTrain,nDims))
    # sort by the first input dimension so line plots connect neighbours
    X = X[np.argsort(X[:,0])]
    noise = np.random.normal(loc=0.0, scale=0.05, size=(nTrain,1))
    def ground_truth(X):
        # sum of sinusoids at several frequencies -- the function to recover
        return np.sin(X-5) + np.sin(X/2-2) + 0.4*np.sin(X/5-2) + 0.4*np.sin(X-3) + 0.2*np.sin(X/0.3-3)
    # NOTE(review): ``noise`` is drawn above but never added to Y, so the
    # training targets are noiseless; only the plotted variance uses
    # noise[0]. Confirm whether ``Y += noise`` was intended.
    Y = ground_truth(X)
    Y = Y[:,0]
    Xs = np.linspace(0., 30., nQuery)[:,np.newaxis]
    # Whiten inputs and de-mean outputs:
    # (inputs are currently passed through unchanged; only the mean of the
    # outputs is removed and added back after prediction)
    Xw = X
    Xsw = Xs
    data_mean = np.mean(Y, axis=0)
    Ys = Y - data_mean
    # Define a GP kernel:
    def mykernel(h, k):
        # a fun pathological example
        a = h(0.1, 5, 0.1) # We can use the same parameter multiple times!
        b = h(0.1, 5, 0.1) # or just define it inline later
        return b*k('matern3on2', a)
        # return a*k('gaussian', b) + b*k('matern3on2', a)
    # We can automatically extract the upper and lower theta vectors
    myKernelFn = gp.compose(mykernel) # callable covariance underlyingFunction
    myPrintFn = gp.describer(mykernel)
    # Set up optimisation
    opt_config = gp.OptConfig()
    opt_config.sigma = gp.auto_range(mykernel)
    opt_config.noise = gp.Range([0.0001], [0.5], [0.05])
    opt_config.walltime = 3.0
    # Learning signal and noise hyperparameters
    hyper_params = gp.learn(Xw, Ys, myKernelFn, opt_config)
    print('Final kernel:', myPrintFn(hyper_params), '+ noise', hyper_params[1])
    # to extract the hypers:
    hypers = gp.train.pack(hyper_params[0], hyper_params[1])[0]
    # Reconstitute them, using the kernel definition to define the structure:
    # NOTE(review): ``theta0`` and ``reconstitute`` are computed for
    # illustration only and are never used below.
    theta0, structure = gp.train.pack(gp.auto_range(mykernel).initialVal, [0])
    reconstitute = gp.train.unpack(hypers, structure)
    regressor = gp.condition(Xw, Ys, myKernelFn, hyper_params)
    query = gp.query(Xsw, regressor)
    # import IPython; IPython.embed(); import sys; sys.exit()
    post_mu = gp.mean(regressor, query)
    post_cov = gp.predict.covariance(regressor, query) # for draws
    post_var = gp.variance(regressor, query)
    draws = gp.predict.draws(nDraws, post_mu, post_cov)
    # Shift outputs back:
    post_mu += data_mean
    draws = [draw+data_mean for draw in draws]
    # Plot
    fig = pl.figure()
    # left panel: posterior mean with a 2-sigma band and the training data
    ax = fig.add_subplot(121)
    ax.plot(Xs, post_mu, 'k-')
    post_mu = post_mu[:,np.newaxis]
    # predictive variance plus (the first sample of) the noise variance
    real_var = (post_var + noise[0]**2)[:,np.newaxis]
    upper = (post_mu + 2*np.sqrt(real_var))
    lower = (post_mu - 2*np.sqrt(real_var))
    ax.fill_between(Xs.ravel(), upper.ravel(),lower.ravel(), facecolor=(0.9,0.9,0.9), edgecolor=(0.5,0.5,0.5))
    ax.plot(regressor.X[:,0], regressor.y+data_mean,'r.')
    # right panel: sample functions drawn from the posterior
    ax = fig.add_subplot(122)
    for i in range(nDraws):
        ax.plot(Xs.ravel(), draws[i])
    pl.show()
if __name__ == "__main__":
main()
| apache-2.0 |
MatthieuBizien/scikit-learn | sklearn/ensemble/gradient_boosting.py | 25 | 71089 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(BaseEstimator):
    """Constant predictor: the alpha-quantile of the training targets.

    Parameters
    ----------
    alpha : float in (0, 1), default 0.9
        The quantile of ``y`` to predict.
    """
    def __init__(self, alpha=0.9):
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha
    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) alpha-quantile of ``y``."""
        if sample_weight is not None:
            self.quantile = _weighted_percentile(y, sample_weight,
                                                 self.alpha * 100.0)
        else:
            self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
    def predict(self, X):
        """Return the stored quantile, one column, one row per sample."""
        check_is_fitted(self, 'quantile')
        return np.full((X.shape[0], 1), self.quantile, dtype=np.float64)
class MeanEstimator(BaseEstimator):
    """Constant predictor: the (weighted) mean of the training targets."""
    def fit(self, X, y, sample_weight=None):
        """Store the mean of ``y``, weighted if ``sample_weight`` is given."""
        if sample_weight is not None:
            self.mean = np.average(y, weights=sample_weight)
        else:
            self.mean = np.mean(y)
    def predict(self, X):
        """Return the stored mean, one column, one row per sample."""
        check_is_fitted(self, 'mean')
        return np.full((X.shape[0], 1), self.mean, dtype=np.float64)
class LogOddsEstimator(BaseEstimator):
    """Constant predictor: the (scaled) log odds of the positive class."""
    # Subclasses may rescale the prior (e.g. 0.5 for exponential loss).
    scale = 1.0
    def fit(self, X, y, sample_weight=None):
        """Compute the prior log-odds; ``y`` must be encoded as 0/1."""
        if sample_weight is not None:
            pos = np.sum(sample_weight * y)
            neg = np.sum(sample_weight * (1 - y))
        else:
            pos = np.sum(y)
            neg = y.shape[0] - pos
        if pos == 0 or neg == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(pos / neg)
    def predict(self, X):
        """Return the stored prior, one column, one row per sample."""
        check_is_fitted(self, 'prior')
        return np.full((X.shape[0], 1), self.prior, dtype=np.float64)
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log odds ratio scaled by 0.5 -- for exponential loss. """
    # Same prior as LogOddsEstimator but halved: ``scale`` multiplies the
    # log-odds computed in LogOddsEstimator.fit.
    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """Constant predictor: the empirical class distribution of the
    training data.
    """
    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) relative frequency of each class in ``y``."""
        if sample_weight is None:
            sample_weight = np.ones_like(y, dtype=np.float64)
        counts = bincount(y, weights=sample_weight)
        self.priors = counts / counts.sum()
    def predict(self, X):
        """Return the class priors tiled to one row per sample."""
        check_is_fitted(self, 'priors')
        return np.tile(self.priors, (X.shape[0], 1))
class ZeroEstimator(BaseEstimator):
    """Constant predictor: zero for every sample (and every class)."""
    def fit(self, X, y, sample_weight=None):
        """Infer the number of output columns from ``y``."""
        if np.issubdtype(y.dtype, int):
            # classification: one column per class, except binary which
            # only needs a single column
            n_classes = np.unique(y).shape[0]
            self.n_classes = 1 if n_classes == 2 else n_classes
        else:
            # regression: a single output column
            self.n_classes = 1
    def predict(self, X):
        """Return an all-zero array of shape (n_samples, n_classes)."""
        check_is_fitted(self, 'n_classes')
        return np.zeros((X.shape[0], self.n_classes), dtype=np.float64)
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.
    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """
    # Multi-class losses (which fit K trees per stage) override this flag.
    is_multi_class = False
    def __init__(self, n_classes):
        self.K = n_classes
    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()
    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """
    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.
        Parameters
        ---------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,):
            The predictions.
        """
    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.
        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions.
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            learning rate shrinks the contribution of each tree by
            ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)
        # mask all which are not in sample mask.
        # Out-of-bag samples get region -1, so the per-leaf line search
        # below (which matches region == leaf, leaf >= 0) ignores them.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1
        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)
        # update predictions (both in-bag and out-of-bag)
        # NOTE: mutates ``y_pred`` in place; uses the *unmasked* regions so
        # out-of-bag predictions are refreshed too.
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))
    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """
    def __init__(self, n_classes):
        # Regression always fits exactly one tree per boosting stage.
        if n_classes != 1:
            message = ("``n_classes`` must be 1 for regression but "
                       "was %r" % n_classes)
            raise ValueError(message)
        super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Least squares (LS) loss.

    The terminal regions of the trees need no update for LS; only the
    running model predictions are refreshed after each stage.
    """
    def init_estimator(self):
        # start from the (weighted) mean of the targets
        return MeanEstimator()
    def __call__(self, y, pred, sample_weight=None):
        """Return the (weighted) mean squared error."""
        squared_error = (y - pred.ravel()) ** 2.0
        if sample_weight is None:
            return np.mean(squared_error)
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * squared_error))
    def negative_gradient(self, y, pred, **kargs):
        """For LS the negative gradient is simply the residual."""
        return y - pred.ravel()
    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Skip the leaf line-search; just refresh the predictions."""
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # intentionally a no-op: leaves already minimise squared error
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Least absolute deviation (LAD) loss for robust regression."""
    def init_estimator(self):
        # the median (0.5-quantile) minimises absolute deviation
        return QuantileEstimator(alpha=0.5)
    def __call__(self, y, pred, sample_weight=None):
        """Return the (weighted) mean absolute error."""
        abs_error = np.abs(y - pred.ravel())
        if sample_weight is None:
            return abs_error.mean()
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * abs_error))
    def negative_gradient(self, y, pred, **kargs):
        """Sign of the residual: 1.0 if y - pred > 0.0 else -1.0."""
        return 2.0 * (y - pred.ravel() > 0.0) - 1.0
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Set each leaf to the weighted median of its residuals."""
        in_leaf = np.where(terminal_regions == leaf)[0]
        weights = sample_weight.take(in_leaf, axis=0)
        diff = y.take(in_leaf, axis=0) - pred.take(in_leaf, axis=0)
        tree.value[leaf, 0, 0] = _weighted_percentile(diff, weights, percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.
    M-Regression proposed in Friedman 2001.
    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """
    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        # alpha sets the percentile of |residual| used as the quadratic /
        # linear transition point gamma (roughly: fraction of inliers)
        self.alpha = alpha
        # gamma is data-dependent: computed in negative_gradient() and
        # cached here for reuse by __call__ and _update_terminal_region
        self.gamma = None
    def init_estimator(self):
        # the median is a robust starting point
        return QuantileEstimator(alpha=0.5)
    def __call__(self, y, pred, sample_weight=None):
        """Huber loss: quadratic for |diff| <= gamma, linear beyond."""
        pred = pred.ravel()
        diff = y - pred
        # prefer the gamma cached by negative_gradient() so loss and
        # gradient agree within a boosting stage
        gamma = self.gamma
        if gamma is None:
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            # quadratic part for inliers, linear part for outliers
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss
    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        """Negative gradient: the raw residual for inliers, clipped to
        +/- gamma for outliers."""
        pred = pred.ravel()
        diff = y - pred
        # gamma = alpha-percentile of the absolute residuals
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        # cache for __call__ and _update_terminal_region (see __init__)
        self.gamma = gamma
        return residual
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Leaf value: weighted median of the residuals plus the mean of
        # the deviations from that median, clipped at gamma (robust
        # location step from Friedman 2001).
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.
    """
    def __init__(self, n_classes, alpha=0.9):
        # alpha : the quantile in (0, 1) to be estimated.
        super(QuantileLossFunction, self).__init__(n_classes)
        assert 0 < alpha < 1.0
        self.alpha = alpha
        self.percentile = alpha * 100.0
    def init_estimator(self):
        # start from the empirical alpha-quantile of the targets
        return QuantileEstimator(self.alpha)
    def __call__(self, y, pred, sample_weight=None):
        """Compute the (weighted) mean pinball loss.

        Under-predictions (y > pred) are charged ``alpha`` per unit of
        error, over-predictions ``1 - alpha`` per unit of error.
        """
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha
        mask = y > pred
        # BUG FIX: diff[~mask] <= 0, so the over-prediction term must be
        # *subtracted* to contribute a non-negative amount. The previous
        # code added it, understating (possibly negating) the loss.
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                     (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss
    def negative_gradient(self, y, pred, **kargs):
        """alpha where y > pred, -(1 - alpha) elsewhere."""
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Set each leaf to the weighted alpha-percentile of its residuals."""
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)
        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """
    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        Losses that do not support probabilities keep this default
        implementation, which raises TypeError.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)
    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.
        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss for binary classification.

    Binary classification is a special case: only a single tree is fit
    per stage instead of ``n_classes`` trees.
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)
    def init_estimator(self):
        return LogOddsEstimator()
    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v))
        pred = pred.ravel()
        log_likelihood = (y * pred) - np.logaddexp(0.0, pred)
        if sample_weight is None:
            return -2.0 * np.mean(log_likelihood)
        return (-2.0 / sample_weight.sum() *
                np.sum(sample_weight * log_likelihood))
    def negative_gradient(self, y, pred, **kargs):
        """Residual: y minus the predicted probability expit(pred)."""
        return y - expit(pred.ravel())
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.

        The node estimate is sum(w * (y - prob)) / sum(w * prob * (1 - prob)),
        using the identity y - prob = residual.
        """
        in_leaf = np.where(terminal_regions == leaf)[0]
        res = residual.take(in_leaf, axis=0)
        y_leaf = y.take(in_leaf, axis=0)
        weights = sample_weight.take(in_leaf, axis=0)
        numerator = np.sum(weights * res)
        # prob = y - residual, hence prob * (1 - prob) below
        denominator = np.sum(weights * (y_leaf - res) * (1 - y_leaf + res))
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba
    def _score_to_decision(self, score):
        return np.argmax(self._score_to_proba(score), axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """
    is_multi_class = True
    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)
    def init_estimator(self):
        return PriorProbabilityEstimator()
    def __call__(self, y, pred, sample_weight=None):
        """Compute the multinomial deviance, summed over samples."""
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k
        # per-sample deviance: -log p(y_i | pred_i)
        sample_deviance = -1 * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)
        if sample_weight is None:
            return np.sum(sample_deviance)
        # BUG FIX: the weight must scale the whole per-sample deviance.
        # Previously only the -(Y * pred) term was multiplied by
        # sample_weight while the logsumexp normaliser was left unweighted.
        return np.sum(sample_weight * sample_deviance)
    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        numerator = np.sum(sample_weight * residual)
        # multi-class Newton step carries a (K - 1) / K factor
        numerator *= (self.K - 1) / self.K
        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))
        # guard against division by zero in pure leaves
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
    def _score_to_proba(self, score):
        # softmax in a numerically stable form
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss function for binary classification.

    Same loss as AdaBoost.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)
    def init_estimator(self):
        return ScaledLogOddsEstimator()
    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean of exp(-(2y - 1) * pred)."""
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
    def negative_gradient(self, y, pred, **kargs):
        # map labels {0, 1} to {1, -1} and differentiate the loss
        y_ = -(2. * y - 1.)
        return y_ * np.exp(y_ * pred.ravel())
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Weighted line-search step for the leaf value."""
        terminal_region = np.where(terminal_regions == leaf)[0]
        pred = pred.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)
        y_ = 2. * y - 1.
        numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
        denominator = np.sum(sample_weight * np.exp(-y_ * pred))
        # guard against division by zero in pure leaves
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator
    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(2.0 * score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba
    def _score_to_decision(self, score):
        # FIX: use the builtin ``int`` -- ``np.int`` was a deprecated
        # alias of it and has been removed in NumPy 1.24.
        return (score.ravel() >= 0.0).astype(int)
# Registry mapping the user-facing ``loss`` strings to loss classes.
# 'deviance' maps to None because it stands for both the binomial and the
# multinomial deviance -- presumably resolved from the number of classes
# at fit time, outside this table.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'deviance': None, # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }
# Extra ``init`` estimators selectable by name.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Print per-iteration fitting progress to stdout.

    With ``verbose == 1`` a line is printed every ``verbose_mod``
    iterations (the modulus grows by powers of ten as the fit proceeds);
    with ``verbose > 1`` every iteration is reported.
    """
    def __init__(self, verbose):
        self.verbose = verbose
    def init(self, est, begin_at_stage=0):
        """Print the table header and reset the timing state."""
        # header fields and line format str
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        if est.subsample < 1:
            # subsampled fits additionally report out-of-bag improvement
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')
        print(('%10s ' + '%16s ' *
               (len(header_fields) - 1)) % tuple(header_fields))
        self.verbose_fmt = ' '.join(verbose_fmt)
        # report every iteration at first; stretched later in update()
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage
    def update(self, j, est):
        """Possibly print a progress line for (absolute) iteration ``j``."""
        do_oob = est.subsample < 1
        # iteration index relative to where this fit started (warm starts
        # may resume from a non-zero stage)
        i = j - self.begin_at_stage
        if (i + 1) % self.verbose_mod != 0:
            return
        oob_impr = est.oob_improvement_[j] if do_oob else 0
        remaining_time = ((est.n_estimators - (j + 1)) *
                          (time() - self.start_time) / float(i + 1))
        if remaining_time > 60:
            remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
        else:
            remaining_time = '{0:.2f}s'.format(remaining_time)
        print(self.verbose_fmt.format(iter=j + 1,
                                      train_score=est.train_score_[j],
                                      oob_impr=oob_impr,
                                      remaining_time=remaining_time))
        if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
            # adjust verbose frequency (powers of 10)
            self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
    def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
                   random_state, X_idx_sorted, X_csc=None, X_csr=None):
        """Fit another stage of ``n_classes_`` trees to the boosting model.

        Fits one regression tree per class (``loss.K`` trees) on the
        negative gradient of the loss, stores them in
        ``self.estimators_[i]`` and returns the (updated) predictions.

        Parameters
        ----------
        i : int
            Index of the boosting stage being fitted.
        X, y : training data and (possibly encoded) targets.
        y_pred : array
            Current ensemble predictions.
        sample_weight : array
            Per-sample weights.
        sample_mask : bool array
            In-bag mask; only relevant when ``subsample < 1``.
        random_state : RandomState instance forwarded to the trees.
        X_idx_sorted : array or None
            Presorted feature indices (dense input with presorting only).
        X_csc, X_csr : sparse CSC/CSR copies of ``X`` when it is sparse.

        Returns
        -------
        y_pred : array
            Predictions after adding this stage.
        """
        assert sample_mask.dtype == np.bool
        loss = self.loss_
        original_y = y
        for k in range(loss.K):
            if loss.is_multi_class:
                # one-vs-rest binary target for class k
                y = np.array(original_y == k, dtype=np.float64)
            residual = loss.negative_gradient(y, y_pred, k=k,
                                              sample_weight=sample_weight)
            # induce regression tree on residuals
            tree = DecisionTreeRegressor(
                criterion='friedman_mse',
                splitter='best',
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                max_features=self.max_features,
                max_leaf_nodes=self.max_leaf_nodes,
                random_state=random_state,
                presort=self.presort)
            if self.subsample < 1.0:
                # no inplace multiplication!
                sample_weight = sample_weight * sample_mask.astype(np.float64)
            # CSC is the efficient layout for tree fitting on sparse input
            if X_csc is not None:
                tree.fit(X_csc, residual, sample_weight=sample_weight,
                         check_input=False, X_idx_sorted=X_idx_sorted)
            else:
                tree.fit(X, residual, sample_weight=sample_weight,
                         check_input=False, X_idx_sorted=X_idx_sorted)
            # update tree leaves (CSR is efficient for row-wise prediction)
            if X_csr is not None:
                loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
                                             sample_weight, sample_mask,
                                             self.learning_rate, k=k)
            else:
                loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
                                             sample_weight, sample_mask,
                                             self.learning_rate, k=k)
            # add tree to ensemble
            self.estimators_[i, k] = tree
        return y_pred
    def _check_params(self):
        """Check validity of parameters and raise ValueError if not valid.

        Also resolves derived state: ``self.loss_`` (the concrete loss
        object) and ``self.max_features_`` (the integer feature count).
        """
        if self.n_estimators <= 0:
            raise ValueError("n_estimators must be greater than 0 but "
                             "was %r" % self.n_estimators)
        if self.learning_rate <= 0.0:
            raise ValueError("learning_rate must be greater than 0 but "
                             "was %r" % self.learning_rate)
        if (self.loss not in self._SUPPORTED_LOSS
                or self.loss not in LOSS_FUNCTIONS):
            raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
        # 'deviance' is an alias that picks binomial vs. multinomial
        # deviance from the number of classes
        if self.loss == 'deviance':
            loss_class = (MultinomialDeviance
                          if len(self.classes_) > 2
                          else BinomialDeviance)
        else:
            loss_class = LOSS_FUNCTIONS[self.loss]
        if self.loss in ('huber', 'quantile'):
            # only these two losses take the alpha quantile parameter
            self.loss_ = loss_class(self.n_classes_, self.alpha)
        else:
            self.loss_ = loss_class(self.n_classes_)
        if not (0.0 < self.subsample <= 1.0):
            raise ValueError("subsample must be in (0,1] but "
                             "was %r" % self.subsample)
        # init may be None, the name of a builtin estimator, or an object
        # providing fit/predict
        if self.init is not None:
            if isinstance(self.init, six.string_types):
                if self.init not in INIT_ESTIMATORS:
                    raise ValueError('init="%s" is not supported' % self.init)
            else:
                if (not hasattr(self.init, 'fit')
                        or not hasattr(self.init, 'predict')):
                    raise ValueError("init=%r must be valid BaseEstimator "
                                     "and support both fit and "
                                     "predict" % self.init)
        if not (0.0 < self.alpha < 1.0):
            raise ValueError("alpha must be in (0.0, 1.0) but "
                             "was %r" % self.alpha)
        # translate max_features (string/None/int/float) into an int count
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # if is_classification
                if self.n_classes_ > 1:
                    max_features = max(1, int(np.sqrt(self.n_features)))
                else:
                    # is regression
                    max_features = self.n_features
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features)))
            else:
                raise ValueError("Invalid value for max_features: %r. "
                                 "Allowed string values are 'auto', 'sqrt' "
                                 "or 'log2'." % self.max_features)
        elif self.max_features is None:
            max_features = self.n_features
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if 0. < self.max_features <= 1.:
                max_features = max(int(self.max_features * self.n_features), 1)
            else:
                raise ValueError("max_features must be in (0, n_features]")
        self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
    def fit(self, X, y, sample_weight=None, monitor=None):
        """Fit the gradient boosting model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression)
            For classification, labels must correspond to classes.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspect, and
            snapshoting.
        Returns
        -------
        self : object
            Returns self.
        """
        # if not warmstart - clear the estimator state
        if not self.warm_start:
            self._clear_state()
        # Check input
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
        n_samples, self.n_features = X.shape
        if sample_weight is None:
            sample_weight = np.ones(n_samples, dtype=np.float32)
        else:
            sample_weight = column_or_1d(sample_weight, warn=True)
        check_consistent_length(X, y, sample_weight)
        # _validate_y is overridden by the classifier to encode labels
        y = self._validate_y(y)
        random_state = check_random_state(self.random_state)
        # validates hyper-parameters and resolves loss_/max_features_
        self._check_params()
        if not self._is_initialized():
            # init state
            self._init_state()
            # fit initial model - FIXME make sample_weight optional
            self.init_.fit(X, y, sample_weight)
            # init predictions
            y_pred = self.init_.predict(X)
            begin_at_stage = 0
        else:
            # add more estimators to fitted model
            # invariant: warm_start = True
            if self.n_estimators < self.estimators_.shape[0]:
                raise ValueError('n_estimators=%d must be larger or equal to '
                                 'estimators_.shape[0]=%d when '
                                 'warm_start==True'
                                 % (self.n_estimators,
                                    self.estimators_.shape[0]))
            begin_at_stage = self.estimators_.shape[0]
            # resume from the current ensemble's predictions
            y_pred = self._decision_function(X)
            self._resize_state()
        X_idx_sorted = None
        presort = self.presort
        # Allow presort to be 'auto', which means True if the dataset is dense,
        # otherwise it will be False.
        if presort == 'auto' and issparse(X):
            presort = False
        elif presort == 'auto':
            presort = True
        if presort == True:
            if issparse(X):
                raise ValueError("Presorting is not supported for sparse matrices.")
            else:
                X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
                                                 dtype=np.int32)
        # fit the boosting stages
        n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
                                    begin_at_stage, monitor, X_idx_sorted)
        # change shape of arrays after fit (early-stopping or additional ests)
        if n_stages != self.estimators_.shape[0]:
            self.estimators_ = self.estimators_[:n_stages]
            self.train_score_ = self.train_score_[:n_stages]
            if hasattr(self, 'oob_improvement_'):
                self.oob_improvement_ = self.oob_improvement_[:n_stages]
        return self
    def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
                    begin_at_stage=0, monitor=None, X_idx_sorted=None):
        """Iteratively fits the stages.
        For each stage it computes the progress (OOB, train score)
        and delegates to ``_fit_stage``.
        Returns the number of stages fit; might differ from ``n_estimators``
        due to early stopping.
        """
        n_samples = X.shape[0]
        do_oob = self.subsample < 1.0
        sample_mask = np.ones((n_samples, ), dtype=np.bool)
        # number of in-bag samples per stage when subsampling
        n_inbag = max(1, int(self.subsample * n_samples))
        loss_ = self.loss_
        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.
        if self.verbose:
            verbose_reporter = VerboseReporter(self.verbose)
            verbose_reporter.init(self, begin_at_stage)
        # pre-convert sparse input once: CSC for fitting, CSR for prediction
        X_csc = csc_matrix(X) if issparse(X) else None
        X_csr = csr_matrix(X) if issparse(X) else None
        # perform boosting iterations
        i = begin_at_stage
        for i in range(begin_at_stage, self.n_estimators):
            # subsampling
            if do_oob:
                sample_mask = _random_sample_mask(n_samples, n_inbag,
                                                  random_state)
                # OOB score before adding this stage
                old_oob_score = loss_(y[~sample_mask],
                                      y_pred[~sample_mask],
                                      sample_weight[~sample_mask])
            # fit next stage of trees
            y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
                                     sample_mask, random_state, X_idx_sorted,
                                     X_csc, X_csr)
            # track deviance (= loss)
            if do_oob:
                self.train_score_[i] = loss_(y[sample_mask],
                                             y_pred[sample_mask],
                                             sample_weight[sample_mask])
                # positive improvement = this stage reduced the OOB loss
                self.oob_improvement_[i] = (
                    old_oob_score - loss_(y[~sample_mask],
                                          y_pred[~sample_mask],
                                          sample_weight[~sample_mask]))
            else:
                # no need to fancy index w/ no subsampling
                self.train_score_[i] = loss_(y, y_pred, sample_weight)
            if self.verbose > 0:
                verbose_reporter.update(i, self)
            if monitor is not None:
                early_stopping = monitor(i, self, locals())
                if early_stopping:
                    break
        return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
*presort* parameter.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
    def predict(self, X):
        """Predict class for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        score = self.decision_function(X)
        # map raw scores to class indices, then back to original labels
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
.. versionadded:: 0.17
optional parameter *presort*.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
             subsample=1.0, min_samples_split=2,
             min_samples_leaf=1, min_weight_fraction_leaf=0.,
             max_depth=3, init=None, random_state=None,
             max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
             warm_start=False, presort='auto'):
    # Thin wrapper: forwards every hyper-parameter unchanged to the shared
    # gradient-boosting base class __init__ (base class not visible here).
    super(GradientBoostingRegressor, self).__init__(
        loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
        min_samples_split=min_samples_split,
        min_samples_leaf=min_samples_leaf,
        min_weight_fraction_leaf=min_weight_fraction_leaf,
        max_depth=max_depth, init=init, subsample=subsample,
        max_features=max_features,
        random_state=random_state, alpha=alpha, verbose=verbose,
        max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
        presort=presort)
def predict(self, X):
    """Predict regression target for X.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted values.
    """
    # Validate/convert the input once, then flatten the decision values
    # to a 1-D prediction vector.
    X = check_array(X, dtype=DTYPE, order="C")
    return self._decision_function(X).ravel()
def staged_predict(self, X):
    """Predict regression target at each stage for X.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : generator of array of shape = [n_samples]
        The predicted value of the input samples.
    """
    # Lazily yield the cumulative prediction after each boosting stage,
    # flattened to 1-D just like `predict`.
    for y in self._staged_decision_function(X):
        yield y.ravel()
def apply(self, X):
    """Apply trees in the ensemble to X, return leaf indices.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : array-like or sparse matrix, shape = [n_samples, n_features]
        The input samples. Internally, it will be converted to
        ``dtype=np.float32`` and if a sparse matrix is provided
        to a sparse ``csr_matrix``.

    Returns
    -------
    X_leaves : array_like, shape = [n_samples, n_estimators]
        For each datapoint x in X and for each tree in the ensemble,
        return the index of the leaf x ends up in each estimator.
    """
    leaves = super(GradientBoostingRegressor, self).apply(X)
    # Regression grows a single tree per boosting stage, so collapse the
    # base-class result to (n_samples, n_estimators).
    # NOTE(review): assumes the base apply() returns one extra trailing
    # axis -- confirm against the base implementation.
    leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
    return leaves
| bsd-3-clause |
udacity/deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    """Load one CIFAR-10 training batch from disk.

    Returns a tuple ``(features, labels)`` where ``features`` is reshaped
    from the flat CHW pickle layout to ``(n, 32, 32, 3)`` (NHWC) and
    ``labels`` is the pickled label list.
    """
    batch_path = '{}/data_batch_{}'.format(cifar10_dataset_folder_path, batch_id)
    with open(batch_path, mode='rb') as fh:
        batch = pickle.load(fh, encoding='latin1')

    raw = batch['data']
    features = raw.reshape((len(raw), 3, 32, 32)).transpose(0, 2, 3, 1)
    return features, batch['labels']
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
    """
    Display stats of the dataset batch and show one sample image.
    """
    batch_ids = list(range(1, 6))  # CIFAR-10 ships five training batches

    if batch_id not in batch_ids:
        print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
        return None

    features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)

    if not (0 <= sample_id < len(features)):
        print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
        return None

    print('\nStats of batch {}:'.format(batch_id))
    print('Samples: {}'.format(len(features)))
    # Per-label frequency table, e.g. {0: 1005, 1: 974, ...}
    print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
    print('First 20 Labels: {}'.format(labels[:20]))

    sample_image = features[sample_id]
    sample_label = labels[sample_id]
    label_names = _load_label_names()

    print('\nExample of Image {}:'.format(sample_id))
    print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
    print('Image - Shape: {}'.format(sample_image.shape))
    print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
    # Display the selected sample without axis decorations.
    plt.axis('off')
    plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
    """
    Preprocess Training and Validation Data

    Saves per-batch training pickles, one validation pickle (last 10% of
    every batch) and one test pickle, using the supplied `normalize` and
    `one_hot_encode` callables.
    """
    n_batches = 5
    valid_features = []
    valid_labels = []

    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
        # Hold out the last 10% of every batch for validation.
        validation_count = int(len(features) * 0.1)

        # Preprocess and save a batch of training data
        _preprocess_and_save(
            normalize,
            one_hot_encode,
            features[:-validation_count],
            labels[:-validation_count],
            'preprocess_batch_' + str(batch_i) + '.p')

        # Use a portion of training batch for validation
        valid_features.extend(features[-validation_count:])
        valid_labels.extend(labels[-validation_count:])

    # Preprocess and Save all validation data
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(valid_features),
        np.array(valid_labels),
        'preprocess_validation.p')

    with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')

    # load the test data (flat CHW pickle layout -> NHWC)
    test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    test_labels = batch['labels']

    # Preprocess and Save all test data
    _preprocess_and_save(
        normalize,
        one_hot_encode,
        np.array(test_features),
        np.array(test_labels),
        'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
    """Yield ``(features, labels)`` slices of at most *batch_size* items."""
    total = len(features)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield features[start:stop], labels[start:stop]
        start = stop
def load_preprocess_training_batch(batch_id, batch_size):
    """
    Load the Preprocessed Training data and return them in batches of <batch_size> or less
    """
    filename = 'preprocess_batch_' + str(batch_id) + '.p'
    features, labels = pickle.load(open(filename, mode='rb'))

    # Return the training data in batches of size <batch_size> or less
    # (a generator -- see batch_features_labels).
    return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
    """Plot sample images next to bar charts of their top softmax scores."""
    # NOTE(review): `predictions` is expected to expose `.indices` and
    # `.values` (looks like a tf.nn.top_k result) -- confirm at call site.
    n_classes = 10
    label_names = _load_label_names()
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(range(n_classes))
    # Convert one-hot labels back to integer label ids.
    label_ids = label_binarizer.inverse_transform(np.array(labels))

    fig, axies = plt.subplots(nrows=4, ncols=2)
    fig.tight_layout()
    fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)

    n_predictions = 3
    margin = 0.05
    ind = np.arange(n_predictions)
    width = (1. - 2. * margin) / n_predictions

    for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
        pred_names = [label_names[pred_i] for pred_i in pred_indicies]
        correct_name = label_names[label_id]

        axies[image_i][0].imshow(feature)
        axies[image_i][0].set_title(correct_name)
        axies[image_i][0].set_axis_off()

        # Bars reversed so the highest probability ends up on top.
        axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
        axies[image_i][1].set_yticks(ind + margin)
        axies[image_i][1].set_yticklabels(pred_names[::-1])
        axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
changyy/py-MLHelper | org/changyy/ml/feature_engineering.py | 1 | 3172 | def fill_value_via_statistics_handler(panda_obj, column_name='age', max_value=120):
import pandas
import numpy
if type(panda_obj) is not pandas.core.frame.DataFrame:
return panda_obj
panda_obj.loc[ panda_obj[column_name][panda_obj[column_name] > max_value ].index , column_name ] = numpy.nan
value_avg = panda_obj[column_name].mean()
value_std = panda_obj[column_name].std()
value_null_count = panda_obj[column_name].isnull().sum()
numpy.random.seed(0)
value_null_random_list = numpy.random.randint(value_avg - value_std, value_avg + value_std, size=value_null_count)
panda_obj.loc[ panda_obj[column_name][numpy.isnan(panda_obj[column_name])].index , column_name ] = value_null_random_list
panda_obj[column_name] = panda_obj[column_name].astype(int)
return panda_obj
def data_functional_handler_process(panda_obj, column_handler={}):
    """Run per-column handler callables against a DataFrame.

    For every column of `panda_obj` that has an entry in `column_handler`,
    the corresponding callable is invoked with the whole DataFrame (handlers
    are expected to mutate it in place).  Non-DataFrame input, or a
    non-dict handler map, is returned untouched.
    """
    import pandas
    import numpy
    if type(panda_obj) is not pandas.core.frame.DataFrame:
        return panda_obj
    if type(column_handler) is not dict:
        return panda_obj
    for col in set(panda_obj.columns):
        if col in column_handler:
            column_handler[col](panda_obj)
    return panda_obj
def data_numeric_handler_process(panda_obj, skip_columns=[], target_columns=[], onehotencode_columns=[], lookup_table={}):
    """Map categorical columns to integer codes, optionally one-hot encoding.

    Extends and reuses `lookup_table` so repeated calls encode values
    consistently across DataFrames.  Mutates `panda_obj` in place and
    returns ``(lookup_table, panda_obj)``.
    """
    import pandas
    import numpy
    from sklearn.preprocessing import OneHotEncoder
    if type(panda_obj) is not pandas.core.frame.DataFrame:
        return lookup_table, panda_obj
    # Default to every column, minus explicit skips and one-hot columns.
    if target_columns is None or len(target_columns) == 0:
        target_columns = list(set(panda_obj.columns))
    if skip_columns is not None and len(skip_columns) > 0:
        for column in skip_columns:
            if column in target_columns:
                target_columns.remove(column)
    if onehotencode_columns is not None and len(onehotencode_columns) > 0:
        for column in onehotencode_columns:
            if column in target_columns:
                target_columns.remove(column)
    for column in target_columns:
        # Build or extend the value -> integer-code map.  Codes start at 1;
        # unmapped values fall through map() as NaN and become 0 below.
        if column not in lookup_table:
            lookup_table[column] = dict((value, index + 1) for index, value in enumerate(panda_obj[column].unique()))
        else:
            max_index = len(lookup_table[column]) + 1
            for value in panda_obj[column].unique():
                if value not in lookup_table[column]:
                    lookup_table[column][value] = max_index
                    max_index = max_index + 1
        panda_obj[column] = panda_obj[column].map(lookup_table[column])
        panda_obj[column] = panda_obj[column].fillna(0)
        panda_obj[column] = panda_obj[column].astype(int)
    for column in onehotencode_columns:
        # One-hot columns use 0-based codes, expanded into
        # 'OneHotEncode-<column>-<i>' indicator columns.
        if column not in lookup_table:
            values = list(set(panda_obj[column].unique()))
            lookup_table[column] = dict((value, index) for index, value in enumerate(panda_obj[column].unique()))
        panda_obj[column] = panda_obj[column].map(lookup_table[column])
        #panda_obj[column] = panda_obj[column].fillna(0)
        panda_obj[column] = panda_obj[column].astype(int)
        # NOTE(review): `n_values` and `sparse` were removed from modern
        # scikit-learn's OneHotEncoder (use `categories` / `sparse_output`)
        # -- this code targets an older sklearn.
        onehot_encoder = OneHotEncoder(sparse=False, n_values=len(lookup_table[column]))
        onehot_result = onehot_encoder.fit_transform(panda_obj[column].values.reshape(panda_obj[column].shape[0], 1))
        for index in range(len(lookup_table[column])):
            panda_obj['OneHotEncode-'+column+'-'+str(index)] = onehot_result[:, index]
    return lookup_table, panda_obj
| mit |
wdurhamh/statsmodels | statsmodels/sandbox/examples/example_garch.py | 31 | 2294 | import numpy as np
import matplotlib.pyplot as plt
#import scikits.timeseries as ts
#import scikits.timeseries.lib.plotlib as tpl
import statsmodels.api as sm
#from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
#dta2 = ts.tsfromtxt(r'gspc_table.csv',
#                    datecols=0, skiprows=0, delimiter=',',names=True, freq='D')
#print dta2

# Load S&P 500 closing prices and turn them into scaled log-returns
# (last 2000 observations, scaled by 1000).
aa = np.genfromtxt(r'gspc_table.csv', skip_header=0, delimiter=',', names=True)
cl = aa['Close']
ret = np.diff(np.log(cl))[-2000:]*1000.

# Fit a GARCH model with one AR and one MA lag on the demeaned returns.
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.1, 0.1, 0.1, 0.1])
ggres = ggmod.fit(start_params=np.array([-0.1, 0.1, 0.1, 0.0]),
                  maxiter=1000, method='bfgs')
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')

# Optional cross-check against R's fGarch via rpy (disabled by default).
use_rpy = False
if use_rpy:
    from rpy import r
    r.library('fGarch')
    f = r.formula('~garch(1, 1)')
    fit = r.garchFit(f, data=ret - ret.mean(), include_mean=False)
    f = r.formula('~arma(1,1) + ~garch(1, 1)')
    fit = r.garchFit(f, data=ret)

# Garch0 variant with one AR lag.
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
# NOTE(review): this sets `ggmod.nma`, not `ggmod0.nma` -- looks like a typo.
ggmod.nma = 1
start_params = np.array([-0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)

# Direct GARCH(1,1) log-likelihood maximization for comparison.
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, ret - ret.mean())[0], [0.01, 0.1, 0.1])
print(g11res)
llf = loglike_GARCH11(g11res, ret - ret.mean())
print(llf[0])

# Repeat the experiments with two AR/MA lags.
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 2
# NOTE(review): again sets `ggmod.nma` instead of `ggmod0.nma`.
ggmod.nma = 2
start_params = np.array([-0.1, -0.1, 0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)#, method='ncg')
print('ggres0.params', ggres0.params)

ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 2
ggmod.nma = 2
start_params = np.array([-0.1, -0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
ggmod._start_params = start_params
ggres = ggmod.fit(start_params=start_params, maxiter=1000)#,method='bfgs')
print('ggres.params', ggres.params)
| bsd-3-clause |
astocko/statsmodels | statsmodels/tools/testing.py | 23 | 1443 | """assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
    """Remove a trailing release-candidate tag such as 'rc1' from *version*."""
    match = re.search(r"rc\d+$", version)
    if match is None:
        return version
    return version[:match.start()]
def is_pandas_min_version(min_version):
    '''check whether pandas is at least min_version

    Bug fix: the ``pandas.version`` shim module no longer exists in modern
    pandas; fall back to ``pandas.__version__`` when the old import fails.
    '''
    try:
        from pandas.version import short_version as pversion
    except ImportError:
        pversion = pandas.__version__
    return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
    """Version-aware assert_equal.

    Dispatches pandas containers to pandas' own comparison helpers (better
    diffs, dtype/index checks) and everything else to numpy's assert_equal.

    Bug fix: previously the numpy fallbacks hard-coded ``err_msg=''`` and
    ``verbose=True``, silently dropping the caller's arguments; they are now
    passed through.
    """
    if not is_pandas_min_version('0.14.1'):
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
    else:
        if isinstance(desired, pandas.Index):
            pdt.assert_index_equal(actual, desired)
        elif isinstance(desired, pandas.Series):
            pdt.assert_series_equal(actual, desired, **kwds)
        elif isinstance(desired, pandas.DataFrame):
            pdt.assert_frame_equal(actual, desired, **kwds)
        else:
            npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
| bsd-3-clause |
Neurita/darwin | darwin/gini.py | 1 | 4714 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.cross_validation import LeaveOneOut
from .pipeline import ClassificationPipeline
class FeaturesGiniIndex(object):
    """This class wraps a classification method to estimate discrimination
    Gini indices from a set of features using an sklearn.ExtraTreesClassifier
    """

    def fit_transform(self, samples, targets, n_cpus=1):
        """Return the average Gini-index for each sample in a LeaveOneOut
        classification Cross-validation test using ExtraTreesClassifier.

        Side effects: stores the raw pipeline outputs on the instance as
        ``results_`` and ``metrics_`` for later inspection.

        Returns
        -------
        array_like
            Vector of the size of number of features in each sample.
        """
        n_feats = samples.shape[1]
        # Delegate fitting and cross-validation to the project pipeline.
        pipe = ClassificationPipeline(clfmethod='extratrees', n_feats=n_feats,
                                      cvmethod='loo', n_cpus=n_cpus)
        self.results_, self.metrics_ = pipe.cross_validation(samples, targets)
        # One importance vector per CV fold; average across folds.
        ginis = np.array(list(self.results_.features_importance.values()))
        return ginis.mean(axis=0)
def get_gini_indices(samples, targets):
    """Average ExtraTrees feature importances over leave-one-out CV folds.

    Parameters
    ----------
    samples : np.ndarray, shape (n_samples, n_features)
        Feature matrix; may contain NaNs (imputed per fold with the
        training-fold column means).
    targets : array-like, shape (n_samples,)
        Class labels.

    Returns
    -------
    np.ndarray, shape (n_features,)
        Mean feature importance per feature, rounded to 4 decimals.

    Bug fixes
    ---------
    * ``scipy.stats.nanmean`` was removed from scipy; use
      ``np.nanmean(..., axis=0)`` (same column-wise semantics).
    * The accumulated importances are now averaged over the number of CV
      folds instead of ``x_train.shape[0]`` (which is the size of the last
      training fold, ``n_samples - 1`` under LOO).
    """
    # Leave One Out
    cv = LeaveOneOut(len(targets))
    feat_imp = np.zeros(samples.shape[1])
    n_folds = 0
    for train, test in cv:
        # Fancy indexing copies, so the in-place NaN fixes below do not
        # mutate `samples`.
        x_train, x_test = samples[train, :], samples[test, :]
        y_train, y_test = targets[train], targets[test]

        # Impute NaNs with the column means of the training fold.
        nan_mean = np.nanmean(x_train, axis=0)
        nan_train = np.isnan(x_train)
        nan_test = np.isnan(x_test)
        x_test[nan_test] = 0
        x_test = x_test + nan_test * nan_mean
        x_train[nan_train] = 0
        x_train = x_train + nan_train * nan_mean

        # Z-score the training fold; tiny noise avoids division by zero
        # for constant columns.
        std = np.std(x_train, axis=0)
        med = np.mean(x_train, axis=0)
        noise = [np.random.uniform(-1.e-10, 1.e-10) for p in range(0, x_train.shape[1])]
        x_train = (x_train - med) / (std + noise)
        #x_test = (x_test-med)/(std+noise)

        # Accumulate ExtraTrees Gini importances for this fold.
        classifier = ExtraTreesClassifier()
        classifier = classifier.fit(x_train, y_train)
        feat_imp += classifier.feature_importances_
        n_folds += 1

    res = np.around(feat_imp / max(n_folds, 1), decimals=4)
    return res
def plot_gini_indices(ginis, var_names, comparison_name,
                      num_vars_to_plot=20):
    """Plots the Gini Indices of the top num_vars_to_plot
    variables when discriminating the samples according to targets.

    Parameters
    ----------
    ginis : np.ndarray
        Shape 1 x M where M is the number of variables
    var_names: list of strings
        Names of the variables for plotting, in the same order as in
        ginis.
    comparison_name: string
        Plot base title
    num_vars_to_plot: int

    Returns
    -------
    matplotlib.figure.Figure
        The created bar-chart figure.
    """
    if num_vars_to_plot > len(ginis):
        num_vars_to_plot = len(ginis)

    # Sort descending and keep only the top variables.
    ginis_sort_idx = np.argsort(ginis)[::-1]
    idx_for_plot = ginis_sort_idx[0:num_vars_to_plot]
    sorted_ginis = ginis[idx_for_plot]
    plot_var_names = np.array(var_names)[idx_for_plot]

    fig = plt.figure()#figsize=(6, 4))
    ax = plt.subplot(111)

    #plot bars
    plt.bar(range(num_vars_to_plot), sorted_ginis, color="b",
            align="center",
            alpha=0.5,   # transparency
            width=0.5,)  # smaller bar width

    # set height of the y-axis
    #max_y = max(zip(mean_values, variance)) # returns a tuple
    #plt.ylim([0, (max_y[0] + max_y[1]) * 1.1])
    plt.ylim([0, 1])
    plt.xlim([-1, num_vars_to_plot])

    # hiding axis ticks
    plt.tick_params(axis="both", which="both", bottom="off", top="off",
                    labelbottom="on", left="off", right="off", labelleft="on")

    # adding custom horizontal grid lines
    for y in np.linspace(0.2, 1, 4):
        plt.axhline(y=y, xmin=0, xmax=4,
                    color="gray", linestyle="--", alpha=0.4)

    # remove axis spines
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    #ax.spines["bottom"].set_visible(False)
    ax.spines["left"].set_visible(False)

    # set axes labels and title
    plt.title("Gini index {}".format(comparison_name),
              horizontalalignment='center',
              fontsize=14)
    plt.xticks(range(num_vars_to_plot), plot_var_names, rotation=90)

    return fig
| bsd-3-clause |
WarrenWeckesser/scipy | scipy/signal/bsplines.py | 12 | 19509 | from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb
from scipy._lib._util import float_factorial
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def spline_filter(Iin, lmbda=5.0):
    """Smoothing spline (cubic) filtering of a rank-2 array.

    Computes the cubic smoothing-spline coefficients of `Iin` with fall-off
    `lmbda` and convolves them (mirror-symmetrically) with the length-3 FIR
    window ``[1., 4., 1.]/6.`` along both axes.  Complex input is filtered
    by processing its real and imaginary parts separately.

    Parameters
    ----------
    Iin : array_like
        Input data set (real or complex floating point, rank 2).
    lmbda : float, optional
        Spline smoothing fall-off value, default is 5.0.

    Returns
    -------
    res : ndarray
        Filtered input data, cast back to the input dtype.

    Raises
    ------
    TypeError
        If `Iin` is not single/double-precision real or complex floating
        point.
    """
    intype = Iin.dtype.char
    hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
    if intype in ('F', 'D'):
        Iin = Iin.astype('F')
        real_part = sepfir2d(cspline2d(Iin.real, lmbda), hcol, hcol)
        imag_part = sepfir2d(cspline2d(Iin.imag, lmbda), hcol, hcol)
        return (real_part + 1j * imag_part).astype(intype)
    if intype in ('f', 'd'):
        return sepfir2d(cspline2d(Iin, lmbda), hcol, hcol).astype(intype)
    raise TypeError("Invalid data type for Iin")
# Memoizes (funclist, condfuncs) per spline order; the pieces are pure
# functions, so they can be shared across calls.
_splinefunc_cache = {}


def _bspline_piecefunctions(order):
    """Returns the function defined over the left-side pieces for a bspline of
    a given order.

    The 0th piece is the first one less than 0. The last piece is a function
    identical to 0 (returned as the constant 0). (There are order//2 + 2 total
    pieces).

    Also returns the condition functions that when evaluated return boolean
    arrays for use with `numpy.piecewise`.
    """
    try:
        return _splinefunc_cache[order]
    except KeyError:
        pass

    def condfuncgen(num, val1, val2):
        # num selects the interval style: 0 -> closed [val2, val1],
        # 2 -> unbounded (-inf, val2], otherwise half-open [val2, val1).
        if num == 0:
            return lambda x: logical_and(less_equal(x, val1),
                                         greater_equal(x, val2))
        elif num == 2:
            return lambda x: less_equal(x, val2)
        else:
            return lambda x: logical_and(less(x, val1),
                                         greater_equal(x, val2))

    last = order // 2 + 2
    if order % 2:
        startbound = -1.0
    else:
        startbound = -0.5
    condfuncs = [condfuncgen(0, 0, startbound)]
    bound = startbound
    for num in range(1, last - 1):
        condfuncs.append(condfuncgen(1, bound, bound - 1))
        bound = bound - 1
    condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))

    # final value of bound is used in piecefuncgen below

    # the functions to evaluate are taken from the left-hand side
    # in the general expression derived from the central difference
    # operator (because they involve fewer terms).

    fval = float_factorial(order)

    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in range(Mk + 1)]
        # NOTE: `bound` here is the final loop value, deliberately captured
        # by closure after the condition-building loop above finished.
        shifts = [-bound - k for k in range(Mk + 1)]

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc

    funclist = [piecefuncgen(k) for k in range(last)]

    _splinefunc_cache[order] = (funclist, condfuncs)

    return funclist, condfuncs
def bspline(x, n):
    """B-spline basis function of order n.

    Evaluates the centered, normalized B-spline basis function of order
    ``n`` at the knot positions ``x``.  The basis is an even function, so
    it is evaluated on ``-|x|`` using the exact left-hand piecewise
    polynomials via `numpy.piecewise`.

    Parameters
    ----------
    x : array_like
        A knot vector.
    n : int
        The order of the spline. Must be non-negative, i.e., n >= 0.

    Returns
    -------
    res : ndarray
        B-spline basis function values.

    See Also
    --------
    cubic : A cubic B-spline (n=3).
    quadratic : A quadratic B-spline (n=2).
    """
    neg_ax = -abs(asarray(x))  # exploit symmetry: evaluate on the left half-line
    piece_funcs, piece_conds = _bspline_piecefunctions(n)
    cond_masks = [cond(neg_ax) for cond in piece_conds]
    return piecewise(neg_ax, cond_masks, piece_funcs)
def gauss_spline(x, n):
    r"""Gaussian approximation to B-spline basis function of order n.

    A B-spline basis function of order ``n`` is well approximated (for
    large ``n``) by a zero-mean Gaussian with variance
    :math:`\sigma^2 = (n + 1) / 12`; this evaluates that Gaussian at the
    knot positions ``x``.

    Parameters
    ----------
    x : array_like
        A knot vector.
    n : int
        The order of the spline. Must be non-negative, i.e., n >= 0.

    Returns
    -------
    res : ndarray
        B-spline basis function values approximated by a zero-mean
        Gaussian function.

    References
    ----------
    .. [1] Bouma H. et al. (2007) Fast and Accurate Gaussian Derivatives
       Based on B-Splines, SSVM 2007, LNCS vol 4485, Springer.
    """
    knots = asarray(x)
    variance = (n + 1) / 12.0
    normalizer = 1 / sqrt(2 * pi * variance)
    return normalizer * exp(-knots ** 2 / 2 / variance)
def cubic(x):
    """A cubic B-spline (special case of `bspline` with ``n = 3``).

    Piecewise evaluation of the centered cubic basis:
    ``2/3 - |x|**2 * (2 - |x|) / 2`` for ``|x| < 1``,
    ``(2 - |x|)**3 / 6`` for ``1 <= |x| < 2``, and 0 elsewhere.

    Parameters
    ----------
    x : array_like
        A knot vector.

    Returns
    -------
    res : ndarray
        Cubic B-spline basis function values.

    See Also
    --------
    bspline : B-spline basis function of order n.
    quadratic : A quadratic B-spline.
    """
    dist = abs(asarray(x))
    out = zeros_like(dist)

    inner = less(dist, 1)
    if inner.any():
        d = dist[inner]
        out[inner] = 2.0 / 3 - 1.0 / 2 * d ** 2 * (2 - d)

    outer = ~inner & less(dist, 2)
    if outer.any():
        d = dist[outer]
        out[outer] = 1.0 / 6 * (2 - d) ** 3

    return out
def quadratic(x):
    """A quadratic B-spline (special case of `bspline` with ``n = 2``).

    Piecewise evaluation of the centered quadratic basis:
    ``0.75 - |x|**2`` for ``|x| < 0.5``,
    ``(|x| - 1.5)**2 / 2`` for ``0.5 <= |x| < 1.5``, and 0 elsewhere.

    Parameters
    ----------
    x : array_like
        A knot vector.

    Returns
    -------
    res : ndarray
        Quadratic B-spline basis function values.

    See Also
    --------
    bspline : B-spline basis function of order n.
    cubic : A cubic B-spline.
    """
    dist = abs(asarray(x))
    out = zeros_like(dist)

    inner = less(dist, 0.5)
    if inner.any():
        d = dist[inner]
        out[inner] = 0.75 - d ** 2

    outer = ~inner & less(dist, 1.5)
    if outer.any():
        d = dist[outer]
        out[outer] = (d - 1.5) ** 2 / 2.0

    return out
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
    # Smoothing-spline coefficients via a second-order recursive filter with
    # complex pole rho*exp(+-i*omega): a causal (forward) pass builds `yp`
    # and an anti-causal (backward) pass builds `y`.  The first two samples
    # of each pass are initialized from the exact impulse responses
    # _hc/_hs, which encodes the mirror-symmetric boundary conditions.
    rho, omega = _coeff_smooth(lamb)
    cs = 1 - 2 * rho * cos(omega) + rho * rho
    K = len(signal)
    yp = zeros((K,), signal.dtype.char)
    k = arange(K)
    # Causal initialization from the full impulse-response sums.
    yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
             add.reduce(_hc(k + 1, cs, rho, omega) * signal))

    yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
             _hc(1, cs, rho, omega) * signal[1] +
             add.reduce(_hc(k + 2, cs, rho, omega) * signal))

    for n in range(2, K):
        yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
                 rho * rho * yp[n - 2])

    y = zeros((K,), signal.dtype.char)
    # Anti-causal initialization (symmetric response against the reversed
    # signal).
    y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
                           _hs(k + 1, cs, rho, omega)) * signal[::-1])

    y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
                           _hs(k + 2, cs, rho, omega)) * signal[::-1])

    for n in range(K - 3, -1, -1):
        y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
                rho * rho * y[n + 2])

    return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
    """Compute cubic spline coefficients for a rank-1 array.

    Coefficients assume mirror-symmetric boundary conditions; the signal is
    recovered by mirror-symmetric convolution of the coefficients with the
    length-3 FIR window ``[1., 4., 1.]/6.``.

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient; 0.0 (the default) gives exact interpolation,
        any other value a smoothing spline.

    Returns
    -------
    c : ndarray
        Cubic spline coefficients.

    See Also
    --------
    cspline1d_eval : Evaluate a cubic spline at the new set of points.
    """
    if lamb == 0.0:
        return _cubic_coeff(signal)
    return _cubic_smooth_coeff(signal, lamb)
def qspline1d(signal, lamb=0.0):
    """Compute quadratic spline coefficients for a rank-1 array.

    Coefficients assume mirror-symmetric boundary conditions; the signal is
    recovered by mirror-symmetric convolution of the coefficients with the
    length-3 FIR window ``[1., 6., 1.]/8.``.

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient (must be zero for now).

    Returns
    -------
    c : ndarray
        Quadratic spline coefficients.

    Raises
    ------
    ValueError
        If `lamb` is nonzero (quadratic smoothing splines are not
        implemented).

    See Also
    --------
    qspline1d_eval : Evaluate a quadratic spline at the new set of points.
    """
    if lamb == 0.0:
        return _quadratic_coeff(signal)
    raise ValueError("Smoothing quadratic splines not supported yet.")
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a cubic spline at the new set of points.

    The knot points for which `cj` holds spline coefficients are assumed to
    lie at ``x0 + j*dx`` for ``j = 0 .. len(cj) - 1``.  Edges are handled
    with mirror-symmetric boundary conditions (by recursing on the
    reflected coordinates).

    Parameters
    ----------
    cj : ndarray
        Cubic spline coefficients.
    newx : ndarray
        New set of points.
    dx : float, optional
        Old sample spacing, the default value is 1.0.
    x0 : int, optional
        Old origin, the default value is 0.

    Returns
    -------
    res : ndarray
        Evaluated cubic spline values.

    See Also
    --------
    cspline1d : Compute cubic spline coefficients for rank-1 array.
    """
    # Work in knot-index coordinates.
    newx = (asarray(newx) - x0) / float(dx)
    res = zeros_like(newx, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    below = newx < 0
    above = newx > (N - 1)
    inside = ~(below | above)
    # Mirror-symmetric boundaries: reflect out-of-range points back in.
    res[below] = cspline1d_eval(cj, -newx[below])
    res[above] = cspline1d_eval(cj, 2 * (N - 1) - newx[above])
    interior = newx[inside]
    if interior.size == 0:
        return res
    vals = zeros_like(interior, dtype=cj.dtype)
    jlower = floor(interior - 2).astype(int) + 1
    # A cubic spline has support over 4 adjacent knots.
    for offset in range(4):
        j = jlower + offset
        jclip = j.clip(0, N - 1)  # handle edge cases
        vals += cj[jclip] * cubic(interior - j)
    res[inside] = vals
    return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline at the new set of points.

    The knot points for the coefficients ``cj`` are assumed to lie at
    equally-spaced positions ``oldx = x0 + j*dx`` for
    ``j = 0 ... len(cj)-1``.  Points outside that range are handled with
    mirror-symmetric boundary conditions.

    Parameters
    ----------
    cj : ndarray
        Quadratic spline coefficients.
    newx : ndarray
        New set of points.
    dx : float, optional
        Old sample-spacing, the default value is 1.0.
    x0 : int, optional
        Old origin, the default value is 0.

    Returns
    -------
    res : ndarray
        The quadratic spline evaluated at the points of `newx`.

    See Also
    --------
    qspline1d : Compute quadratic spline coefficients for rank-1 array.
    """
    # Work in knot-index coordinates: knot j sits at x == j.
    x = (asarray(newx) - x0) / dx
    out = zeros_like(x)
    if out.size == 0:
        return out
    num_knots = len(cj)
    below = x < 0
    above = x > (num_knots - 1)
    inside = ~(below | above)
    # Mirror-symmetric extension: reflect out-of-range points back toward
    # the knot interval and evaluate recursively.
    out[below] = qspline1d_eval(cj, -x[below])
    out[above] = qspline1d_eval(cj, 2 * (num_knots - 1) - x[above])
    x = x[inside]
    if x.size == 0:
        return out
    # A quadratic B-spline has support over 3 neighboring knots;
    # accumulate their contributions, clipping indices at the edges.
    accum = zeros_like(x)
    first = floor(x - 1.5).astype(int) + 1
    for offset in range(3):
        knot = first + offset
        safe = knot.clip(0, num_knots - 1)  # handle edge cases
        accum += cj[safe] * quadratic(x - knot)
    out[inside] = accum
    return out
| bsd-3-clause |
caseyclements/blaze | blaze/compute/pandas.py | 2 | 16522 | """
>>> from blaze.expr import symbol
>>> from blaze.compute.pandas import compute
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from pandas import DataFrame
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> df = DataFrame(data, columns=['name', 'amount'])
>>> compute(deadbeats, df)
1 Bob
2 Charlie
Name: name, dtype: object
"""
from __future__ import absolute_import, division, print_function
import fnmatch
import itertools
import numpy as np
import pandas as pd
from pandas.core.generic import NDFrame
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from toolz import merge as merge_dicts
from toolz.curried import pipe, filter, map, concat
import datashape
from datashape import to_numpy_dtype
from datashape.predicates import isscalar
from odo import into
from ..dispatch import dispatch
from .core import compute, compute_up, base
from ..expr import (Projection, Field, Sort, Head, Tail, Broadcast, Selection,
Reduction, Distinct, Join, By, Summary, Label, ReLabel,
Map, Apply, Merge, std, var, Like, Slice, summary,
ElemWise, DateTime, Millisecond, Expr, Symbol, IsIn,
UTCFromTimestamp, nelements, DateTimeTruncate, count,
UnaryStringFunction, nunique, Coerce, Concat,
isnan, notnull)
from ..expr import UnaryOp, BinOp, Interp
from ..expr import symbol, common_subexpression
from ..compatibility import _inttypes
__all__ = []
@dispatch(Projection, DataFrame)
def compute_up(t, df, **kwargs):
    # Column subset: a blaze Projection maps directly onto DataFrame
    # column indexing.
    return df[list(t.fields)]
@dispatch(Field, (DataFrame, DataFrameGroupBy))
def compute_up(t, df, **kwargs):
    # Single-column access; a Field always carries exactly one name.
    assert len(t.fields) == 1
    return df[t.fields[0]]
@dispatch(Field, Series)
def compute_up(t, data, **kwargs):
    """Resolve a single-field reference against a named Series.

    The Series itself is the column, so its name must match the
    requested field exactly; otherwise the reference is invalid.
    """
    assert len(t.fields) == 1
    field, = t.fields
    if field != data.name:
        raise ValueError("Fieldname %r does not match Series name %r"
                         % (field, data.name))
    return data
@dispatch(Broadcast, DataFrame)
def compute_up(t, df, **kwargs):
    # Elementwise expression: substitute each leaf column of the child
    # with the matching DataFrame column, then compute the scalar expr.
    d = dict((t._child[c]._expr, df[c]) for c in t._child.fields)
    return compute(t._expr, d)
@dispatch(Broadcast, Series)
def compute_up(t, s, **kwargs):
    # Reuse the DataFrame path by promoting the Series to a one-column frame.
    return compute_up(t, s.to_frame(), **kwargs)
@dispatch(Interp, Series)
def compute_up(t, data, **kwargs):
    # String interpolation / modulo with one literal operand; the Series
    # stands in for whichever side is the blaze Expr.
    if isinstance(t.lhs, Expr):
        return data % t.rhs
    else:
        return t.lhs % data
@compute_up.register(Interp, Series, (Series, base))
@compute_up.register(Interp, base, Series)
def compute_up_pd_interp(t, lhs, rhs, **kwargs):
    # Both operands already computed: delegate to pandas' % operator.
    return lhs % rhs
@dispatch(BinOp, Series)
def compute_up(t, data, **kwargs):
    # Binary op with one literal operand; the Series substitutes for
    # whichever side is the Expr.
    if isinstance(t.lhs, Expr):
        return t.op(data, t.rhs)
    else:
        return t.op(t.lhs, data)
@dispatch(BinOp, Series, (Series, base))
def compute_up(t, lhs, rhs, **kwargs):
    return t.op(lhs, rhs)
@dispatch(BinOp, (Series, base), Series)
def compute_up(t, lhs, rhs, **kwargs):
    return t.op(lhs, rhs)
@dispatch(UnaryOp, NDFrame)
def compute_up(t, df, **kwargs):
    # Prefer an explicit ``op`` on the expression; otherwise fall back to
    # the numpy ufunc of the same name (e.g. ``sin``, ``sqrt``).
    f = getattr(t, 'op', getattr(np, t.symbol, None))
    if f is None:
        raise ValueError('%s is not a valid operation on %s objects' %
                         (t.symbol, type(df).__name__))
    return f(df)
@dispatch(Selection, (Series, DataFrame))
def compute_up(t, df, **kwargs):
    # Boolean-mask filtering: compute the predicate against the child
    # data, then index with the resulting mask.
    predicate = compute(t.predicate, {t._child: df})
    return df[predicate]
@dispatch(Join, DataFrame, DataFrame)
def compute_up(t, lhs, rhs, **kwargs):
    """ Join two pandas data frames on arbitrary columns
    The approach taken here could probably be improved.
    To join on two columns we force each column to be the index of the
    dataframe, perform the join, and then reset the index back to the left
    side's original index.
    """
    result = pd.merge(
        lhs,
        rhs,
        left_on=t.on_left,
        right_on=t.on_right,
        how=t.how,
        suffixes=t.suffixes,
    )
    # Restrict to the columns the Join expression promises, in order.
    return result.reset_index()[t.fields]
@dispatch(isnan, pd.Series)
def compute_up(expr, data, **kwargs):
    # pandas treats NaN and None uniformly via isnull.
    return data.isnull()
@dispatch(notnull, pd.Series)
def compute_up(expr, data, **kwargs):
    return data.notnull()
# Pandas containers (and their groupby wrappers) accepted by Concat.
pandas_structure = DataFrame, Series, DataFrameGroupBy, SeriesGroupBy
@dispatch(Concat, pandas_structure, pandas_structure)
def compute_up(t, lhs, rhs, _concat=pd.concat, **kwargs):
    # Concatenate along the expression's axis; both operands must be the
    # same pandas type (a frame cannot be concatenated with a series here).
    if not (isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs))):
        raise TypeError('lhs and rhs must be the same type')
    return _concat((lhs, rhs), axis=t.axis, ignore_index=True)
def get_scalar(result):
    """Coerce a reduction result to a plain Python scalar when possible.

    pandas may hand back a builtin number, a numpy scalar, or a
    non-scalar object.  ``.item()`` is attempted first so the value stays
    JSON serializable; anything without ``.item`` (AttributeError) or
    that is not size-1 (ValueError) is passed through unchanged.
    """
    try:
        scalar = result.item()
    except (AttributeError, ValueError):
        return result
    return scalar
@dispatch(Reduction, (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
    # Generic reduction: the blaze symbol (sum, min, ...) names the
    # matching pandas method.
    result = get_scalar(getattr(s, t.symbol)())
    if t.keepdims:
        result = Series([result], name=s.name)
    return result
@dispatch((std, var), (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
    # std/var need blaze's ``unbiased`` flag translated into pandas' ddof.
    result = get_scalar(getattr(s, t.symbol)(ddof=t.unbiased))
    if t.keepdims:
        result = Series([result], name=s.name)
    return result
@dispatch(Distinct, DataFrame)
def compute_up(t, df, **kwargs):
    return df.drop_duplicates(subset=t.on or None).reset_index(drop=True)
@dispatch(Distinct, Series)
def compute_up(t, s, **kwargs):
    # ``on`` only makes sense for tabular data.
    if t.on:
        raise ValueError('malformed expression: no columns to distinct on')
    return s.drop_duplicates().reset_index(drop=True)
@dispatch(nunique, DataFrame)
def compute_up(expr, data, **kwargs):
    # nunique == count of distinct rows; rewrite the expression and recurse.
    return compute_up(expr._child.distinct().count(), data, **kwargs)
# Blaze string-function names that differ from the pandas .str accessor.
string_func_names = {
    'strlen': 'len',
}
@dispatch(UnaryStringFunction, Series)
def compute_up(expr, data, **kwargs):
    name = type(expr).__name__
    return getattr(data.str, string_func_names.get(name, name))()
def unpack(seq):
    """Return the sole element of a length-one sequence, else a list.

    >>> unpack([1, 2, 3])
    [1, 2, 3]
    >>> unpack([1])
    1
    """
    items = list(seq)
    return items[0] if len(items) == 1 else items
# Types usable as a pandas groupby key specification.
Grouper = ElemWise, Series, list
@dispatch(By, list, DataFrame)
def get_grouper(c, grouper, df):
    # A raw list of labels/arrays is already a valid pandas grouper.
    return grouper
@dispatch(By, Expr, NDFrame)
def get_grouper(c, grouper, df):
    # Compute the grouping expression; a multi-column result becomes a
    # list of Series so pandas groups on all of them.
    # NOTE(review): results that are neither Series nor DataFrame fall
    # through and return None.
    g = compute(grouper, {c._child: df})
    if isinstance(g, Series):
        return g
    if isinstance(g, DataFrame):
        return [g[col] for col in g.columns]
@dispatch(By, (Field, Projection), NDFrame)
def get_grouper(c, grouper, df):
    # Plain column references can be passed to groupby by name.
    return grouper.fields
@dispatch(By, Reduction, Grouper, NDFrame)
def compute_by(t, r, g, df):
    # Group-by with a single reduction: precompute the reduced child,
    # attach it to the frame, group, then reduce each group.
    names = [r._name]
    preapply = compute(r._child, {t._child: df})
    # Pandas and Blaze column naming schemes differ
    # Coerce DataFrame column names to match Blaze's names
    preapply = preapply.copy()
    if isinstance(preapply, Series):
        preapply.name = names[0]
    else:
        preapply.names = names
    group_df = concat_nodup(df, preapply)
    gb = group_df.groupby(g)
    groups = gb[names[0] if isscalar(t.apply._child.dshape.measure) else names]
    return compute_up(r, groups)  # do reduction
# Memoized expression -> name mapping and the set of names handed out so
# far.  Both are module-global and cleared by fancify_summary before use.
name_dict = dict()
seen_names = set()
def _name(expr):
    """Return a unique, deterministic name for *expr*.

    The first expression requesting a given base name gets it verbatim;
    later collisions are suffixed ``_1``, ``_2``, ... in order.
    """
    try:
        return name_dict[expr]
    except KeyError:
        pass
    base = expr._name or '_'
    candidate = base
    suffix = 1
    while candidate in seen_names:
        candidate = '%s_%d' % (base, suffix)
        suffix += 1
    seen_names.add(candidate)
    name_dict[expr] = candidate
    return candidate
def fancify_summary(expr):
    """ Separate a complex summary into two pieces
    Helps pandas compute_by on summaries
    >>> t = symbol('t', 'var * {x: int, y: int}')
    >>> one, two, three = fancify_summary(summary(a=t.x.sum(), b=t.x.sum() + t.y.count() - 1))
    A simpler summary with only raw reductions
    >>> one
    summary(x_sum=sum(t.x), y_count=count(t.y))
    A mapping of those names to new leaves to use in another compuation
    >>> two  # doctest: +SKIP
    {'x_sum': x_sum, 'y_count': y_count}
    A mapping of computations to do for each column
    >>> three  # doctest: +SKIP
    {'a': x_sum, 'b': (x_sum + y_count) - 1}
    In this way, ``compute_by`` is able to do simple pandas reductions using
    groups.agg(...) and then do columnwise arithmetic afterwards.
    """
    # Naming state is module-global; reset it so names are deterministic
    # for each summary.
    seen_names.clear()
    name_dict.clear()
    # Collect every raw Reduction reachable from the summary's values.
    exprs = pipe(expr.values,
                 map(Expr._traverse),
                 concat,
                 filter(lambda x: isinstance(x, Reduction)),
                 set)
    one = summary(**dict((_name(expr), expr) for expr in exprs))
    # Fresh leaves standing in for the named reduction results.
    two = dict((_name(expr), symbol(_name(expr), datashape.var * expr.dshape))
               for expr in exprs)
    d = dict((expr, two[_name(expr)]) for expr in exprs)
    # Each output column rewritten in terms of the new leaves.
    three = dict((name, value._subs(d)) for name, value in zip(expr.names,
                                                               expr.values))
    return one, two, three
@dispatch(By, Summary, Grouper, NDFrame)
def compute_by(t, s, g, df):
    # Group-by with a compound summary: do the raw reductions with
    # groupby.agg, then the columnwise arithmetic on the aggregates.
    one, two, three = fancify_summary(s)  # see above
    names = one.fields
    preapply = DataFrame(dict(zip(names,
                                  [compute(v._child, {t._child: df})
                                   for v in one.values])))
    # Align the frame with the precomputed columns (filters may have
    # dropped rows).
    if not df.index.equals(preapply.index):
        df = df.loc[preapply.index]
    df2 = concat_nodup(df, preapply)
    groups = df2.groupby(g)
    d = dict((name, v.symbol) for name, v in zip(one.names, one.values))
    result = groups.agg(d)
    scope = dict((v, result[k]) for k, v in two.items())
    cols = [compute(expr.label(name), scope) for name, expr in three.items()]
    result2 = pd.concat(cols, axis=1)
    # Rearrange columns to match names order
    result3 = result2[sorted(result2.columns, key=lambda t: s.fields.index(t))]
    return result3
@dispatch(Expr, DataFrame)
def post_compute_by(t, df):
    # Grouper values live in the index after groupby; drop them for
    # plain expressions.
    return df.reset_index(drop=True)
@dispatch((Summary, Reduction), DataFrame)
def post_compute_by(t, df):
    # For reductions the grouper keys become ordinary columns again.
    return df.reset_index()
@dispatch(By, NDFrame)
def compute_up(t, df, **kwargs):
    # Entry point for By: build the grouper, reduce per group, then
    # normalize index and column names.
    grouper = get_grouper(t, t.grouper, df)
    result = compute_by(t, t.apply, grouper, df)
    result2 = post_compute_by(t.apply, into(DataFrame, result))
    if isinstance(result2, DataFrame):
        result2.columns = t.fields
    return result2
def concat_nodup(a, b):
    """Concatenate two DataFrames/Series without duplicately named columns.

    Columns (or the Series name) already present in ``a`` win; only the
    remaining columns of ``b`` are appended alongside.  If neither
    argument is a pandas DataFrame or Series, None is returned.
    """
    a_frame = isinstance(a, DataFrame)
    b_frame = isinstance(b, DataFrame)
    a_series = isinstance(a, Series)
    b_series = isinstance(b, Series)
    if a_frame and b_frame:
        fresh = [c for c in b.columns if c not in a.columns]
        return pd.concat([a, b[fresh]], axis=1)
    if a_frame and b_series:
        if b.name in a.columns:
            return a
        return pd.concat([a, b], axis=1)
    if a_series and b_frame:
        fresh = [c for c in b.columns if c != a.name]
        return pd.concat([a, b[fresh]], axis=1)
    if a_series and b_series:
        return a if a.name == b.name else pd.concat([a, b], axis=1)
@dispatch(Sort, DataFrame)
def compute_up(t, df, **kwargs):
    """Sort a DataFrame by the expression's key column(s).

    ``DataFrame.sort`` was deprecated in pandas 0.17 and removed in
    0.20, so prefer ``sort_values`` and only fall back to ``sort`` on
    very old pandas — mirroring the Series overload below.
    """
    try:
        return df.sort_values(t.key, ascending=t.ascending)
    except AttributeError:
        return df.sort(t.key, ascending=t.ascending)
@dispatch(Sort, Series)
def compute_up(t, s, **kwargs):
    # sort_values is the modern API; ``order`` covers pandas < 0.17.
    try:
        return s.sort_values(ascending=t.ascending)
    except AttributeError:
        return s.order(ascending=t.ascending)
@dispatch(Head, (Series, DataFrame))
def compute_up(t, df, **kwargs):
    return df.head(t.n)
@dispatch(Tail, (Series, DataFrame))
def compute_up(t, df, **kwargs):
    return df.tail(t.n)
@dispatch(Label, DataFrame)
def compute_up(t, df, **kwargs):
    # Relabel the single column of a one-column frame.
    return DataFrame(df, columns=[t.label])
@dispatch(Label, Series)
def compute_up(t, df, **kwargs):
    return Series(df, name=t.label)
@dispatch(ReLabel, DataFrame)
def compute_up(t, df, **kwargs):
    return df.rename(columns=dict(t.labels))
@dispatch(ReLabel, Series)
def compute_up(t, s, **kwargs):
    """Rename a Series according to a single (old, new) label pair."""
    if len(t.labels) > 1:
        raise ValueError('You can only relabel a Series with a single name')
    (_, new_name), = t.labels
    return Series(s, name=new_name)
@dispatch(Map, DataFrame)
def compute_up(t, df, **kwargs):
    # Row-wise map: each row tuple is splatted into the user function.
    return df.apply(lambda tup: t.func(*tup), axis=1)
@dispatch(Map, Series)
def compute_up(t, df, **kwargs):
    result = df.map(t.func)
    try:
        result.name = t._name
    except NotImplementedError:
        # We don't have a schema, but we should still be able to map
        result.name = df.name
    return result
@dispatch(Apply, (Series, DataFrame))
def compute_up(t, df, **kwargs):
    # Opaque user function applied to the whole object.
    return t.func(df)
@dispatch(Merge, NDFrame)
def compute_up(t, df, scope=None, **kwargs):
    # Compute each child against the shared subexpression, then lay the
    # results side by side.
    subexpression = common_subexpression(*t.children)
    scope = merge_dicts(scope or {}, {subexpression: df})
    children = [compute(_child, scope) for _child in t.children]
    return pd.concat(children, axis=1)
@dispatch(Summary, DataFrame)
def compute_up(expr, data, **kwargs):
    values = [compute(val, {expr._child: data}) for val in expr.values]
    if expr.keepdims:
        return DataFrame([values], columns=expr.fields)
    else:
        return Series(dict(zip(expr.fields, values)))
@dispatch(Summary, Series)
def compute_up(expr, data, **kwargs):
    result = tuple(compute(val, {expr._child: data}) for val in expr.values)
    if expr.keepdims:
        result = [result]
    return result
@dispatch(Like, DataFrame)
def compute_up(expr, df, **kwargs):
    # Keep rows where every patterned column matches its glob
    # (fnmatch.translate converts the glob to a regex).
    arrs = [df[name].str.contains('^%s$' % fnmatch.translate(pattern))
            for name, pattern in expr.patterns.items()]
    return df[np.logical_and.reduce(arrs)]
def get_date_attr(s, attr, name):
    # Pull a datetime component off a Series, tolerating old pandas.
    try:
        result = getattr(s.dt, attr)  # new in pandas 0.15
    except AttributeError:
        result = getattr(pd.DatetimeIndex(s), attr)
    result.name = name
    return result
@dispatch(DateTime, Series)
def compute_up(expr, s, **kwargs):
    return get_date_attr(s, expr.attr, expr._name)
@dispatch(UTCFromTimestamp, Series)
def compute_up(expr, s, **kwargs):
    """Convert a Series of POSIX timestamps (seconds) to UTC datetimes.

    ``pd.datetools`` was deprecated and removed in pandas 0.20;
    ``pd.to_datetime`` is the long-standing public spelling of the same
    conversion.  Values are scaled to nanoseconds, pandas' native
    datetime resolution.
    """
    return pd.to_datetime(s * 1e9, utc=True)
@dispatch(Millisecond, Series)
def compute_up(expr, s, **kwargs):
    # pandas only exposes microseconds; divide down to milliseconds.
    return get_date_attr(s, 'microsecond',
                         '%s_millisecond' % expr._child._name) // 1000
@dispatch(Slice, (DataFrame, Series))
def compute_up(expr, df, **kwargs):
    # Positional slicing via iloc; single-element tuples are unwrapped.
    index = expr.index
    if isinstance(index, tuple) and len(index) == 1:
        index = index[0]
    if isinstance(index, _inttypes + (list,)):
        return df.iloc[index]
    elif isinstance(index, slice):
        if index.stop is not None:
            return df.iloc[index.start:index.stop:index.step]
        else:
            return df.iloc[index]
    else:
        raise NotImplementedError()
@dispatch(count, DataFrame)
def compute_up(expr, df, **kwargs):
    # Row count (not per-column non-null counts).
    result = df.shape[0]
    if expr.keepdims:
        result = Series([result], name=expr._name)
    return result
@dispatch(nelements, (DataFrame, Series))
def compute_up(expr, df, **kwargs):
    return df.shape[0]
@dispatch(DateTimeTruncate, Series)
def compute_up(expr, data, **kwargs):
    # Delegate to the numpy implementation, then re-wrap as a Series.
    return Series(compute_up(expr, into(np.ndarray, data), **kwargs),
                  name=expr._name)
@dispatch(IsIn, Series)
def compute_up(expr, data, **kwargs):
    return data.isin(expr._keys)
@dispatch(Coerce, Series)
def compute_up(expr, data, **kwargs):
    return data.astype(to_numpy_dtype(expr.schema))
| bsd-3-clause |
# Feature flag: XRF-analysis menu entries are enabled only when the
# optional xrf_analysis module (and its dependencies) imports cleanly.
# NOTE: ``global`` at module level is a no-op; kept for readability.
global XRFALLOWED
try:
    from xrf_analysis import *
    XRFALLOWED = True
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # are no longer swallowed; any import-time failure still just
    # disables the XRF features.
    XRFALLOWED = False
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from XRDdefaults import *
from xrd_fileIO_fcns import *
from xrd_math_fcns import *
from xrdPLOT import *
from xrd_diffraction_conversion_fcns import *
from xrf_depprof import *
import numpy, scipy.interpolate, pylab, operator, sys, os, time, copy, h5py, matplotlib, matplotlib.cm
import ui_mainmenu
import ui_message_box
import ui_import_image
import ui_import_attr
import ui_chessrunattr
import ui_get_group
import ui_int_params
import ui_chi_params
import ui_qq_params
import ui_h5file_info
import ui_analyze_qq
import ui_wavepeak_1d
import ui_associate_pkqq
import ui_associationtree
import ui_make_phases_menu
import ui_spatial_phases_menu
import ui_highlowDialog
import ui_bmin_menu
import ui_chiqDialog
import ui_plotsomenu
import ui_XRDSuite_params
import ui_h5scanDialog
import ui_pdfDialog
import ui_waveset1d_params
import ui_dep_prof
import ui_xrf_analysis
import ui_test
import ui_buildnewscan
import ui_mini_program_dialog
import ui_pdfsearch
import ui_LinBckndDialog
import ui_bckndinventoryDialog
import ui_editrawxrdDialog
#import ui_emptydialog
#def dummytask(secs):
# print 'dummy task exectued'
# time.sleep(secs)
def printtime():
    # Debug helper: print the current wall-clock time (Python 2 print
    # statement; this module targets Python 2 / PyQt4).
    print time.ctime()
def mygetopenfile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
    # File-open dialog wrapper.  With no parent widget a throwaway
    # QApplication/QWidget pair is created so the dialog can run from a
    # bare script.  Returns the selected path as unicode ('' if
    # cancelled).  NOTE: the default ``xpath`` is evaluated once at
    # import time, not per call.
    if parent is None:
        xapp = QApplication(sys.argv)
        xparent = QWidget()
        returnfn = unicode(QFileDialog.getOpenFileName(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/')))
        xparent.destroy()
        xapp.quit()
        return returnfn
    return unicode(QFileDialog.getOpenFileName(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetsavefile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
    # Same pattern as mygetopenfile, but for choosing a save target.
    if parent is None:
        xapp = QApplication(sys.argv)
        xparent = QWidget()
        returnfn = unicode(QFileDialog.getSaveFileName(xparent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
        xparent.destroy()
        xapp.quit()
        return returnfn
    return unicode(QFileDialog.getSaveFileName(parent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetdir(parent=None, xpath="%s" % os.getcwd(),markstr='' ):
    # Same pattern again, for choosing an existing directory.
    if parent is None:
        xapp = QApplication(sys.argv)
        xparent = QWidget()
        returnfn = unicode(QFileDialog.getExistingDirectory(xparent,''.join(['Select directory:', markstr]), xpath))
        xparent.destroy()
        xapp.quit()
        return returnfn
    return unicode(QFileDialog.getExistingDirectory(parent,''.join(['Select directory:', markstr]), xpath))
class MainMenu(QMainWindow,
ui_mainmenu.Ui_MainMenu):
    def __init__(self, parent=None, datpath="%s" % os.getcwd(), h5path="%s" % os.getcwd(), runpath="%s" % os.getcwd()):
        # Build the Qt UI and remember the working directories: raw data
        # (.dat), .h5 files, and run output.
        super(MainMenu, self).__init__(parent)
        self.setupUi(self)
        self.datpath = datpath
        self.h5path = h5path
        self.runpath = runpath
        # Sentinel that can never equal a real "<file> <group>" string,
        # so the "use active path" check fails until one is set.
        self.activepathcompare='xxxxxxxxx'
        self.setallowedtasks()
    def updateactivepath(self):
        # Cache "<h5 filename> <group>" and display it in the line edit.
        self.activepathcompare=''.join((os.path.split(self.h5path)[1], ' ', self.h5groupstr))
        self.active_file_lineEdit.setText(self.activepathcompare)
    def clearactivepath(self):
        # Reset to the never-matching sentinel and blank the display.
        self.activepathcompare='xxxxxxxxx'
        self.active_file_lineEdit.setText('')
    def setallowedtasks(self):
        # XRF menu entry is usable only if the optional xrf_analysis
        # import succeeded at module load.
        self.actionXRF_analysis.setEnabled(XRFALLOWED)
@pyqtSignature("")
def on_performPushButton_clicked(self):
self.tasktext=unicode(self.taskTextBrowser.toPlainText())
self.tasktext=self.tasktext.strip()
self.tasktext=''.join((self.tasktext, '\n'))
self.performtasks()
def performtasks(self):
errorstr=''
try:
ACTIVEPATH=self.h5path
ACTIVEGRP=self.h5groupstr
except:
print 'NO ACTIVE PATH AND GROUP HAVE BEEN DEFINED'
self.lineendlist=[-1]
i=0
while i!=-1:
i=self.tasktext.find('\n', i+1)
if i!=-1:
self.lineendlist+=[i]
for i in range(len(self.lineendlist)-1):
# self.taskTextBrowser.setPlainText(''.join((self.tasktext[0:self.lineendlist[i]+1], '*', self.tasktext[self.lineendlist[i]+1:])))
# self.repaint()
cmdstr=self.tasktext[self.lineendlist[i]+1:self.lineendlist[i+1]]
print 'performing: ', cmdstr
if cmdstr.startswith('ACTIVEPATH='):
ACTIVEPATH=eval(cmdstr.partition('ACTIVEPATH=')[2])
elif cmdstr.startswith('ACTIVEGRP='):
temp=cmdstr.partition('ACTIVEGRP=')[2]
if 'DEFAULT' in temp:
ACTIVEGRP=getdefaultscan(ACTIVEPATH)
else:
ACTIVEGRP=eval(temp)
else:
errormsg=eval(cmdstr)
if not errormsg is None:
errorstr+='ERROR in '+cmdstr+ '\n\n'+errormsg
if len(errorstr)>0:
QMessageBox.warning(self,"ERROR REPORT", errorstr)
else:
QMessageBox.information(self, 'tasks Complete!', 'click "OK" to clear task list and continue program')
self.taskTextBrowser.setPlainText('')
self.setallowedtasks()
@pyqtSignature("")
def on_action_mini_program_txt_triggered(self):
idialog=mini_program_dialog(self)
if idialog.exec_():
self.addtask(idialog.cmdtext)
@pyqtSignature("")
def on_action_batch_initialize_triggered(self):
self.batchimportdatadialogcontrol()
@pyqtSignature("")
def on_action_synthimport_triggered(self):
synthpath=mygetopenfile(parent=self, markstr='synth txt file')
if len(synthpath)==0:
return
h5dir=mygetdir(parent=self, markstr='h5 save dir')
if len(h5dir)==0:
return
print len(synthpath), len(h5dir)
h5name=os.path.split(synthpath)[1]+'.h5'
h5path=os.path.join(h5dir, h5name).replace('\\','/')
self.addtask("createsynthetich5_peaktxt('"+h5path+"', '"+ synthpath+ "', elstr='ABC')")
@pyqtSignature("")
def on_action_import_txt_XRD_data_triggered(self):
synthpath=mygetopenfile(parent=self, markstr='first of txt files')
if len(synthpath)==0:
return
h5dir=mygetdir(parent=self, markstr='h5 save dir')
if len(h5dir)==0:
return
print len(synthpath), len(h5dir)
h5name=os.path.split(synthpath)[1]+'.h5'
h5path=os.path.join(h5dir, h5name).replace('\\','/')
self.addtask("createh5_txtfiles('"+h5path+"', '"+ synthpath+ "', headerlines=0, elstr='ABC')")
@pyqtSignature("")
def on_action_exportpeak_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for peak export')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
exportpeaklist(self.h5path, self.h5groupstr, self.runpath)
@pyqtSignature("")
def on_action_bckndinventory_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for peak export')
if temp!='':
h5pathtemp=temp
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
#QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
idialog=bckndinventoryDialog(self, self.h5path, h5groupstr=self.h5groupstr)
else:#if didn't find a groupstr the traditional way then find any group that has XRD data
h5file=h5py.File(h5pathtemp, mode='r')
grpnames=[]
for group in h5file.iterobjects():
if isinstance(group,h5py.Group) and 'measurement' in group:
group=group['measurement']
for xrdgrp in XRDgroupnames():
if xrdgrp in group and isinstance(group[xrdgrp],h5py.Group) and 'counts' in group[xrdgrp]:
grpnames+=[group[xrdgrp].name]
h5file.close()
perform=len(grpnames)>0
if not perform:
print 'no XRD data found in .h5 file'
if perform:
idialog=selectorDialog(self, grpnames, title='Select an experiment group')
perform=idialog.exec_()
if perform:
h5grppath=str(idialog.groupsComboBox.currentText())
idialog=bckndinventoryDialog(self, h5pathtemp, h5grppath=h5grppath)
idialog.exec_()
@pyqtSignature("")
def on_action_neighbor_calculation_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for neighbor calculation')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
idialog=neighborwindow(self, self.h5path, self.h5groupstr, self.runpath)
idialog.exec_()
@pyqtSignature("")
def on_action_plot_sample_info_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for sample info plotting')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
idialog=plotinterpimageof1ddatawindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), style='info')
idialog.exec_()
@pyqtSignature("")
def on_action_textureanalysis_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for texture analysis')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
idialog=plotinterpimageof1ddatawindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), style='texture')
idialog.exec_()
@pyqtSignature("")
def on_action_import_sample_info_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for sample info import')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
importfilepath = mygetopenfile(self, xpath=defaultdir('otherdata'), markstr='pointind, number data', filename='.txt' )
perform=importfilepath!=''
if perform:
importsampleinfotoh5(self.h5path, self.h5groupstr, importfilepath)
@pyqtSignature("")
def on_action_export_XRDSuite_files_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 1d->.plt')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
perform=('icounts' in h5mar)
if not perform:
QMessageBox.warning(self,"failed", 'ABORTED: cannot find necessary data')
if perform:
pointlist=h5analysis.attrs['pointlist']
qgrid=h5mar['icounts'].attrs['qgrid']
qvals=q_qgrid_ind(qgrid)
xtypelist=['q 1/nm','2th (deg)','d (nm)','pixels']
opts=['icounts']
if 'ifcounts' in h5mar:
opts+=['ifcounts (processed)']
idialog=XRDSuiteDialog(self, xtypelist, 'select a scattering variable', opts, 'select a type of 1d intensity array', qvals[0], qvals[-1])
if idialog.exec_(): #no exec_ if perform False
scale=idialog.scaleCheckBox.isChecked()
dpbool=idialog.CompComboBox.currentIndex()==1
xrfbool=idialog.CompComboBox.currentIndex()==1
imtype=unicode(idialog.imtypeComboBox.currentText()).partition(' ')[0]
if imtype.startswith('if'):
counts=readh5pyarray(h5mar['ifcounts'])
else:
counts=readh5pyarray(h5mar['icounts'])
xtype=unicode(idialog.xtypeComboBox.currentText())
low=idialog.qminSpinBox.value()
high=idialog.qmaxSpinBox.value()
lowind=numpy.where(qvals>=low)[0][0]
highind=qvals.shape[0]-numpy.where(qvals[-1:0:-1]<=high)[0][0]
qvals=qvals[lowind:highind]
attrdict=getattr(self.h5path, self.h5groupstr)
L=attrdict['cal'][2]
wl=attrdict['wavelength']
psize=attrdict['psize']
elstr=attrdict['elements']
types=['x(mm)', 'z(mm)']
if scale:
types+=['DPnmolcm2']
if xrfbool:
comptype='XRFmolfracALL'
elif dpbool:
comptype='DPmolfracALL'
else:
comptype=None
if not comptype is None:
elstrlist, compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=comptype)
elstr='\t'.join(elstrlist)
compsstr=elstr
infodict, success=getpointinfo(self.h5path, self.h5groupstr, types=types)
if not success or (not comptype is None and compsarr is None):
print 'ABORTING: not all info could be found'
return
if scale:
scalearr=1/infodict['DPnmolcm2']
else:
scalearr=numpy.ones(len(infodict['x(mm)']), dtype='float32')
coordsarr=numpy.array([infodict['x(mm)'], infodict['z(mm)']]).T
if 'pix' in xtype:
xvals=pix_q(qvals, L, wl, psize=psize)
t1='pix'
elif '(nm)' in xtype:
xvals=d_q(qvals)
# plotarr=numpy.array([plotarr[-1*i-1] for i in range(plotarr.size)])
# xvals=numpy.array([xvals[-1*i-1] for i in range(xvals.size)])
t1='d'
elif '2' in xtype:
xvals=twotheta_q(qvals, wl)
t1='2th'
else:
t1='q'
xvals=qvals
savename='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr))
coordsfilename=os.path.join(self.runpath,''.join((savename, '_coords.txt'))).replace('\\','/')
compsfilename=os.path.join(self.runpath,''.join((savename, '_comps.txt'))).replace('\\','/')
countsfilename=os.path.join(self.runpath,''.join((savename, '_', imtype, '_', t1, '_counts.txt'))).replace('\\','/')
coordsstr='x\tz'
countsstr=''
for x in xvals:
countsstr='\t'.join((countsstr, numtostring(x, 4)))
countsstr=countsstr[1:]
for ind in pointlist:
yvals=counts[ind, lowind:highind]
yvals*=scalearr[ind]
temp=''
for y in yvals:
temp='\t'.join((temp, numtostring(y, 7)))
countsstr='\n'.join((countsstr, temp[1:]))
temp=''
for c in coordsarr[ind]:
temp='\t'.join((temp, numtostring(c, 3)))
coordsstr='\n'.join((coordsstr, temp[1:]))
if not comptype is None:
temp=''
if len(compsarr[ind])==1:
temp='100.0'
else:
numstr=[numtostring(num*100.0, 4) for num in compsarr[ind][:-1]]
rest=100.0
for ns in numstr:
rest-=eval(ns)
numstr+=[numtostring(rest, 4)]
temp='\t'.join(numstr)
compsstr='\n'.join((compsstr, temp))
fout=open(coordsfilename, "w")
fout.write(coordsstr)
fout.close()
if not comptype is None:
fout=open(compsfilename, "w")
fout.write(compsstr)
fout.close()
fout=open(countsfilename, "w")
fout.write(countsstr)
fout.close()
h5file.close()
    @pyqtSignature("")
    def on_action_change_active_scan_triggered(self):
        """Qt slot: change which scan group in an .h5 file is the active one.

        Reuses the currently active file if the checkbox matches, otherwise
        prompts for an .h5 file; then lets the user pick a group and records
        it as the file's 'defaultscan' attribute.
        """
        perform=False
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
            temp=self.h5path
        else:
            temp = mygetopenfile(self, xpath=self.h5path, markstr='.h5 file for changing active scan')
            perform=(temp!='')  # empty string means the user cancelled
        if perform:
            idialog=getgroupDialog(self, temp)
            if idialog.exec_():
                self.h5path=temp
                self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                self.updateactivepath()
                # persist the chosen group as the file's default scan
                h5file=h5py.File(self.h5path, mode='r+')
                h5file.attrs['defaultscan']=str(self.h5groupstr)
                h5file.close()
    @pyqtSignature("")
    def on_action_initialize_scan_triggered(self):
        """Qt slot: delegate to the data-import dialog to initialize a scan."""
        self.importdatadialogcontrol()
    @pyqtSignature("")
    def on_action_edit_DAQ_params_triggered(self):
        """Qt slot: edit the DAQ/scan attributes of a scan group.

        Resolves the target .h5 scan (active path if checked, otherwise a
        user-selected file and group), then opens the attribute dialog and
        writes any edited attributes back to the file.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for scan attribute edit')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            # dialog returns None when cancelled; only write on a real edit
            attrdicttemp = self.importattrDialogcaller(self, self.h5path, self.h5groupstr)
            if attrdicttemp is not None:
                writeattr(self.h5path, self.h5groupstr, attrdicttemp)
    @pyqtSignature("")
    def on_action_buildnewscan_triggered(self):
        """Qt slot: build a new scan group derived from an existing scan.

        Resolves the source scan, asks for a new group name, refuses to
        overwrite an existing group, then builds the new scan and hands it
        to the import dialog as a USER-COMPILED dataset.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for scan attribute edit')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=buildnewscanDialog(self, self.h5path, self.h5groupstr)
            if idialog.exec_():
                destname=str(unicode(idialog.newnameLineEdit.text()))
                # guard against clobbering an existing scan group
                h5file=h5py.File(self.h5path, mode='r')
                if destname in h5file:
                    h5file.close()
                    QMessageBox.warning(self,"failed", "Aborting because new scan name already exists")
                    return None
                h5file.close()
                self.h5groupstr=destname
                newscandict=idialog.createnewscandict()
                if not newscandict is None:
                    buildnewscan(self.h5path, self.h5groupstr, newscandict)
                    self.updateactivepath()
                    self.importdatadialogcontrol(h5path=self.h5path, h5groupstr=self.h5groupstr, command='USER-COMPILED')
    @pyqtSignature("")
    def on_actionXRF_analysis_triggered(self):
        """Qt slot: queue an XRF analysis task for the selected scan.

        Resolves the target scan, collects analysis parameters from the XRF
        dialog, and enqueues an XRFanalysis(...) task string.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for XRF analysis')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=xrfanalysisDialog(self, self.h5path, self.h5groupstr)
            if idialog.exec_():
                # the dialog builds the parameter string; bail out if it failed
                if idialog.parstr=='' or idialog.parstr is None:
                    print 'ABORTING XRF ANALYSIS: some error'
                    return
                self.addtask(", ".join(("XRFanalysis(h5path='"+self.h5path+"'", "h5groupstr='"+self.h5groupstr+"'", idialog.parstr))+")")
    @pyqtSignature("")
    def on_actionDeposition_Profiling_triggered(self):
        """Qt slot: compute and store a deposition profile for the scan.

        Resolves the target scan, collects per-gun properties from the
        dialog, computes mapped deposition quantities, aborts on any NaN
        result, and otherwise writes the profile back to the file.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for Deposition Profile calculation')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            # NOTE(review): this 'getattr' takes (path, groupstr), so it must be a
            # project helper that shadows the builtin getattr — confirm upstream
            attrdict=getattr(self.h5path, self.h5groupstr)
            idialog=depprofDialog(self, attrdict['elements'])
            if idialog.exec_():
                gunpropdict=idialog.propdict
                xcoords=attrdict['x']
                zcoords=attrdict['z']
                mdq=MappedDepQuantities(DepRates(gunpropdict, GunPosnDict(xcoords, zcoords)), gunpropdict)
                # refuse to save if any computed quantity contains NaN
                for vals in mdq.itervalues():
                    if numpy.any(numpy.isnan(vals)):
                        print mdq
                        QMessageBox.warning(self,"failed", 'Deposition profiling aborted, NaN results. The dictionary of results was printed.')
                        return
                writedepprof(self.h5path, self.h5groupstr, gunpropdict, mdq)
    @pyqtSignature("")
    def on_actionLinBcknd1d_triggered(self):
        """Qt slot: (stub) queue a 1-d linear background subtraction task.

        The actual task submission is not yet implemented: the method prints
        a notice and returns. The code below the early return is the
        intended implementation, intentionally kept as unreachable stub.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for Deposition Profile calculation')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            print 'not yet implemented'
            return
            # --- unreachable stub below: planned implementation ---
            idialog=LinBckndDialog1d(self, self.h5path, self.h5groupstr)
            if not (idialog.exec_() and idialog.perform):
                return
            othparstr=', f0vals='+ `idialog.fvals[0]`# not finished implementing
            othparstr+=', f1vals='+ `idialog.fvals[1]`
            othparstr+=', fraczeroed=%0.3f' %idialog.zerofracSpinBox.value()
            othparstr+=', fprecision=%0.3f, rankfornorm=%0.3f' %(idialog.precisionSpinBox.value(), idialog.normrankSpinBox.value())
            self.addtask(''.join(("linbckndsub1d(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr, othparstr, ")")))
    @pyqtSignature("")
    def on_action_calc_bcknd_triggered(self):
        """Qt slot: queue background calculation task(s) for the scan.

        Resolves the target scan, reads its 'bcknd' attribute to decide the
        background type ('min', 'lin', or other), collects parameters from
        the matching dialog, and enqueues one calcbcknd(...) task per group
        (the 'lin' dialog may add batch groups).
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for background calculation')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            # NOTE(review): 'getattr' here is presumably a project helper taking
            # (path, groupstr), shadowing the builtin — confirm upstream
            attrdicttemp=getattr(self.h5path, self.h5groupstr)
            if attrdicttemp is None:
                QMessageBox.warning(self,"failed", "calc cancelled: cannot find scan attributes")
            else:
                bcknd=attrdicttemp['bcknd']
                h5file=h5py.File(self.h5path, mode='r')
                h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
                batchh5grpstrlist=[self.h5groupstr]
                # warn if a previous background of this type will be overwritten
                if ''.join(('b',bcknd[:3])) in h5mar:
                    tempstr=''.join((' - previous ',bcknd[:3],' background will be overwritten'))
                else:
                    tempstr=''
                h5file.close() # it is imperative that this be closed before LinBckndDialog executes, as 'r+' is used within
                if 'min' in bcknd:
                    idialog=bminDialog(self)
                    if not idialog.exec_():
                        return
                    othparstr=', critfrac=%0.3f' %idialog.bminpercSpinBox.value()
                elif 'lin' in bcknd:
                    idialog=LinBckndDialog(self, self.h5path, self.h5groupstr)
                    if not (idialog.exec_() and idialog.perform):
                        return
                    batchh5grpstrlist+=idialog.batchh5grpstrlist
                    othparstr=', critfrac=%0.3f' %idialog.zerofracSpinBox.value()
                    othparstr+=', weightprecision=%0.3f, normrank=%0.3f' %(idialog.precisionSpinBox.value(), idialog.normrankSpinBox.value())
                else:
                    othparstr=''
                idialog=messageDialog(self, ''.join((bcknd, ' background will be calculated', tempstr)))
                if 'bin' in attrdicttemp.keys():
                    binstr='%d' %attrdicttemp['bin']
                else:
                    binstr='3'
                if idialog.exec_():
                    for h5groupstr in batchh5grpstrlist:
                        self.addtask(''.join(("calcbcknd(h5path='", self.h5path, "', h5groupstr='", h5groupstr, "', bcknd='", bcknd, "', bin=", binstr, othparstr, ")")))
    @pyqtSignature("")
    def on_action_copy_lin_bcknd_triggered(self):
        """Qt slot: queue copying linear-background data between scans.

        First resolves the destination scan (active path or user-selected),
        then prompts for the source file/group, and enqueues a
        CopyLinBckndData task.
        """
        perform=False
        # destination: use the active scan if checked, otherwise prompt
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for destination')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            # source: 'perform' is reset and reused for the second selection
            perform=False
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 source file: from which blin will be copied')
            if temp!='':
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    h5path_from=temp
                    h5groupstr_from=str(unicode(idialog.groupsComboBox.currentText()))
                    perform=True
            if perform:
                self.addtask("CopyLinBckndData('%s', '%s', '%s', '%s')" %(self.h5path, self.h5groupstr, h5path_from, h5groupstr_from))
    @pyqtSignature("")
    def on_action_process_1d_triggered(self):
        """Qt slot: queue processing of 1-d integrated intensities.

        Resolves the target scan, confirms the overwrite with the user, and
        enqueues a process1dint(...) task.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for background calculation')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=messageDialog(self, 'any existing processed 1D intensities will be overwritten')
            if idialog.exec_():
                self.addtask(''.join(("process1dint(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr, "', maxcurv=16.2)")))
    @pyqtSignature("")
    def on_action_process_texture_triggered(self):
        """Qt slot: queue 1-d intensity processing on a texture dataset.

        Resolves the target scan, lists texture groups containing 'icounts',
        lets the user pick one, and enqueues a process1dint(...) task with
        type='h5tex:<group>'. Aborts if the scan has no texture data.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for background calculation')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            h5file=h5py.File(self.h5path, mode='r')
            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
            h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
            if 'texture' in h5mar:
                # collect texture sub-groups that actually hold 'icounts' data
                texgrplist=[]
                h5tex=h5mar['texture']
                for grp in h5tex.iterobjects():
                    if isinstance(grp, h5py.Group) and 'icounts' in grp:
                        texgrplist+=[grp.name.rpartition('/')[2]]
                idialog=selectorDialog(self, texgrplist, title='select texture dataset')
                h5file.close()
            else:
                h5file.close()
                print 'cannot calculate wave trans without texture data'
                return
            if len(texgrplist)>0 and idialog.exec_():
                h5texgrpname=str(idialog.groupsComboBox.currentText())
                self.addtask(''.join(("process1dint(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr, "', maxcurv=16.2, type='h5tex:", h5texgrpname, "')")))
@pyqtSignature("")
def on_actionBinImapChimap_triggered(self):
h5chess=CHESSRUNFILE()
itemnames=[]
for group in h5chess.iterobjects():
if isinstance(group, h5py.Group):
itemnames+=[group.name.rpartition('/')[2]]
h5chess.close()
idialog=selectorDialog(self, itemnames, title='select a CHESSrun group')
if idialog.exec_():
self.addtask(''.join(("binmapsinh5chess('",str(unicode(idialog.groupsComboBox.currentText())),"', bin=3)")))
@pyqtSignature("")
def on_action_plot_chessrun_arrays_triggered(self):
perform=False
path = mygetopenfile(self, xpath=CHESSRUNFILE(returnpathonly=True),markstr='chessrun .h5 file for background calculation')
if path!='':
idialog=plot2dchessrunwindow(self, path, self.runpath)
idialog.exec_()
    @pyqtSignature("")
    def on_action_choose_data_subset_triggered(self):
        """Qt slot: open the 2-d intensity window in subset-selection mode.

        Resolves the target scan and opens plot2dintwindow with navkill=True
        so the user can choose a subset of the data points.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for background calculation')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plot2dintwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), navkill=True)
            idialog.exec_()
@pyqtSignature("")
def on_action_build_integration_map_triggered(self):
h5chess=CHESSRUNFILE()
itemnames=[]
for group in h5chess.iterobjects():
if isinstance(group, h5py.Group):
itemnames+=[group.name.rpartition('/')[2]]
h5chess.close()
idialog=selectorDialog(self, itemnames, title='select a CHESSrun group')
if idialog.exec_():
idialog2=intparamDialog(self)
if idialog2.exec_():
qmin=idialog2.qminSpinBox.value()
qmax=idialog2.qmaxSpinBox.value()
qint=idialog2.qintSpinBox.value()
qgridstr='['+','.join(tuple([labelnumberformat(num) for num in qgrid_minmaxint(qmin, qmax, qint)]))+']'
self.addtask(''.join(("buildintmap('",str(unicode(idialog.groupsComboBox.currentText())),"',", qgridstr, ",bin=3)")))
@pyqtSignature("")
def on_action_build_chi_map_triggered(self):
h5chess=CHESSRUNFILE()
itemnames=[]
for group in h5chess.iterobjects():
if isinstance(group, h5py.Group):
itemnames+=[group.name.rpartition('/')[2]]
h5chess.close()
idialog=selectorDialog(self, itemnames, title='select a CHESSrun group')
if idialog.exec_():
idialog2=chiparamDialog(self, str(unicode(idialog.groupsComboBox.currentText())))
if idialog2.exec_():
chimin=idialog2.chiminSpinBox.value()
chimax=idialog2.chimaxSpinBox.value()
chiint=idialog2.chiintSpinBox.value()
chigridstr='['+','.join(tuple([labelnumberformat(num) for num in qgrid_minmaxint(chimin, chimax, chiint)]))+']'
self.addtask(''.join(("buildchimap('",str(unicode(idialog.groupsComboBox.currentText())),"',", chigridstr, ",bin=3)")))
    @pyqtSignature("")
    def on_action_plot_imap_triggered(self):
        """Qt slot: open the integration-map plot window for a scan."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for integration map')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plotimapwindow(self, self.h5path, self.h5groupstr, self.runpath)
            idialog.exec_()
    @pyqtSignature("")
    def on_action_plot_1D_texture_triggered(self):
        """Qt slot: open the integration-map plot window in texture mode."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for texture plotting ')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plotimapwindow(self, self.h5path, self.h5groupstr, self.runpath, texture=True)
            idialog.exec_()
@pyqtSignature("")
def on_action_plot1dwavetrans_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for plotting 1d wave transform')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
typelist=[]
if 'wavetrans1d' in h5mar:
type='h5mar:icounts'
typelist+=['h5mar:icounts']
if 'texture' in h5mar:
h5tex=h5mar['texture']
for grp in h5tex.iterobjects():
if isinstance(grp, h5py.Group) and 'icounts' in grp:
typelist+=['h5tex:'+grp.name.rpartition('/')[2]]
idialog=selectorDialog(self, typelist, title='select type of 1d dataset')
if idialog.exec_():
type=str(idialog.groupsComboBox.currentText())
else:
return
h5file.close()
idialog=plotwavetrans1dwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), type=type)
idialog.exec_()
@pyqtSignature("")
def on_action_plotinterpimageof1ddata_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for plotting interpolation maps')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
typelist=[]
if 'icounts' in h5mar:
type='h5mar'
typelist+=['h5mar']
if 'texture' in h5mar:
h5tex=h5mar['texture']
for grp in h5tex.iterobjects():
if isinstance(grp, h5py.Group) and 'icounts' in grp:
typelist+=['h5tex:'+grp.name.rpartition('/')[2]]
idialog=selectorDialog(self, typelist, title='select type of 1d dataset')
if idialog.exec_():
type=str(idialog.groupsComboBox.currentText())
else:
return
idialog=plotinterpimageof1ddatawindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), type=type)
idialog.exec_()
    @pyqtSignature("")
    def on_action_integrate_single_image_triggered(self):
        """Qt slot: integrate only the current image."""
        self.integratecontrol(single=True)
    @pyqtSignature("")
    def on_action_integrate_entire_dataset_triggered(self):
        """Qt slot: integrate every image in the dataset."""
        self.integratecontrol(single=False)
    @pyqtSignature("")
    def on_action_plot_dat_triggered(self):
        """Qt slot: open the .dat file plot window."""
        idialog=plotdatwindow(self, self.runpath)
        idialog.exec_()
    @pyqtSignature("")
    def on_action_calcqq_triggered(self):
        """Qt slot: delegate to the q-q calculation control."""
        self.qqcalccontrol()
    @pyqtSignature("")
    def on_action_analyze_qq_triggered(self):
        """Qt slot: queue q-q peak analysis for the selected scan.

        Resolves the target scan, collects peak-search thresholds from the
        dialog, and enqueues a qqanalyze(...) task.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for analyzing qq')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=qqanalysisDialog(self)
            if idialog.exec_():
                # format thresholds as literals for the queued task string
                curve='%d' %idialog.curve_spinBox.value()
                counts='%d' %idialog.cts_spinBox.value()
                clust='%.2f' %idialog.clust_spinBox.value()
                self.addtask(''.join(("qqanalyze(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"', pkmincurve=",curve, ", pkminsqcts=", counts, ", qclusterradius=", clust, ")")))
    @pyqtSignature("")
    def on_action_1d_peak_search_single_triggered(self):
        """Qt slot: 1-d peak search on the current pattern only."""
        self.peak1dcontrol(single=True)
    @pyqtSignature("")
    def on_action_1d_peak_search_all_triggered(self):
        """Qt slot: 1-d peak search over the whole dataset."""
        self.peak1dcontrol(single=False)
    @pyqtSignature("")
    def on_action_1d_peak_search_tex_triggered(self):
        """Qt slot: 1-d peak search over texture ('h5tex') datasets."""
        self.peak1dcontrol(single=False, type='h5tex')
    @pyqtSignature("")
    def on_action_fit_1d_peaks_triggered(self):
        """Qt slot: fit previously found 1-d peaks."""
        self.peakfitcontrol()
    @pyqtSignature("")
    def on_action_fit_1d_peaks_tex_triggered(self):
        """Qt slot: fit 1-d peaks in texture ('h5tex') datasets."""
        self.peakfitcontrol(type='h5tex')
    @pyqtSignature("")
    def on_action_associate_1d_qqpeaks_single_triggered(self):
        """Qt slot: associate 1-d and q-q peaks for the current pattern."""
        self.pkassociatecontrol(single=True)
    @pyqtSignature("")
    def on_action_associate_1d_qqpeaks_all_triggered(self):
        """Qt slot: associate 1-d and q-q peaks for the whole dataset."""
        self.pkassociatecontrol(single=False)
    @pyqtSignature("")
    def on_action_group_into_phases_triggered(self):
        """Qt slot: queue grouping of associated peaks into phases.

        Resolves the target scan, collects grouping thresholds from the
        dialog, and enqueues a makephases(...) task.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for phase grouping')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=makephasesDialog(self)
            if idialog.exec_():
                # format thresholds as literals for the queued task string
                critqqnorm='%.2f' %idialog.critqqnormSpinBox.value()
                critnumqqpks='%d' %idialog.numqqpksSpinBox.value()
                critnumipks='%d' %idialog.numipksSpinBox.value()
                self.addtask(''.join(("makephases(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"', critqqnorm=",critqqnorm, ", critnumqqpks=", critnumqqpks, ", critnumipks=", critnumipks, ")")))
    @pyqtSignature("")
    def on_action_spatial_phases_triggered(self):
        """Qt slot: queue spatial analysis of the grouped phases.

        Resolves the target scan, collects blob-separation thresholds from
        the dialog, and enqueues a spatialanalysisofphases(...) task.
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for spatial analysis of phases')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=spatialphasesDialog(self)
            if idialog.exec_():
                # format thresholds as literals for the queued task string
                critblobsep='%.2f' %idialog.critblobsepSpinBox.value()
                critnumqqpks='%d' %idialog.numqqpksSpinBox.value()
                critnumpts='%d' %idialog.numptsSpinBox.value()
                self.addtask(''.join(("spatialanalysisofphases(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"', critnumqqpks=",critnumqqpks, ", critblobsep=", critblobsep, ', minptsinblob=', critnumpts,")")))
    @pyqtSignature("")
    def on_action_plot_2D_intensity_triggered(self):
        """Qt slot: open the 2-d intensity plot window for a scan."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 2d intensity plotting')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plot2dintwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex())
            idialog.exec_()
@pyqtSignature("")
def on_action_plot_1D_intensity_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 1d intensity plotting')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
typelist=[]
if 'icounts' in h5mar:
type='h5mar'
typelist+=['h5mar']
if 'texture' in h5mar:
h5tex=h5mar['texture']
for grp in h5tex.iterobjects():
if isinstance(grp, h5py.Group) and 'icounts' in grp:
typelist+=['h5tex:'+grp.name.rpartition('/')[2]]
idialog=selectorDialog(self, typelist, title='select type of 1d dataset')
if idialog.exec_():
type=str(idialog.groupsComboBox.currentText())
else:
return
idialog=plot1dintwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), type=type)
idialog.exec_()
    @pyqtSignature("")
    def on_action_fix1dbcknd_triggered(self):
        """Qt slot: open the 1-d intensity window in background-edit mode."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 1d intensity plotting')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plot1dintwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), bckndedit=True)
            idialog.exec_()
    @pyqtSignature("")
    def on_action_addpeaks_triggered(self):
        """Qt slot: open the 1-d intensity window in add-peaks mode."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 1d intensity plotting')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plot1dintwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), addpeaks=True)
            idialog.exec_()
    @pyqtSignature("")
    def on_action_removepeaks_triggered(self):
        """Qt slot: open the 1-d intensity window in remove-peaks mode."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 1d intensity plotting')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plot1dintwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), removepeaks=True)
            idialog.exec_()
    @pyqtSignature("")
    def on_action_association_trees_triggered(self):
        """Qt slot: open the association-tree dialog for a scan.

        FIXME(review): a second method with this exact name is defined later
        in this class; in Python the later definition silently replaces this
        one, so this dialog is never reachable. One of the two slots was
        probably meant to have a different action name — renaming must be
        coordinated with the Qt Designer action (slots auto-connect by name).
        """
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for association tree')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=associationtreedialog(self, self.h5path, self.h5groupstr)
            idialog.exec_()
    @pyqtSignature("")
    def on_action_plot_qq_triggered(self):
        """Qt slot: open the q-q plot window for a scan."""
        perform=False
        # use the active scan if checked, otherwise prompt for file and group
        if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
            perform=True
        else:
            temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for qq plotting')
            if temp!='':
                if self.default_scan_checkBox.isChecked():
                    tempgrp=getdefaultscan(temp)
                    if tempgrp is None:
                        QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                        perform=False
                    else:
                        self.h5path=temp
                        self.h5groupstr=tempgrp
                        self.updateactivepath()
                        perform=True
                else:
                    idialog=getgroupDialog(self, temp)
                    if idialog.exec_():
                        self.h5path=temp
                        self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                        self.updateactivepath()
                        perform=True
        if perform:
            idialog=plotqqwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex())
            idialog.exec_()
@pyqtSignature("")
def on_action_association_trees_triggered(self):
    """Resolve an .h5 file/group selection, then open the qq plot window with
    association trees shown (plotqqwindow(..., displaytrees=True))."""
    # NOTE(review): this duplicates the name of an earlier method in this
    # class and therefore replaces it. Given the 'qq plotting' markstr and
    # displaytrees=True, this was probably intended as a distinct handler;
    # renaming it would also change which Qt action auto-connects, so the
    # intended action name must be confirmed before fixing.
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for qq plotting')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                # try the file's default scan group
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                # otherwise let the user pick a group from the file
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        idialog=plotqqwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex(), displaytrees=True)
        idialog.exec_()
@pyqtSignature("")
def on_action_save_all_1d_plt_triggered(self):
    """Export every point's 1d intensity pattern (icounts, plus ifcounts if
    present) to .plt files in a user-chosen scattering variable and q-range,
    optionally scaled by per-point deposition density."""
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 1d->.plt')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r+')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        qgrid=h5mar['icounts'].attrs['qgrid']
        qvals=q_qgrid_ind(qgrid)
        pointlist=h5analysis.attrs['pointlist']
        xtypelist=['q 1/nm','2th (deg)','d (nm)','pixels']
        idialog=plotsoDialog(self, xtypelist, qvals[0], qvals[-1], title='select a scattering variable')
        if idialog.exec_():
            scale=idialog.densityCheckBox.isChecked()
            xtype=unicode(idialog.typeComboBox.currentText())
            low=idialog.lowSpinBox.value()
            high=idialog.highSpinBox.value()
            # index range of qvals covering [low, high]
            lowind=numpy.where(qvals>=low)[0][0]
            # NOTE(review): qvals[-1:0:-1] excludes element 0 from the
            # reversed scan - looks like a possible off-by-one at the low
            # end; confirm against the intended semantics of highind
            highind=qvals.shape[0]-numpy.where(qvals[-1:0:-1]<=high)[0][0]
            qvals=qvals[lowind:highind]
            # NOTE(review): 'getattr' here must be a project-local helper
            # that shadows the builtin (the builtin getattr on a path string
            # would raise) - verify which function is actually bound
            attrdict=getattr(self.h5path, self.h5groupstr)
            L=attrdict['cal'][2]
            wl=attrdict['wavelength']
            psize=attrdict['psize']
            if scale:
                # per-point deposition density used to normalize intensities
                infodict, success=getpointinfo(self.h5path, self.h5groupstr, types=['DPnmolcm2'])
                if not success:
                    # NOTE(review): h5file is left open on this early return
                    print 'ABORTING: not all info could be found'
                    return
                scalearr=1/infodict['DPnmolcm2']
            else:
                scalearr=numpy.ones(max(pointlist)+1, dtype='float32')
            # convert q values to the chosen scattering variable; t1 tags the filename
            if 'pix' in xtype:
                xvals=pix_q(qvals, L, wl, psize=psize)
                t1='pix'
            elif '(nm)' in xtype:
                xvals=d_q(qvals)
#                plotarr=numpy.array([plotarr[-1*i-1] for i in range(plotarr.size)])
#                xvals=numpy.array([xvals[-1*i-1] for i in range(xvals.size)])
                t1='d'
            elif '2' in xtype:
                xvals=twotheta_q(qvals, wl)
                t1='2th'
            else:
                t1='q'
                xvals=qvals
            if scale:
                scalestr='scaledIvs'
            else:
                scalestr='Ivs'
            savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, scalestr, t1, ''))
            pointers=[h5mar['icounts']]
            if 'ifcounts' in h5mar:
                pointers+=[h5mar['ifcounts']]
            # write one .plt per (dataset, point) pair
            for pnt in pointers:
                for pointind in pointlist:
                    yvals=pnt[pointind, lowind:highind]*scalearr[pointind]#index out of bounds
                    writeplotso(self.runpath, xvals, yvals, attrdict, t1, ''.join((savename1, pnt.name.rpartition('/')[2], `pointind`)))
        h5file.close()
@pyqtSignature("")
def on_action_save_2d_image_dataset_triggered(self):
    """Export a chosen 2d image dataset for the active scan, either as .png
    (with an optional user-chosen colorbar range) or as .dat, with optional
    extra binning.

    Bug fix: 'colorrange' was previously unbound when a 'dat' save type was
    selected, raising NameError at the writeall2dimages call; it is now
    initialized to None. Also fixes the 'furhter' typos in the save-type
    labels (safe: the selection is resolved by index within this same list)
    and renames the local 'type' so it no longer shadows the builtin.
    """
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 2d data->.png')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        typelist=['raw','bckndsubtracted', 'banom', 'totalbcknd', 'singlebcknd']
        idialog=selectorDialog(self, typelist, title='select a 2d image type')
        if idialog.exec_():
            typestr=str(unicode(idialog.groupsComboBox.currentText()))
            imagetype=typelist.index(typestr)  # renamed from 'type' (shadowed the builtin)
            savetypelist=['png from binned data', 'png with x2 further binning', 'png with x10 further binning', 'dat from binned data', 'dat with x2 further binning', 'dat with x10 further binning' ]
            idialog=selectorDialog(self, savetypelist, title='select a save type')
            if idialog.exec_():
                saveind=savetypelist.index(str(unicode(idialog.groupsComboBox.currentText())))
                extrabin=[1, 2, 10][saveind%3]  # binning factor encoded by position within each triple
                datsave=bool(saveind//3)        # second triple of options are .dat saves
                colorrange=None  # bug fix: was unbound when datsave was True
                if not datsave:
                    idialog=highlowDialog(self, "Enter range for colorbar - cancel for auto")
                    if idialog.exec_():
                        colorrange=(idialog.lowSpinBox.value(), idialog.highSpinBox.value())
                    else:
                        colorrange=None
                writeall2dimages(self.runpath, self.h5path, self.h5groupstr, imagetype, typestr, colorrange=colorrange, datsave=datsave, extrabin=extrabin)
@pyqtSignature("")
def on_action_export_cfg_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for 2d data->.png')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
if 'depprof' in h5analysis:
h5depprof=h5analysis['depprof']
gunpropdict=ReadGunPropDict(h5analysis)
if not 'xrf/cfg' in h5analysis:
QMessageBox.warning(self,"failed", 'ABORTED: XRF data not found')
return
h5xrf=h5analysis['xrf']
cfg=readh5pyarray(h5xrf['cfg'])
inds=list(numpy.where(cfg!='')[0])
inds=[`i` for i in inds]
idialog=selectorDialog(self, inds, title='select a pointind')
if idialog.exec_():
indstr=str(unicode(idialog.groupsComboBox.currentText()))
ind=inds.index(indstr)
cfgpath=os.path.join(self.runpath, ''.join((os.path.split(self.h5path)[1][0:-3], '_', self.h5groupstr.rpartition('.')[2], '_', indstr, '.cfg'))).replace('\\','/').encode()
f=open(cfgpath,mode='w')
f.write(cfg[ind])
f.close()
@pyqtSignature("")
def on_action_edit_raw_diff_data_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
h5path=self.h5path
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for editing raw XRD')
if temp!='':
h5path=temp
perform=True
if perform:
h5file=h5py.File(h5path, mode='r')
grpnames=[]
for group in h5file.iterobjects():
if isinstance(group,h5py.Group) and 'measurement' in group:
group=group['measurement']
for xrdgrp in XRDgroupnames():
if xrdgrp in group and isinstance(group[xrdgrp],h5py.Group) and 'counts' in group[xrdgrp]:
grpnames+=[group[xrdgrp].name]
h5file.close()
perform=len(grpnames)>0
if not perform:
print 'no XRD data found in .h5 file'
if perform:
idialog=selectorDialog(self, grpnames, title='Select an experiment group')
perform=idialog.exec_()
if perform:
h5grppath=str(idialog.groupsComboBox.currentText())
idialog=editrawxrdwindow(self, h5path, h5grppath=h5grppath) #these are not self.h5path because this fcn can run on any group with xrd data (no itinilization necessary)
if idialog.exec_():
QMessageBox.warning(self,"Only Raw data modified", 'The "edit raw data" has successfully completed but\nany existing binned images, background calculations, etc.\ndo not yet reflect this edit. The cleanest way to edit raw data\nis to run "initialize.." and restart XRD analysis.')
@pyqtSignature("")
def on_action_image_histogram_triggered(self):
    """Resolve the active .h5 file/group (or prompt for one) and show the
    image-histogram window."""
    ready=self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare
    if not ready:
        chosen = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for histogram plotting')
        if chosen!='':
            if self.default_scan_checkBox.isChecked():
                # use the file's default scan group if one is recorded
                grpname=getdefaultscan(chosen)
                if grpname is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                else:
                    self.h5path=chosen
                    self.h5groupstr=grpname
                    self.updateactivepath()
                    ready=True
            else:
                # let the user pick a group from the chosen file
                grpdlg=getgroupDialog(self, chosen)
                if grpdlg.exec_():
                    self.h5path=chosen
                    self.h5groupstr=str(unicode(grpdlg.groupsComboBox.currentText()))
                    self.updateactivepath()
                    ready=True
    if ready:
        histdlg=plothistwindow(self, self.h5path, self.h5groupstr, self.runpath, self.navchoiceComboBox.currentIndex())
        histdlg.exec_()
@pyqtSignature("")
def on_action_H5file_info_triggered(self):
    """Resolve the active .h5 file/group (or prompt for one) and show the
    file-info dialog."""
    ready=self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare
    if not ready:
        chosen = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for file info retrieval')
        if chosen!='':
            if self.default_scan_checkBox.isChecked():
                # use the file's default scan group if one is recorded
                grpname=getdefaultscan(chosen)
                if grpname is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                else:
                    self.h5path=chosen
                    self.h5groupstr=grpname
                    self.updateactivepath()
                    ready=True
            else:
                # let the user pick a group from the chosen file
                grpdlg=getgroupDialog(self, chosen)
                if grpdlg.exec_():
                    self.h5path=chosen
                    self.h5groupstr=str(unicode(grpdlg.groupsComboBox.currentText()))
                    self.updateactivepath()
                    ready=True
    if ready:
        infodlg=h5fileinfoDialog(self, self.h5path, self.h5groupstr)
        infodlg.exec_()
@pyqtSignature("")
def on_action_calcqchiimages_triggered(self):
    """List the CHESSrun groups, let the user pick one, and queue a
    calcqchiimages task for it."""
    chessfile=CHESSRUNFILE()
    # group names (last path component) of every top-level group in the file
    grouplabels=[node.name.rpartition('/')[2] for node in chessfile.iterobjects() if isinstance(node, h5py.Group)]
    chessfile.close()
    seldlg=selectorDialog(self, grouplabels, title='select a CHESSrun group')
    if seldlg.exec_():
        self.addtask(''.join(("calcqchiimages('", unicode(seldlg.groupsComboBox.currentText()), "', alsocalcbin=2,equate_chi_azim=True)")))
@pyqtSignature("")
def on_action_createchessrun_triggered(self):
    """Prompt for CHESS-run calibration parameters and (re)create the named
    group in the CHESSrun file, with empty imap/chimap/killmap subgroups."""
    dlg = chessrunattrDialog(self)
    if not dlg.exec_():
        return
    runattrs={
    'wavelength':dlg.wavelengthSpinBox.value(),
    'cal':[dlg.xcenSpinBox.value(), dlg.ycenSpinBox.value(), dlg.LSpinBox.value(), dlg.martiltSpinBox.value(), dlg.tiltrotSpinBox.value()],
    'alpha':dlg.alphaSpinBox.value(),
    'detectorshape':(dlg.shape0SpinBox.value(),dlg.shape1SpinBox.value()), #fit2D style horizontal,vertical - transpose of array indices
    'tiltdirection':str(dlg.tiltdirectionComboBox.currentText()),
    'xrdname':str(dlg.xrdnameLineEdit.text()),
    'psize':dlg.psizeSpinBox.value(),
    }
    chessfile=CHESSRUNFILE('r+')
    groupname=str(unicode(dlg.nameLineEdit.text()))
    # replace any existing group of the same name
    if groupname in chessfile:
        del chessfile[groupname]
    newgroup=chessfile.create_group(groupname)
    for attrname, attrval in runattrs.iteritems():
        newgroup.attrs[attrname]=attrval
    for mapname in ('imap', 'chimap', 'killmap'):
        newgroup.create_group(mapname)
    chessfile.close()
@pyqtSignature("")
def on_action_calc_waveset1d_triggered(self):
    """Gather wavelet-set grid parameters (optionally pre-filled from the
    active scan's qgrid or chigrid) and queue a buildwaveset1d task."""
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for file info retrieval')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        # NOTE(review): h5mar is computed but not used below - confirm and remove
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        itemlist=['qgrid for powder patterns','chigrid for texture analysis']
        idialog=selectorDialog(self, itemlist, title='select an application, i.e. a type of data for extracting design parameters')
        if idialog.exec_():
            selection=itemlist.index(str(idialog.groupsComboBox.currentText()))
            # pre-fill the parameter dialog with the grid of the chosen data type
            if selection==0:
                grid=getimapqgrid(h5analysis.attrs['imapstr'], imap=False)
            else:
                grid=getchimapchigrid(h5analysis.attrs['chimapstr'], chimap=False)
        else:
            grid=None
        h5file.close()
    else:
        grid=None
    idialog=waveset1dparamDialog(self, grid)
    if idialog.exec_():
        # scale grid parameters
        qsmin=idialog.qsminSpinBox.value()
        qsmax=idialog.qsmaxSpinBox.value()
        qsint=idialog.qsintSpinBox.value()
        qsgridstr='['+','.join(tuple([labelnumberformat(num) for num in scalegrid_minmaxint(qsmin, qsmax, qsint)]))+']'
        # position grid parameters
        qpmin=idialog.qpminSpinBox.value()
        qpmax=idialog.qpmaxSpinBox.value()
        qpint=idialog.qpintSpinBox.value()
        qpgridstr='['+','.join(tuple([labelnumberformat(num) for num in qgrid_minmaxint(qpmin, qpmax, qpint)]))+']'
        # q grid parameters
        qmin=idialog.qminSpinBox.value()
        qmax=idialog.qmaxSpinBox.value()
        qint=idialog.qintSpinBox.value()
        qgridstr='['+','.join(tuple([labelnumberformat(num) for num in qgrid_minmaxint(qmin, qmax, qint)]))+']'
        self.addtask(''.join(("buildwaveset1d(qscalegrid=", qsgridstr, ", qposngrid=", qpgridstr, ", qgrid=", qgridstr, ",maxfixenfrac=",`idialog.fixenSpinBox.value()`,")")))
@pyqtSignature("")
def on_action_wavetrans1d_triggered(self):
    """Queue a 1d wavelet transform task (wavetrans1d) on the active scan's
    icounts, using a stored wavelet set whose q values are a subset of the
    data's qgrid."""
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for wavelet transform calculation')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if 'icounts' in h5mar:
            qgrid=h5mar['icounts'].attrs['qgrid']
            h5file.close()
            # NOTE(review): h5wave is opened but never closed on this path -
            # confirm whether WAVESET1dFILE handles need explicit closing
            h5wave=WAVESET1dFILE()
            selectlist=[]
            namedict={}
            for grp in h5wave.iterobjects():
                if isinstance(grp, h5py.Group):
                    # group names encode the grids as '<qscalegrid>_<qposngrid>...'
                    selstr, garb, qgridstr=(grp.name.rpartition('/')[2]).replace('_', 'qposngrid:', 1).partition('_')
                    waveqgrid=grp.attrs['qgrid']
                    # only offer wavelet sets whose q values all exist in the data grid
                    if set(q_qgrid_ind(waveqgrid)).issubset(set(q_qgrid_ind(qgrid))):
                        selstr='qscalegrid:'+selstr
                        namedict[selstr]=grp.name.rpartition('/')[2]
                        selectlist+=[selstr]
            idialog=selectorDialog(self, selectlist, title='select wavelet set to use')
            if idialog.exec_():
                namestr=str(unicode(idialog.groupsComboBox.currentText()))
                self.addtask(''.join(("wavetrans1d('", self.h5path, "','", self.h5groupstr, "','", namedict[namestr],"')")))
        else:
            h5file.close()
            print 'cannot calculate wave trans without icounts'
@pyqtSignature("")
def on_action_wavetranstex_triggered(self):
perform=False
if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
perform=True
else:
temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for wavelet transform calculation')
if temp!='':
if self.default_scan_checkBox.isChecked():
tempgrp=getdefaultscan(temp)
if tempgrp is None:
QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
perform=False
else:
self.h5path=temp
self.h5groupstr=tempgrp
self.updateactivepath()
perform=True
else:
idialog=getgroupDialog(self, temp)
if idialog.exec_():
self.h5path=temp
self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
self.updateactivepath()
perform=True
if perform:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'texture' in h5mar:
texgrplist=[]
h5tex=h5mar['texture']
for grp in h5tex.iterobjects():
if isinstance(grp, h5py.Group) and 'icounts' in grp:
texgrplist+=[grp.name.rpartition('/')[2]]
idialog=selectorDialog(self, texgrplist, title='select texture dataset')
else:
h5file.close()
print 'cannot calculate wave trans without texture data'
return
if len(texgrplist)>0 and idialog.exec_():
h5texgrpname=str(idialog.groupsComboBox.currentText())
h5texgrp=h5tex[h5texgrpname]
qgrid=h5texgrp.attrs['chigrid']
h5file.close()
h5wave=WAVESET1dFILE()
selectlist=[]
namedict={}
for grp in h5wave.iterobjects():
if isinstance(grp, h5py.Group):
selstr, garb, qgridstr=(grp.name.rpartition('/')[2]).replace('_', 'qposngrid:', 1).partition('_')
waveqgrid=grp.attrs['qgrid']
if set(q_qgrid_ind(waveqgrid)).issubset(set(q_qgrid_ind(qgrid))):
selstr='qscalegrid:'+selstr
namedict[selstr]=grp.name.rpartition('/')[2]
selectlist+=[selstr]
idialog=selectorDialog(self, selectlist, title='select wavelet set to use')
if idialog.exec_():
namestr=str(unicode(idialog.groupsComboBox.currentText()))
self.addtask(''.join(("wavetrans1d('", self.h5path, "','", self.h5groupstr, "','", namedict[namestr],"', type='h5tex:", h5texgrpname, "')")))
@pyqtSignature("")
def on_actionExit_triggered(self):
    """Quit the application by raising SystemExit."""
    raise SystemExit
def importdatadialogcontrol(self, h5path=None, h5groupstr=None, command=None, markstr=''):
    """data is automatically binned at 3. uses gui for getting parametrs, but chessrun parameters taken from chessrun h5 group attrs"""
    # NOTE(review): the docstring says binned at 3 but the queued task below
    # uses bin=2 - confirm which is correct
    # If no file/group is supplied, prompt for them; then ensure the
    # 'analysis' group exists, collect import attributes, open the raw-XRD
    # editor, and queue an initializescan task.
    if h5path is None or h5groupstr is None:
        self.clearactivepath()
        self.h5path=mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for scan initialization')
        command=None
        idialog2=importh5scanDialog(self, self.h5path)
        if not idialog2.exec_():
            return
        # option strings look like '<groupname>:<command>'
        temp=unicode(idialog2.scanComboBox.currentText())
        self.h5groupstr, temp, command=temp.partition(':')
        self.h5groupstr=str(self.h5groupstr)
    else:
        self.h5path=h5path
        self.h5groupstr=h5groupstr
    self.updateactivepath()
    h5file=h5py.File(self.h5path, mode='r+')
    if not 'analysis' in h5file[self.h5groupstr]:
        h5file[self.h5groupstr].create_group('analysis')
    h5file.close()
    attrdicttemp = self.importattrDialogcaller(self, self.h5path, self.h5groupstr, command=command)
    if attrdicttemp is None:
        # user cancelled the attribute dialog
        return
    writeattr(self.h5path, self.h5groupstr, attrdicttemp)
    h5file=h5py.File(self.h5path, mode='r+')
    h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
    xrdname=getxrdname(h5analysis)
    if not xrdname in h5analysis:
        h5analysis.create_group(xrdname)
    h5file.close()
    idialog=editrawxrdwindow(self, self.h5path, h5groupstr=self.h5groupstr)
    idialog.exec_()
    self.addtask(''.join(("initializescan('", self.h5path, "','", self.h5groupstr, "',bin=2)")))
def batchimportdatadialogcontrol(self, markstr=''):
    """data is automatically binned at 2. uses gui for getting parametrs, but chessrun parameters taken from chessrun h5 group attrs"""
    # Batch variant of importdatadialogcontrol: iterate over every scan
    # option offered by the import dialog and queue an initializescan task
    # for each one.
    self.clearactivepath()
    self.h5path=mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for batch scan initialization')
    command=None
    # NOTE(review): the dialog is constructed only for its optionlist; its
    # exec_() is never called, unlike the single-scan variant - confirm
    idialog2=importh5scanDialog(self, self.h5path)
    for optstr in idialog2.optionlist:
        print optstr
        # option strings look like '<groupname>:<command>'
        self.h5groupstr, temp, command=optstr.partition(':')
        self.h5groupstr=str(self.h5groupstr)
        self.updateactivepath()
        h5file=h5py.File(self.h5path, mode='r+')
        if not 'analysis' in h5file[self.h5groupstr]:
            h5file[self.h5groupstr].create_group('analysis')
        h5file.close()
        attrdicttemp = self.importattrDialogcaller(self, self.h5path, self.h5groupstr, command=command)
        if attrdicttemp is None:
            # NOTE(review): cancelling one scan's attribute dialog aborts the
            # whole batch (return, not continue) - confirm this is intended
            return
        writeattr(self.h5path, self.h5groupstr, attrdicttemp)
        h5file=h5py.File(self.h5path, mode='r+')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        xrdname=getxrdname(h5analysis)
        if not xrdname in h5analysis:
            h5analysis.create_group(xrdname)
        h5file.close()
        idialog=editrawxrdwindow(self, self.h5path, h5groupstr=self.h5groupstr)
        idialog.exec_()
        self.addtask(''.join(("initializescan('", self.h5path, "','", self.h5groupstr, "',bin=2)")))
def integratecontrol(self, single=True):
    """Queue a chi-integration task ('integrate') for the active scan.

    single: if True, prompt for one image name and pass it via the task's
    singleimage argument; background subtraction is disabled when the
    selected image is itself a background ('b*') or a raw image.
    """
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for integration')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        pointlist=h5analysis.attrs['pointlist']
        # candidate image names: per-point processed, raw, and banom images
        namelist=[]
        namelist+=['%d' %p for p in pointlist]
        namelist+=['raw%d' %p for p in pointlist]
        namelist+=['banom%d' %p for p in pointlist]
        # plus any 2d 'b*' background datasets (excluding binned versions)
        for dset in h5mar.iterobjects():
            if isinstance(dset, h5py.Dataset) and len(dset.shape)==2 and not ('bin' in dset.name.rpartition('/')[2]) and (dset.name.rpartition('/')[2]).startswith('b'):
                namelist+=[dset.name.rpartition('/')[2]]
        h5file.close()
        perform=False
        bckndbool=True
        if len(namelist)>0:
            singlecommand=''
            if single:
                idialog=selectorDialog(self, namelist, title='select an image to integrate')
                if idialog.exec_():
                    imname=str(unicode(idialog.groupsComboBox.currentText()))
                    singlecommand=''.join((", singleimage='", imname,"'"))
                    perform=True
                    # no background subtraction when integrating a background
                    # or raw image itself
                    if imname.startswith('b') or ('raw' in imname):
                        bckndbool=False
            else:
                perform=True
            if perform:
                self.addtask(''.join(("integrate(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"'", singlecommand, ", bckndbool=", `bckndbool`, ")")))
        else:
            QMessageBox.warning(self,"failed", "no images found")
def qqcalccontrol(self):
    """Queue a qq calculation task for the active scan, letting the user
    choose the 1d intensity source (icounts or processed ifcounts) and the
    q-grid."""
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for calculating qq')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r+')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if not ('icounts' in h5mar):
            h5file.close()
            print 'cannot perform qqcalc due to absence of icounts'
            return
        defqgrid=h5mar['icounts'].attrs['qgrid']
        # offer processed data first when available
        opts=[]
        if 'ifcounts' in h5mar:
            opts+=['ifcounts (processed)']
        opts+=['icounts']
        h5file.close()
        idialog=qqparamDialog(self, defqgrid, opts, 'select a type of 1d intensity array')
        if idialog.exec_():
            # keep only the dataset name, dropping the ' (processed)' suffix
            imagecommand=unicode(idialog.typeComboBox.currentText()).partition(' ')[0]
            imagecommand=''.join((", image='", imagecommand,"'"))
            qmin=idialog.qminSpinBox.value()
            qmax=idialog.qmaxSpinBox.value()
            qint=idialog.qintSpinBox.value()
            qgridstr='[%.2f, %.2f, %.2f]' %tuple(qgrid_minmaxint(qmin, qmax, qint))
            self.addtask(''.join(("qqcalc(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"', qgrid=", qgridstr, imagecommand, ")")))
def peakfitcontrol(self, type='h5mar'):
    """Queue a 1d peak-fitting task (peakfit1d) on either the processed
    powder pattern (type containing 'h5mar' -> ifcounts) or a texture
    dataset (type containing 'h5tex').

    type: 'h5mar' or 'h5tex'; embedded together with the selected dataset
    name into the task's type argument.
    """
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for finding peaks in 1d intensity')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        namelist=[]
        # the h5mar path requires processed data plus wavelet peaks
        if ('h5mar' in type) and ('ifcounts' in h5mar) and ('wavetrans1d' in h5mar) and ('peaks' in h5mar['wavetrans1d']):
            # include user-added peaks only if they were not already fitted
            if ('additionalpeaks' in h5mar) and h5mar['additionalpeaks'].attrs['usedinfitting']==0:
                peakfitstr=', use_added_peaks=True'
            else:
                peakfitstr=''
            namelist=['ifcounts']
        elif ('h5tex' in type) and 'texture' in h5mar:
            h5tex=h5mar['texture']
            namelist=[]
            for grp in h5tex.iterobjects():
                if isinstance(grp, h5py.Group) and 'wavetrans1d' in grp and ('peaks' in grp['wavetrans1d']):
                    namelist+=[grp.name.rpartition('/')[2]]
        if len(namelist)==0:
            h5file.close()
            # NOTE(review): this message mentions texture data even on the
            # h5mar path, where the real cause is missing ifcounts/peaks
            print 'cannot calculate wave trans without texture data'
            return
        else:
            # NOTE(review): this title is also shown for the single-entry
            # h5mar case ('ifcounts')
            idialog=selectorDialog(self, namelist, title='select texture dataset')
            if idialog.exec_():
                grpstr=str(idialog.groupsComboBox.currentText())
                if ('h5tex' in type):
                    # include user-added peaks only if they were not already fitted
                    if ('additionalpeaks' in h5tex[grpstr]) and h5tex[grpstr]['additionalpeaks'].attrs['usedinfitting']==0:
                        peakfitstr=', use_added_peaks=True'
                    else:
                        peakfitstr=''
                h5file.close()
                typecommand=''.join((", type='", type, ':', grpstr,"'"))
                self.addtask(''.join(("peakfit1d(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr, "'", typecommand,", windowextend_hwhm=3, peakshape='Gaussian', critresidual=.2",peakfitstr,")")))
            else:
                h5file.close()
def peak1dcontrol(self, single=True, type='h5mar'):
    """Queue a wavelet-based 1d peak search (wavepeaksearch1d) on either the
    powder pattern (type containing 'h5mar' -> icounts) or a texture
    dataset (type containing 'h5tex'), with parameters from a dialog.

    single: not referenced in this implementation.
    type: 'h5mar' or 'h5tex'.
    """
    perform=False
    # reuse the active file/group when the checkbox is set and the displayed
    # path still matches the remembered one
    if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
        perform=True
    else:
        temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for finding peaks in 1d intensity')
        if temp!='':
            if self.default_scan_checkBox.isChecked():
                tempgrp=getdefaultscan(temp)
                if tempgrp is None:
                    QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
                    perform=False
                else:
                    self.h5path=temp
                    self.h5groupstr=tempgrp
                    self.updateactivepath()
                    perform=True
            else:
                idialog=getgroupDialog(self, temp)
                if idialog.exec_():
                    self.h5path=temp
                    self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
                    self.updateactivepath()
                    perform=True
    if perform:
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        namelist=[]
        if ('h5mar' in type) and ('wavetrans1d' in h5mar) and ('wavetrans' in h5mar['wavetrans1d']):
            h5file.close()
            namelist=['icounts']
        elif ('h5tex' in type) and 'texture' in h5mar:
            # NOTE(review): on this branch the h5 file is not closed when
            # namelist ends up non-empty - compare peakfitcontrol
            h5tex=h5mar['texture']
            namelist=[]
            for grp in h5tex.iterobjects():
                if isinstance(grp, h5py.Group) and 'wavetrans1d' in grp:
                    namelist+=[grp.name.rpartition('/')[2]]
        if len(namelist)==0:
            h5file.close()
            print 'cannot perform peak search because cannot find wavelet transformation'
            # NOTE(review): apparently missing 'return' here - execution
            # falls through and opens the dialog with an empty list; the
            # sibling peakfitcontrol returns at the analogous point
        idialog=wavepeak1dDialog(self, namelist, 'select a type of 1d intensity array for peak search')
        if idialog.exec_():
            typecommand=''.join((", type='", type, ':', str(idialog.typeComboBox.currentText()),"'"))
            # format the numeric dialog values for embedding in the task string
            minridgelength='%d' %idialog.minridgelength_spinBox.value()
            minchildlength='%d' %idialog.minchildlength_spinBox.value()
            minridgewtsum='%.2f' %idialog.minridgewtsum_spinBox.value()
            minchildwtsum='%.2f' %idialog.minchildwtsum_spinBox.value()
            wavenoisecutoff='%.2f' %idialog.wavenoisecutoff_spinBox.value()
            maxqs='%.2f' %idialog.maxqs_spinBox.value()
            self.addtask(''.join(("wavepeaksearch1d(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr, "'", typecommand,", minridgelength=", minridgelength, ", minchildlength=", minchildlength, ", minridgewtsum=", minridgewtsum, ", minchildwtsum=", minchildwtsum,", maxqscale_localmax=", maxqs, ", wavenoisecutoff=", wavenoisecutoff, ")")))
#this was written to allow peak searching in single spectra and in ifcounts but not currently supported
# def peak1dcontrol(self, single=True):
# perform=False
# if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
# perform=True
# else:
# temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for finding peaks in 1d intensity')
# if temp!='':
# if self.default_scan_checkBox.isChecked():
# tempgrp=getdefaultscan(temp)
# if tempgrp is None:
# QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
# perform=False
# else:
# self.h5path=temp
# self.h5groupstr=tempgrp
# self.updateactivepath()
# perform=True
# else:
# idialog=getgroupDialog(self, temp)
# if idialog.exec_():
# self.h5path=temp
# self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
# self.updateactivepath()
# perform=True
# if perform:
# h5file=h5py.File(self.h5path, mode='r')
# h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((self.h5groupstr, 'analysis/mar345'))]
#
# pointlist=h5analysis.attrs['pointlist']
#
# namelist=[]
# if 'icounts' in h5mar:
# if single:
# namelist+=['i%d' %p for p in pointlist]
# else:
# namelist+=['icounts']
# if 'ifcounts' in h5mar:
# if single:
# namelist+=['if%d' %p for p in pointlist]
# else:
# namelist+=['ifcounts']
#
# if single:
# for node in h5mar.iterobjects():
# if node.name.startswith('i') and isinstance(node, h5py.Dataset) and len(node.shape)==1:
# namelist+=[node.name]
#
# h5file.close()
# perform=False
# if len(namelist)>0:
# idialog=wavepeak1dDialog(self, namelist, 'select a type of 1d intensity array for peak search')
# if idialog.exec_():
# imagecommand=''.join((", image='", unicode(idialog.typeComboBox.currentText()).partition(' '),"'"))
# perform=True
# if perform:
# minridgelength='%d' %idialog.minridgelength_spinBox.value()
# wavenoisecutoff='%.2f' %idialog.wavenoisecutoff_spinBox.value()
# self.addtask(''.join(("wavepeaksearch1d(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"', minridgelength=",minridgelength, ", wavenoisecutoff=", wavenoisecutoff, imagecommand, ")")))
# else:
# QMessageBox.warning(self,"failed", "no intensity arrays found")
# def pkassociatecontrol(self, single=True):
# perform=False
# if self.activepathcheckBox.isChecked() and unicode(self.active_file_lineEdit.text())==self.activepathcompare:
# perform=True
# else:
# temp = mygetopenfile(self, xpath=self.h5path,markstr='.h5 file for associating 1d peaks with qqpeaks')
# if temp!='':
# if self.default_scan_checkBox.isChecked():
# tempgrp=getdefaultscan(temp)
# if tempgrp is None:
# QMessageBox.warning(self,"failed", 'No default grp found - run initialize')
# perform=False
# else:
# self.h5path=temp
# self.h5groupstr=tempgrp
# self.updateactivepath()
# perform=True
# else:
# idialog=getgroupDialog(self, temp)
# if idialog.exec_():
# self.h5path=temp
# self.h5groupstr=str(unicode(idialog.groupsComboBox.currentText()))
# self.updateactivepath()
# perform=True
# if perform:
# singlecommand=''
# perform=False
# fulldergrpstr=''.join(('h5file',self.h5groupstr, '.Derived'))
# h5file=tables.openFile(self.h5path, mode='r')
# dergrp=eval(fulldergrpstr)
# namelist=[]
# for node in dergrp:
# if node.name.startswith('k') and node.name[1:].isdigit():
# namelist+=[node.name]
# h5file.close()
# if len(namelist)>0:
# namelist.sort()
# if single:
# idialog=selectorDialog(self, namelist, title='select peak list for qq association')
# if idialog.exec_():
# imname=str(unicode(idialog.groupsComboBox.currentText()))
# singlecommand=''.join((", singleimage='", imname,"'"))
# perform=True
# else:
# perform=True
# else:
# QMessageBox.warning(self,"failed", "no intensity arrays found")
# if perform:
# idialog=peakqqassociationDialog(self)
# if idialog.exec_():
# qqaaft='(%.2f,%.2f)' %(idialog.qanisofrac_spinBox.value(), idialog.qalloyfrac_spinBox.value())
# qqsigcritsep='%.2f' %idialog.qqsig_spinBox.value()
# qqnormcritval='%.2f' %idialog.qqnorm_spinBox.value()
# self.addtask(''.join(("peak1dassociation(h5path='", self.h5path, "', h5groupstr='", self.h5groupstr,"', qqanisoalloyfractup=",qqaaft, ", qqsigcritsep=", qqsigcritsep, ", qqnormcritval=", qqnormcritval, singlecommand,")")))
def addtask(self, cmdstr):
    """Queue a task by appending its command string to the task browser widget."""
    # (an earlier revision appended an explicit trailing newline; the
    # text browser adds one per append on its own)
    browser = self.taskTextBrowser
    browser.append(cmdstr)
def importattrDialogcaller(self, p1, p2, p3, command=None):
    """Run an importattrDialog and collect its widget values into a dict.

    p1, p2, p3 are forwarded as the dialog's (parent, h5path, h5groupstr)
    positional arguments -- presumably; confirm against importattrDialog.__init__.
    Returns the assembled attribute dict on dialog accept, or None on cancel.
    """
    idialog = importattrDialog(p1, p2, p3, command=command)
    if idialog.exec_():
        # gather the four element-name line edits into a list of plain strings
        ellineditlist=[idialog.el1LineEdit, idialog.el2LineEdit, idialog.el3LineEdit, idialog.el4LineEdit]
        ellist=[str(unicode(le.text())) for le in ellineditlist]
        # (start, interval, npts) triples for the x and z scan grids
        xgrid=(idialog.xstartSpinBox.value(), idialog.xintSpinBox.value(), idialog.xptsSpinBox.value())
        zgrid=(idialog.zstartSpinBox.value(), idialog.zintSpinBox.value(), idialog.zptsSpinBox.value())
        # snapshot of every relevant widget value; the *str entries are h5
        # paths built as '/<chessrun>/<subgroup>/<selection>'
        returndict ={
        'wavelength':idialog.wavelengthSpinBox.value(),
        'command':str(unicode(idialog.cmdLineEdit.text())),
        'elements':ellist,
        'xgrid':xgrid,
        'zgrid':zgrid,
        'counter':idialog.inttimeSpinBox.value(),
        'cal':[idialog.xcenSpinBox.value(), idialog.ycenSpinBox.value(), idialog.LSpinBox.value(), idialog.martiltSpinBox.value(), idialog.tiltrotSpinBox.value()],
        'alpha':idialog.alphaSpinBox.value(),
        'bcknd':str(unicode(idialog.bckndComboBox.currentText())),
        'chessrunstr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())))),
        'imapstr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())), 'imap', str(unicode(idialog.imapcomboBox.currentText())))),
        'chimapstr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())), 'chimap', str(unicode(idialog.chimapcomboBox.currentText())))),
        'killmapstr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())), 'killmap', str(unicode(idialog.killmapcomboBox.currentText())))),
        'qimagestr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())), 'qimage')),
        'chiimagestr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())), 'chiimage')),
        'dqchiimagestr':'/'.join(('', str(unicode(idialog.chessruncomboBox.currentText())), 'dqchiimage')),
        'xrdname':str(idialog.xrdnameLineEdit.text()),
        'psize':idialog.psizeSpinBox.value(),
        }
        # for non-user-compiled commands, merge in the spec-derived attributes:
        # either the ones read from the h5 file (dialog's fromspecattr) or ones
        # synthesized from the manual x/z grids
        if returndict['command']!='USER-COMPILED':
            if idialog.usespecCheckBox.isChecked():
                for k, v in idialog.fromspecattr.iteritems():
                    returndict[k]=v
            else:
                for k, v in specattr_xzgrid(xgrid, zgrid, 'mesh' in returndict['command']).iteritems():
                    returndict[k]=v
        return returndict
    else:
        return None
class bckndinventoryDialog(QDialog,
ui_bckndinventoryDialog.Ui_bckndinventoryDialog):
#***
def __init__(self, parent, h5path, h5groupstr=None, h5grppath=None):
super(bckndinventoryDialog, self).__init__(parent)
self.setupUi(self)
self.h5path=h5path
self.h5file=h5py.File(self.h5path, mode='r')
if not h5groupstr is None:
self.h5groupstr=h5groupstr
self.h5analysis=self.h5file['/'.join((self.h5groupstr, 'analysis'))]
self.h5mar=self.h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(self.h5analysis)))]
self.h5marcounts=self.h5file['/'.join((self.h5groupstr,'measurement', getxrdname(self.h5analysis),'counts'))]
self.attrdict=getattr(self.h5path, self.h5groupstr)
chessrungrpname=self.attrdict['chessrunstr']
else:
self.h5mar=None
self.h5marcounts=self.h5file[h5grppath]['counts']
chessrungrpname=''
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
QObject.connect(self.buttonBox,SIGNAL("rejected()"),self.ExitRoutine)
QObject.connect(self.copyPushButton,SIGNAL("pressed()"),self.performcopy)
self.h5chess=CHESSRUNFILE(mode='r+')
grpnames=[]
for group in self.h5chess.iterobjects():
if isinstance(group,h5py.Group):
grpnames+=[group.name]
perform=len(grpnames)>0
if not perform:
print 'no chess groups found in .h5 file'
if perform:
if chessrungrpname in grpnames:
setindex=grpnames.index(chessrungrpname)
else:
setindex=0
#idialog=selectorDialog(self, grpnames, title='Select an h5chess group to store Bcknd images', setindex=setindex)
#perform=idialog.exec_()
if perform:
#chessrungrpname=str(idialog.groupsComboBox.currentText())
chessrungrpname=grpnames[setindex]#override the choice because was not working 20Jan2011
self.h5chessgrp=self.h5chess[chessrungrpname]
if 'BckndInventory' in self.h5chessgrp:
self.h5chessgrp=self.h5chessgrp['BckndInventory']
else:
self.h5chessgrp=self.h5chessgrp.create_group('BckndInventory')
self.imagepointlist=[]
self.imagenamelist=[]
for counter, c in enumerate(self.h5marcounts):
if numpy.max(c[:, :])>0:
self.imagepointlist+=[(self.h5marcounts, counter)]
self.imagenamelist+=['image index %d' %counter]
for bname in ['bmin', 'bave', 'blin0', 'blin1']:#blin0 and blin1 have to be last so when they are omitted that doesn't change the indexing of imagepointlist
if (not self.h5mar is None) and bname in self.h5mar:
self.imagepointlist+=[self.h5mar[bname]]
self.imagenamelist+=[bname]
for counter, nam in enumerate(self.imagenamelist):
self.imageComboBox.insertItem(counter, nam)
print chessrungrpname, self.imagenamelist
# self.imageComboBox.setCurrentIndex(self.imagenamelist.index('image index 0'))
# self.newnameLineEdit.setText('NoSample_75s')
# self.performcopy()
# self.ExitRoutine()
else:
self.ExitRoutine()
def performcopy(self):
nam=str(self.newnameLineEdit.text())
if nam in self.h5chessgrp and not (self.overwriteCheckBox.isChecked()):
self.MsgLabel.setText('FAILED: Bcknd Image with that name already exists')
return
#try:
pnt=self.imagepointlist[self.imageComboBox.currentIndex()]
d={}
if isinstance(pnt, tuple):
print pnt
arr=pnt[0][pnt[1]]
print arr.shape
print pnt[0].file.filename
d['sourcefile']=pnt[0].file.filename
print pnt[0].name
d['sourcename']=pnt[0].name
print pnt[1]
d['sourcearrayindex']=pnt[1]
if 'scalar_data' in pnt[0].parent.parent:
sdg=pnt[0].parent.parent['scalar_data']
for ds in sdg.itervalues():
if isinstance(ds, h5py.Dataset):
k=ds.name.rpartition('/')[2]
if len(ds.shape)==0:
v=ds.value
elif len(ds.shape)==1:
v=ds[pnt[1]]
d[k]=v
print 'scalar ', k, v
else:
print 'scalar data not copied to BckndInventory for ', nam
for k, v in pnt[0].attrs.iteritems():
if isinstance(v, list) or isinstance(v, numpy.ndarray):
v=v[pnt[1]]
if k=='mod_multiplierarray' and v!=1.:
QMessageBox.warning(self,'It seems that this bcknd image was modified from raw data - this is discouraged for BckdnInventory')
d[k]=v
else:
print pnt
arr=readh5pyarray(pnt)
d['sourcefile']=arr.file.filename
d['sourcename']=arr.name
d['sourcearrayindex']=''
for k, v in pnt.attrs.iteritems():
d[k]=v
if nam in self.h5chessgrp:
del self.h5chessgrp[nam]
h5ds=self.h5chessgrp.create_dataset(nam, data=arr)
for key, val in d.iteritems():
h5ds.attrs[key]=val
self.MsgLabel.setText('%s successfully added to inventory' %nam)
# except:
# self.MsgLabel.setText('FAILED: fatal error, probably problem with name')
def ExitRoutine(self):
print 'BckndInventory Exit'
self.h5file.close()
self.h5chess.close()
class LinBckndDialog(QDialog,
ui_LinBckndDialog.Ui_LinBckndDialog):
def __init__(self, parent, h5path, h5groupstr):
super(LinBckndDialog, self).__init__(parent)
self.setupUi(self)
#***
self.h5path=h5path
self.h5groupstr=h5groupstr
self.h5file=h5py.File(self.h5path, mode='r+')
h5analysis=self.h5file['/'.join((self.h5groupstr, 'analysis'))]
self.h5mar=self.h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=self.h5file['/'.join((self.h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
self.attrdict=getattr(self.h5path, self.h5groupstr)
# if 'metallization' in self.h5file.attrs.keys():
# met=self.h5file.attrs['metallization']
# else:
# met=None
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
QObject.connect(self.buttonBox,SIGNAL("rejected()"),self.CancelledExitRoutine)
self.imagepointlist=[]
self.imagenamelist=[]
self.h5chess=CHESSRUNFILE()
h5chessrun=self.h5chess[self.attrdict['chessrunstr']]
if 'BckndInventory' in h5chessrun:
bckndgrppoint=h5chessrun['BckndInventory']
for dset in bckndgrppoint.values():
if isinstance(dset,h5py.Dataset):
if len(dset.shape)==3:
xd=self.attrdict['x']
zd=self.attrdict['z']
xs=dset.attrs['x']
zs=dset.attrs['z']
inds=[numpy.argmin((x-xs)**2+(z-zs)**2) for x, z in zip(xd, zd)]
if len(set(inds))>1:
self.imagenamelist+=['position-matched, bcknd inventory: '+dset.name.rpartition('/')[2]]
self.imagepointlist+=[dset]
else:
i=inds[0]
avedist=numpy.mean(numpy.sqrt((xs[i]-xd)**2+(zs[i]-zd)**2))
self.imagenamelist+=['posn-match index %d of bcknd %s, ave of %.1fmm separation' %(i, dset.name.rpartition('/')[2], avedist)]
self.imagepointlist+=[(dset, i)]
else:
self.imagenamelist+=['bcknd inventory: '+dset.name.rpartition('/')[2]]
self.imagepointlist+=[dset]
for counter, c in enumerate(h5marcounts):
if numpy.max(c[:, :])>0:
self.imagepointlist+=[(h5marcounts, counter)]
self.imagenamelist+=['this data, image index %d' %counter]
for bname in ['bmin', 'bave', 'blin0', 'blin1']:#blin0 and blin1 have to be last so when they are omitted that doesn't change the indexing of imagepointlist
if bname in self.h5mar:
self.imagepointlist+=[self.h5mar[bname]]
self.imagenamelist+=[bname]
for counter, nam in enumerate(self.imagenamelist):
for cb, notallowed in zip([self.imageComboBox0, self.imageComboBox1], ['blin0', 'blin1']):
if nam!=notallowed:
cb.insertItem(counter, nam)
self.zerofracSpinBox.setValue(0.02)
self.precisionSpinBox.setValue(0.001)
self.normrankSpinBox.setValue(0.8)
self.perform=False
def CancelledExitRoutine(self):
self.h5file.close()
self.h5chess.close()
def ExitRoutine(self):
pnt_attrslist=[]
for cb, nam, twle in zip([self.imageComboBox0, self.imageComboBox1], ['blin0', 'blin1'], [self.imagefracLineEdit0, self.imagefracLineEdit1]):
d={}
d['blinname']=nam
try:
d['trialimageweights']=numpy.float32(eval('['+str(twle.text())+']'))
except:
h5file.close()
if not self.h5chess is None:
self.h5chess.close()
print
QMessageBox.warning(self,"syntax error", "Aborting because the list of trial weights did not convert to array correctly.\nThe enetered string has been printed.\nSome blin data in .h5 may have been deleted.")
self.perform=False
return
pnt=self.imagepointlist[cb.currentIndex()]
d['description']=self.imagenamelist[cb.currentIndex()]
if isinstance(pnt, tuple):
print 'reading ', pnt[0].name
arr=pnt[0][pnt[1]]
d['sourcefile']=pnt[0].file.filename
d['sourcename']=pnt[0].name
d['sourcearrayindex']=pnt[1]
elif len(pnt.shape)==3:
xd=self.attrdict['x']
zd=self.attrdict['z']
xs=pnt.attrs['x']
zs=pnt.attrs['z']
inds=[numpy.argmin((x-xs)**2+(z-zs)**2) for x, z in zip(xd, zd)]
print 'reading ', pnt.name
arr=readh5pyarray(pnt)
arr=numpy.array([arr[i] for i in inds])
d['sourcefile']=pnt.file.filename
d['sourcename']=pnt.name
d['sourcearrayindex']=inds
else:
print 'reading ', pnt.name
arr=readh5pyarray(pnt)
d['sourcefile']=pnt.file.filename
d['sourcename']=pnt.name
d['sourcearrayindex']=''
dellist=[]
if nam in self.h5mar:
for pnt2 in self.h5mar.itervalues():
if isinstance(pnt2,h5py.Dataset):
temp=pnt2.name.rpartition('/')[2]
if nam in temp:#this gets rid of all the blin0bin$
dellist+=[temp]
print 'deleting ', dellist
for temp in dellist:
del self.h5mar[temp]
h5ds=self.h5mar.create_dataset(nam, data=arr)
for key, val in d.iteritems():
h5ds.attrs[key]=val
pnt_attrslist+=[(pnt, d)]
if self.propogateCheckBox.isChecked():
self.propogatetogroups(pnt_attrslist)
else:
self.batchh5grpstrlist=[]
self.h5file.close()
self.h5chess.close()
self.perform=True
def propogatetogroups(self, pnt_attrslist):# use self. sparingly as everything in the loop should be local to that epxeriment group
self.batchh5grpstrlist=[]
for g in self.h5file.values():
h5groupstr=g.name.rpartition('/')[2]
if h5groupstr==self.h5groupstr:
continue
try:
h5analysis=self.h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=self.h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=self.h5file['/'.join((h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
attrdict=getattr(self.h5path, h5groupstr)
except:
print 'skipping ', h5groupstr
continue
print g
self.batchh5grpstrlist+=[h5groupstr]
for pnt, d in pnt_attrslist:
nam=d['blinname']
print nam
if isinstance(pnt, tuple):
print pnt[0].name
if 'posn' in d['description'] or 'position' in d['description']:
xd=attrdict['x']
zd=attrdict['z']
xs=pnt[0].attrs['x']
zs=pnt[0].attrs['z']
inds=[numpy.argmin((x-xs)**2+(z-zs)**2) for x, z in zip(xd, zd)]
if len(set(inds))>1:
d['sourcearrayindex']=inds
arr=readh5pyarray(pnt[0])
arr=numpy.array([arr[i] for i in inds])
else:
i=inds[0]
d['sourcearrayindex']=i
arr=pnt[0][i]
else:
arr=pnt[0][pnt[1]]
elif len(pnt.shape)==3:
print pnt.name
xd=attrdict['x']
zd=attrdict['z']
xs=pnt.attrs['x']
zs=pnt.attrs['z']
inds=[numpy.argmin((x-xs)**2+(z-zs)**2) for x, z in zip(xd, zd)]
if len(set(inds))>1:
d['sourcearrayindex']=inds
arr=readh5pyarray(pnt)
arr=numpy.array([arr[i] for i in inds])
else:
i=inds[0]
d['sourcearrayindex']=i
arr=pnt[i]
else:
print pnt.name
arr=readh5pyarray(pnt)
d['sourcearrayindex']=''
dellist=[]
if nam in h5mar:
for pnt2 in h5mar.values():
if isinstance(pnt2,h5py.Dataset):
temp=pnt2.name.rpartition('/')[2]
if nam in temp:#this gets rid of all the blin0bin$
dellist+=[temp]
print 'deleting ', dellist
for temp in dellist:
del h5mar[temp]
h5ds=h5mar.create_dataset(nam, data=arr)
for key, val in d.iteritems():
h5ds.attrs[key]=val
class LinBckndDialog1d(QDialog,
ui_LinBckndDialog.Ui_LinBckndDialog):# not finished implementing
def __init__(self, parent, h5path, h5groupstr):
super(LinBckndDialog, self).__init__(parent)
self.setupUi(self)
#***
self.h5path=h5path
self.h5groupstr=h5groupstr
self.h5file=h5py.File(self.h5path, mode='r+')
h5analysis=self.h5file['/'.join((self.h5groupstr, 'analysis'))]
# self.h5mar=self.h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
# h5marcounts=self.h5file['/'.join((self.h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
attrdict=getattr(self.h5path, self.h5groupstr)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
QObject.connect(self.buttonBox,SIGNAL("rejected()"),self.CancelledExitRoutine)
self.imagepointlist=[]
self.imagenamelist=[]
self.h5chess=CHESSRUNFILE()
h5chessrun=self.h5chess[attrdict['chessrunstr']]
if 'BckndInventory' in h5chessrun:
bckndgrppoint=h5chessrun['BckndInventory']
for dset in bckndgrppoint.iterobjects():
if isinstance(dset,h5py.Dataset):
self.imagepointlist+=[dset]
self.imagenamelist+=['bcknd inventory: '+dset.name.rpartition('/')[2]]
# for counter, c in enumerate(h5marcounts):
# if numpy.max(c[:, :])>0:
# self.imagepointlist+=[(h5marcounts, counter)]
# self.imagenamelist+=['this data, image index %d' %counter]
# for bname in ['bmin', 'bave', 'blin0', 'blin1']:#blin0 and blin1 have to be last so when they are omitted that doesn't change the indexing of imagepointlist
# if bname in self.h5mar:
# self.imagepointlist+=[self.h5mar[bname]]
# self.imagenamelist+=[bname]
for counter, nam in enumerate(self.imagenamelist):
for cb, notallowed in zip([self.imageComboBox0, self.imageComboBox1], ['blin0', 'blin1']):
if nam!=notallowed:
cb.insertItem(counter, nam)
self.perform=False
def CancelledExitRoutine(self):
self.h5file.close()
self.h5chess.close()
def ExitRoutine(self):
for cb, nam, twle in zip([self.imageComboBox0, self.imageComboBox1], ['blin0', 'blin1'], [self.imagefracLineEdit0, self.imagefracLineEdit1]):
d={}
try:
d['trialimageweights']=numpy.float32(eval('['+str(twle.text())+']'))
except:
h5file.close()
if not self.h5chess is None:
self.h5chess.close()
print
QMessageBox.warning(self,"syntax error", "Aborting because the list of trial wieghts did not convert to array correctly.\nThe enetered string has been printed.\nSome blin data in .h5 may have been deleted.")
self.perform=False
return
pnt=self.imagepointlist[cb.currentIndex()]
if isinstance(pnt, tuple):
print 'reading ', pnt[0].name
arr=pnt[0][pnt[1]]
d['sourcefile']=pnt[0].file .filename
d['sourcename']=pnt[0].name
d['sourcearrayindex']=pnt[1]
else:
print 'reading ', pnt.name
arr=readh5pyarray(pnt)
d['sourcefile']=pnt.file.filename
d['sourcename']=pnt.name
d['sourcearrayindex']=''
dellist=[]
if nam in self.h5mar:
for pnt in self.h5mar.itervalues():
if isinstance(pnt,h5py.Dataset):
print pnt.name
print pnt.name.rpartition('/')[2]
temp=pnt.name.rpartition('/')[2]
if nam in temp:#this gets rid of all the blin0bin$
dellist+=[temp]
print dellist
for temp in dellist:
del self.h5mar[temp]
h5ds=self.h5mar.create_dataset(nam, data=arr)
for key, val in d.iteritems():
h5ds.attrs[key]=val
self.h5file.close()
self.h5chess.close()
self.perform=True
class highlowDialog(QDialog,
        ui_highlowDialog.Ui_highlowDialog):
    """Simple dialog presenting the high/low UI under a caller-supplied title."""
    def __init__(self, parent, title):
        super(highlowDialog, self).__init__(parent)
        self.setupUi(self)
        # title is applied after setupUi so it is not overwritten by the .ui defaults
        self.setWindowTitle(title)
class bminDialog(QDialog,
        ui_bmin_menu.Ui_bmin_menu):
    """Thin wrapper dialog: just instantiates the bmin menu UI."""
    def __init__(self, parent):
        super(bminDialog, self).__init__(parent)
        self.setupUi(self)
class importh5scanDialog(QDialog,
ui_h5scanDialog.Ui_h5scanDialog):
def __init__(self, parent, h5path):
super(importh5scanDialog, self).__init__(parent)
self.setupUi(self)
self.optionlist=[]
h5file=h5py.File(h5path, mode='r')
for grp in h5file.iterobjects():
print grp.name.rpartition('/')[2]
if isinstance(grp,h5py.Group):
#the below conditions means that the data must have this h5 format to analyze the data. if these conditions need to be loosened, the importattrDialog routines should allow user entry of the spec info
#if ('samx' in grp['measurement/scalar_data'] and len(grp['measurement/scalar_data/samx'].shape)==1) or ('samz' in grp['measurement/scalar_data/'] and len(grp['measurement/scalar_data/samz'].shape)==1):
if 'acquisition_command' in grp.attrs:
s=':'.join((grp.name.rpartition('/')[2], grp.attrs['acquisition_command']))
self.optionlist+=[s]
self.scanComboBox.insertItem(99,s)
# def numcvt(self, num):
# if numpy.abs(num-round(num))<0.005:
# return '%d' %int(round(num))
# if numpy.abs(num*10-round(num*10))<0.05:
# return '%.1f' %num
# return '%.2f' %num
#
# def makecommand(self, grp):
# samx=None
# samz=None
# if 'samx' in grp['scalar_data']:
# samx=grp['scalar_data/samx'][:]
# if 'samz' in grp['scalar_data']:
# samz=grp['scalar_data/samz'][:]
# if samx is None:
# samx=numpy.ones(samz.size, dtype='float32')*grp['positioners/samx'].value
# if samz is None:
# samz=numpy.ones(samx.size, dtype='float32')*grp['positioners/samz'].value
#
# startstr=''
# endstr=''
#
# if numpy.all(samx==samx[0]):
# endstr=''.join((' samx=', self.numcvt(samx[0])))
# startstr='ascan samz %s %s %d' %(self.numcvt(samz[0]), self.numcvt(samz[-1]), len(samz)-1)
# elif numpy.all(samz==samz[0]):
# endstr=''.join((' samz=', self.numcvt(samz[0])))
# startstr='ascan samx %s %s %d' %(self.numcvt(samx[0]), self.numcvt(samx[-1]), len(samx)-1)
# elif len(samz)==len(set(samz)):
# startstr='a2scan samx %s %s samz %s %s %d' %(self.numcvt(samx[0]), self.numcvt(samx[-1]), self.numcvt(samz[0]), self.numcvt(samz[-1]), len(samz)-1)
# else:
# startstr='mesh samx %s %s %d samz %s %s %d' %(self.numcvt(samx[0]), self.numcvt(samx[-1]), len(set(samx))-1, self.numcvt(samz[0]), self.numcvt(samz[-1]), len(set(samz))-1)
#
# icstr=''
# for item in grp['scalar_data'].iterobjects():
# if ('IC' in item.name.rpartition('/')[2]) and isinstance(item,h5py.Dataset):
# ic=item[:]
# if numpy.all(ic[(ic.max()-ic)<0.5*ic.max()]==ic.max()):#all elements bigger than half the max are equal to the max. this will exclude the near zero values corresponding to skipped points
# icstr=' -%d' %ic.max()
# if icstr=='':
# icstr=' %s' %self.numcvt(grp['scalar_data/Seconds'][0])
#
# return ''.join((startstr, icstr, endstr))
class chessrunattrDialog(QDialog,
        ui_chessrunattr.Ui_chessrunattrDialog):
    """Dialog showing default chessrun calibration attributes plus the list of
    chessrun groups that already exist in the shared chessrun .h5 file."""
    def __init__(self, parent):
        super(chessrunattrDialog, self).__init__(parent)
        self.setupUi(self)
        self.attrdict=attrdict_def()
        self.setvalues()
    def setvalues(self):
        """Populate widgets from the default attrdict and list existing groups."""
        for index, direction in enumerate(['top', 'bottom', 'left', 'right']):
            self.tiltdirectionComboBox.insertItem(index, direction)
        self.tiltdirectionComboBox.setCurrentIndex(3)
        # the five calibration spin boxes map one-to-one onto attrdict['cal'][0:5]
        calwidgets=[self.xcenSpinBox, self.ycenSpinBox, self.LSpinBox, self.martiltSpinBox, self.tiltrotSpinBox]
        for widget, calval in zip(calwidgets, self.attrdict['cal']):
            widget.setValue(calval)
        self.alphaSpinBox.setValue(self.attrdict['alpha'])
        self.wavelengthSpinBox.setValue(self.attrdict['wavelength'])
        self.existingTextBrowser.setPlainText('')
        h5chess=CHESSRUNFILE()
        for node in h5chess.iterobjects():
            if isinstance(node, h5py.Group):
                self.existingTextBrowser.append(node.name.rpartition('/')[2])
        h5chess.close()
class importattrDialog(QDialog,
ui_import_attr.Ui_importattrDialog):
"""h5path and h5groupstr already exist, if attrdict doesn't exist use defaults otherwise display the current values and set self.attrdict to entered values"""
def __init__(self, parent, h5path, h5groupstr, command=None):
super(importattrDialog, self).__init__(parent)
self.setupUi(self)
self.h5path=h5path
self.h5groupstr=h5groupstr
self.chessruncomboBox.clear()
self.imapcomboBox.clear()
self.chimapcomboBox.clear()
self.killmapcomboBox.clear()
self.attrdict=getattr(h5path, h5groupstr)
if 'cal' in self.attrdict.keys():
self.chessrun=self.attrdict['chessrunstr'][1:]
imapstr=self.attrdict['imapstr'][::-1].partition('/')[0][::-1]
chimapstr=self.attrdict['chimapstr'][::-1].partition('/')[0][::-1]
killmapstr=self.attrdict['killmapstr'][::-1].partition('/')[0][::-1]
else:
self.attrdict=attrdict_def()
self.chessrun=chessrun_def()
self.getchessrunattrs()
imapstr=None
chimapstr=None
killmapstr=None
if not (command is None):
self.attrdict['command']=str(command)
self.usespecCheckBox.setChecked(True)
self.fromspecattr={}
try:
h5file=h5py.File(self.h5path, mode='r')
h5root=h5file[self.h5groupstr]
self.fromspecattr['acquisition_time']=h5root['measurement/scalar_data/Seconds'][:]
self.fromspecattr['command']=h5root.attrs['acquisition_command']
temp_acsh=h5root.attrs['acquisition_shape']
if isinstance(temp_acsh, str):
temp_acsh=eval(temp_acsh)
self.fromspecattr['acquisition_shape']=temp_acsh
npts=numpy.prod(numpy.int16(temp_acsh))
samx=None
samz=None
if 'samx' in h5root['measurement/scalar_data']:
samx=h5root['measurement/scalar_data/samx'][:]
if 'samz' in h5root['measurement/scalar_data']:
samz=h5root['measurement/scalar_data/samz'][:]
if samx is None:
samx=numpy.ones(npts, dtype='float32')*h5root['measurement/positioners/samx'].value
if samz is None:
samz=numpy.ones(npts, dtype='float32')*h5root['measurement/positioners/samz'].value
self.fromspecattr['x']=samx
self.fromspecattr['z']=samz
h5file.close()
except:
self.usespecCheckBox.setChecked(False)
self.usespecCheckBox.setDisabled(True)
self.manualgriditems=[self.cmdLineEdit, self.inttimeSpinBox, self.xstartSpinBox, self.xintSpinBox, self.xptsSpinBox, self.zstartSpinBox, self.zintSpinBox, self.zptsSpinBox]
if self.attrdict['command']=='USER-COMPILED':
self.usespecCheckBox.setChecked(False)
self.usespecCheckBox.setDisabled(True)
for it in self.manualgriditems:
it.setDisabled(True)
self.setmapchoices(imapstr, chimapstr, killmapstr)
self.setvalues()
self.usespecprocess()
@pyqtSignature("")
def on_usespecCheckBox_clicked(self):
self.usespecprocess()
def usespecprocess(self):
usespec=self.usespecCheckBox.isChecked()
if usespec:
self.cmdLineEdit.setText(self.fromspecattr['command'])
self.calcfromcommand()
for it in self.manualgriditems:
it.setDisabled(usespec)
def setchessrunvalues(self):
self.xcenSpinBox.setValue(self.attrdict['cal'][0])
self.ycenSpinBox.setValue(self.attrdict['cal'][1])
self.LSpinBox.setValue(self.attrdict['cal'][2])
self.martiltSpinBox.setValue(self.attrdict['cal'][3])
self.tiltrotSpinBox.setValue(self.attrdict['cal'][4])
self.alphaSpinBox.setValue(self.attrdict['alpha'])
self.wavelengthSpinBox.setValue(self.attrdict['wavelength'])
if 'xrdname' in self.attrdict.keys():
self.xrdnameLineEdit.setText(self.attrdict['xrdname'])
if 'psize' in self.attrdict.keys():
self.psizeSpinBox.setValue(self.attrdict['psize']),
def setvalues(self):
self.setchessrunvalues()
ellineditlist=[self.el1LineEdit, self.el2LineEdit, self.el3LineEdit, self.el4LineEdit]
for le, els in zip(ellineditlist, self.attrdict['elements']):
le.setText(els)
self.cmdLineEdit.setText(self.attrdict['command'])
self.xstartSpinBox.setValue(self.attrdict['xgrid'][0])
self.xintSpinBox.setValue(self.attrdict['xgrid'][1])
self.xptsSpinBox.setValue(self.attrdict['xgrid'][2])
self.zstartSpinBox.setValue(self.attrdict['zgrid'][0])
self.zintSpinBox.setValue(self.attrdict['zgrid'][1])
self.zptsSpinBox.setValue(self.attrdict['zgrid'][2])
if self.attrdict['bcknd']=='lin':
self.bckndComboBox.setCurrentIndex(3)
elif self.attrdict['bcknd']=='ave':
self.bckndComboBox.setCurrentIndex(2)
elif self.attrdict['bcknd']=='min':
self.bckndComboBox.setCurrentIndex(1)
else:
self.bckndComboBox.setCurrentIndex(0)
def setmapchoices(self, istr, cstr, kstr):
index=-1
h5chess=CHESSRUNFILE()
count=0
for group in h5chess.iterobjects():
if isinstance(group, h5py.Group):
self.chessruncomboBox.insertItem(count, group.name.rpartition('/')[2])
if group.name.rpartition('/')[2]==self.chessrun:
index=count
count+=1
if index<0:
print 'PROBLEM FINDING A CHESSRUN THAT SHOULD EXIST'
return
self.chessruncomboBox.setCurrentIndex(index)
group=h5chess[self.chessrun]
index=0
count=0
subgrp=group['imap']
for dset in subgrp.iterobjects():
if isinstance(dset, h5py.Dataset) and not ('bin' in dset.name.rpartition('/')[2]):
self.imapcomboBox.insertItem(count, dset.name.rpartition('/')[2])
if dset.name.rpartition('/')[2]==istr:
index=count
count+=1
self.imapcomboBox.setCurrentIndex(index)
index=0
count=0
subgrp=group['chimap']
for dset in subgrp.iterobjects():
if isinstance(dset, h5py.Dataset) and not ('bin' in dset.name.rpartition('/')[2]):
self.chimapcomboBox.insertItem(count, dset.name.rpartition('/')[2])
if dset.name.rpartition('/')[2]==cstr:
index=count
count+=1
self.chimapcomboBox.setCurrentIndex(index)
index=0
count=0
subgrp=group['killmap']
for dset in subgrp.iterobjects():
if isinstance(dset, h5py.Dataset) and not ('bin' in dset.name.rpartition('/')[2]):
self.killmapcomboBox.insertItem(count, dset.name.rpartition('/')[2])
if dset.name.rpartition('/')[2]==kstr:
index=count
count+=1
self.killmapcomboBox.setCurrentIndex(index)
h5chess.close()
def getchessrunattrs(self):
h5chess=CHESSRUNFILE()
node=h5chess[self.chessrun]
for key, val in node.attrs.iteritems():
if key in self.attrdict.keys():
self.attrdict[key]=val
h5chess.close()
@pyqtSignature("")
def on_getchessruninfoButton_clicked(self):
self.chessrun=str(unicode(self.chessruncomboBox.currentText()))
self.chessruncomboBox.clear()
self.imapcomboBox.clear()
self.chimapcomboBox.clear()
self.killmapcomboBox.clear()
self.getchessrunattrs()
self.setmapchoices(None, None, None)
self.setchessrunvalues()
@pyqtSignature("")
def on_calcButton_clicked(self):
self.calcfromcommand()
def calcfromcommand(self):
a=unicode(self.cmdLineEdit.text()).encode()
b=('','',a)
c=[]
while len(b[2])>0:
b=b[2].partition(' ')
if b[0]!='':
c+=[b[0]]
if ('mesh' in c[0]) or ('a2scan' in c[0]):
i=2
j=6
if 'samz' in c[1]:
i=6
j=2
if 'sam' not in c[1]:
i=1
if 'sam' not in c[4]:
j=4
if 'a2scan' in c[0]:
c=c[:min(i, j)+2]+[c[-2]]+c[min(i, j)+2:]
if 'ascan' in c[0]:
if c[1]=='samx':
i=2
j=len(c)
if 'samz=' in c[-1]:
c+=[c[-1].partition('=')[2], c[-1].partition('=')[2], '0']
else:
c+=['0', '0', '0']
if c[1]=='samz':
j=2
i=len(c)
if 'samx=' in c[-1]:
c+=[c[-1].partition('=')[2], c[-1].partition('=')[2], '0']
else:
c+=['0', '0', '0']
try:
xgrid=(numpy.float32(eval(c[i])),numpy.float32(eval(c[i+1])),numpy.uint16(eval(c[i+2])))
zgrid=(numpy.float32(eval(c[j])),numpy.float32(eval(c[j+1])),numpy.uint16(eval(c[j+2])))
if xgrid[2]==0:
temp=0
else:
temp=(xgrid[1]-xgrid[0])/(xgrid[2])
xgrid=(xgrid[0], temp, xgrid[2]+1)
self.xstartSpinBox.setValue(xgrid[0])
self.xintSpinBox.setValue(xgrid[1])
self.xptsSpinBox.setValue(xgrid[2])
if zgrid[2]==0:
temp=0
else:
temp=(zgrid[1]-zgrid[0])/(zgrid[2])
zgrid=(zgrid[0], temp, zgrid[2]+1)
self.zstartSpinBox.setValue(zgrid[0])
self.zintSpinBox.setValue(zgrid[1])
self.zptsSpinBox.setValue(zgrid[2])
if len(c)>max(i, j)+2:
counter=eval(c[max(i, j)+3])
else:
counter=0
self.inttimeSpinBox.setValue(counter)
if counter<0:
self.integLabel.setText('integration\nXflash cts')
else:
self.integLabel.setText('integration\ntime (s)')
except (SyntaxError, NameError, IndexError):
#QMessageBox.warning(self,"syntax error", "grid values were not generated")
print 'grid values were not generated'
pass
class getgroupDialog(QDialog,
        ui_get_group.Ui_getgroupDialog):
    """Dialog listing the experiment groups of an h5 file that contain both
    measurement and analysis data for their xrd detector, preselecting the
    file's default scan when it qualifies."""
    def __init__(self, parent, h5path):
        super(getgroupDialog, self).__init__(parent)
        self.setupUi(self)
        self.h5path=h5path
        self.groupsComboBox.clear()
        scanfile=h5py.File(self.h5path, mode='r')
        defaultname=getdefaultscan(self.h5path)
        defaultindex=None
        nadded=0
        for node in scanfile.iterobjects():
            if isinstance(node,h5py.Group) and 'analysis' in node:
                xrdname=getxrdname(node['analysis'])
                # only offer groups holding both raw and analyzed xrd data
                if ('measurement/'+xrdname in node) and ('analysis/'+xrdname in node):
                    grpname=node.name.rpartition('/')[2]
                    self.groupsComboBox.insertItem(nadded,grpname)
                    if defaultname==grpname:
                        defaultindex=nadded
                    nadded+=1
        scanfile.close()
        if not defaultindex is None:
            self.groupsComboBox.setCurrentIndex(defaultindex)
class selectorDialog(QDialog,
        ui_get_group.Ui_getgroupDialog):
    """Generic pick-one dialog reusing the get-group UI: fills the combo box
    with itemnames and preselects setindex."""
    def __init__(self, parent, itemnames, title='Select an item', setindex=0):
        super(selectorDialog, self).__init__(parent)
        self.setupUi(self)
        self.groupsComboBox.clear()
        for position, label in enumerate(itemnames):
            self.groupsComboBox.insertItem(position,label)
        self.groupsComboBox.setCurrentIndex(setindex)
        self.setWindowTitle(title)
class plotsoDialog(QDialog,
        ui_plotsomenu.Ui_plotsoDialog):
    """Plot-selection dialog: seeds the low/high range spin boxes and fills
    the type combo box with the given item names."""
    def __init__(self, parent, itemnames, low, high, title='Select an item'):
        super(plotsoDialog, self).__init__(parent)
        self.setupUi(self)
        self.lowSpinBox.setValue(low)
        self.highSpinBox.setValue(high)
        self.typeComboBox.clear()
        for label in itemnames:
            # 999 appends past the end regardless of current count
            self.typeComboBox.insertItem(999,label)
        self.typeComboBox.setCurrentIndex(0)
        # NOTE(review): the title is set on the combo box, not the dialog --
        # compare selectorDialog which calls self.setWindowTitle; possibly a bug,
        # preserved here for behavioral fidelity
        self.typeComboBox.setWindowTitle(title)
class pdfDialog(QDialog,
        ui_pdfDialog.Ui_pdfDialog):
    """Dialog for overlaying powder-diffraction-file (PDF) stick patterns.

    cvtfcn converts each entry's d-spacing-like value into the plot's units
    (default: d_q of the value divided by 10 -- presumably nm vs Angstrom;
    confirm against readpdffile's output units).
    """
    def __init__(self, parent, filename='PDFentries.txt', cvtfcn=lambda x:d_q(x/10.0)):
        super(pdfDialog, self).__init__(parent)
        self.setupUi(self)
        names, pdflist=readpdffile(os.path.join(defaultdir('pdfentries'), filename))
        # reverse the entry order and convert every position value
        self.pdflist=[[[cvtfcn(position), height] for position, height in entry] for entry in pdflist[::-1]]
        for name in names:
            # inserting at 0 reverses names to match the reversed pdflist
            self.pdfcomboBox.insertItem(0, name)
        self.labellineEdit.setText('')
        self.colorlineEdit.setText('r')
class messageDialog(QDialog,
        ui_message_box.Ui_messageDialog):
    """Minimal message box: shows msg in the dialog's label."""
    def __init__(self, parent, msg):
        super(messageDialog, self).__init__(parent)
        self.setupUi(self)
        self.messageLabel.setText(msg)
class qqanalysisDialog(QDialog,
        ui_analyze_qq.Ui_qqanalysisDialog):
    """Thin wrapper dialog: just instantiates the qq-analysis UI."""
    def __init__(self, parent):
        super(qqanalysisDialog, self).__init__(parent)
        self.setupUi(self)
class peakqqassociationDialog(QDialog,
        ui_associate_pkqq.Ui_peakqqassociationDialog):
    """Thin wrapper dialog: just instantiates the peak/qq-association UI."""
    def __init__(self, parent):
        super(peakqqassociationDialog, self).__init__(parent)
        self.setupUi(self)
class makephasesDialog(QDialog,
        ui_make_phases_menu.Ui_makephasesDialog):
    """Thin wrapper dialog: just instantiates the make-phases menu UI."""
    def __init__(self, parent):
        super(makephasesDialog, self).__init__(parent)
        self.setupUi(self)
class spatialphasesDialog(QDialog,
        ui_spatial_phases_menu.Ui_spatialphasesDialog):
    """Thin wrapper dialog: just instantiates the spatial-phases menu UI."""
    def __init__(self, parent):
        super(spatialphasesDialog, self).__init__(parent)
        self.setupUi(self)
class chiqDialog(QDialog,
        ui_chiqDialog.Ui_chiqDialog):
    """Dialog showing the current Q and Chi grid parameters.

    qgrid and chigrid are each (start, interval, npts)-style triples; they are
    concatenated to feed the six %-format slots of the summary label.
    """
    def __init__(self, parent, qgrid, chigrid):
        super(chiqDialog, self).__init__(parent)
        self.setupUi(self)
        summary='Q is currently starting at %0.2f with %0.2f interval. Approximately %0.2f pts\nChi is currently starting at %0.2f, with %0.2f interval. Approximately %0.2f pts'
        self.gridLabel.setText(summary %tuple(qgrid+chigrid))
class plot2dintwindow(QDialog):
    """Dialog for viewing 2D detector intensity images from an experiment h5 file.

    Supports raw/binned display, background subtraction, a kill-map (bad pixel)
    editor when navkill=True, a sample-position or composition navigator, and a
    Chi-Q re-mapped view of the detector image.
    """
    def __init__(self, parent, h5path, h5groupstr, runpath, navchoice, navkill=False):
        # navchoice: 0 -> x/z position navigator, 1/2 -> composition navigator
        #            (DP or XRF mole fractions respectively)
        # navkill:   when True, expose the kill-map / point-list editing controls
        super(plot2dintwindow, self).__init__(parent)
        self.navchoice=navchoice
        self.critradius=36 #2mm of edge of 3" wafer off limits
        self.navkill=navkill
        self.h5path=h5path
        self.h5groupstr=h5groupstr
        self.runpath=runpath
        # prefix used for all saved .png filenames: <h5 basename><group suffix>_
        self.savename1=''.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr.rpartition('.')[2], '_'))
        self.imnamelist=[]
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        # NOTE(review): this must be a module-level helper named 'getattr' that
        # shadows the builtin (the builtin would reject two string arguments) --
        # confirm against the rest of the module.
        attrdict=getattr(self.h5path, self.h5groupstr)
        self.pointlist=h5analysis.attrs['pointlist']
        self.bin=getbin(h5analysis)
        self.killmap=getkillmap(h5analysis.attrs['killmapstr'])
        self.killmapbin=getkillmap(h5analysis.attrs['killmapstr'], bin=self.bin)
        self.imagewidth=self.killmap.shape[0]
        #for display killmap also takes out pixels not in imap - for editing killmap, don't involve imap
        self.imap, self.qgrid=getimapqgrid(h5analysis.attrs['imapstr'])
        self.imapbin=getimapqgrid(h5analysis.attrs['imapstr'], qgrid=False, bin=self.bin)
        self.imapkillmap=self.killmap*(self.imap!=0)
        self.imapkillmapbin=self.killmapbin*(self.imapbin!=0)
        self.chimap, self.chigrid=getchimapchigrid(h5analysis.attrs['chimapstr'])
        self.chimapbin=getchimapchigrid(h5analysis.attrs['chimapstr'], chigrid=False, bin=self.bin)
        self.bcknd=attrdict['bcknd']
        # Load whichever background arrays exist; any failure simply disables
        # the background-subtraction controls below.
        try:
            if 'lin' in self.bcknd:
                self.bckndarr, self.blinwts=readblin(h5mar)
                self.bckndarrbin, self.blinwts=readblin(h5mar, bin=self.bin)
            else:
                bstr=''.join(('b', self.bcknd[:3]))
                self.bckndarr=readh5pyarray(h5mar[bstr])
                bstr=''.join((bstr, 'bin%d' %self.bin))
                self.bckndarrbin=readh5pyarray(h5mar[bstr])
            if self.bcknd=='minanom':
                if 'bimap' in h5mar:
                    bimap=readh5pyarray(h5mar['bimap'])
                    bqgrid=h5mar['bimap'].attrs['bqgrid']
                else:
                    bimap=None
                    bqgrid=None
                # arguments needed for an on-the-fly banom calculation in draw()
                self.banomcalc=(self.imapbin, self.qgrid, attrdict, bimap, bqgrid)
                self.bminanomf=readh5pyarray(h5mar['bminanomf'])
            bckndexists=True
        except:
            bckndexists=False
        h5file.close()
        self.imnumlist=self.pointlist[:]
        self.imnamelist=['%d' %p for p in self.pointlist]
        self.bcknd=attrdict['bcknd']
        self.xgrid=attrdict['xgrid']
        self.zgrid=attrdict['zgrid']
        self.xcoords=attrdict['x']
        self.zcoords=attrdict['z']
        self.L=attrdict['cal'][2]       # sample-detector distance from calibration
        self.wl=attrdict['wavelength']
        self.psize=attrdict['psize']    # detector pixel size
        self.setWindowTitle('Plot 2D Intensity')
        self.logCheckBox=QCheckBox()
        self.logCheckBox.setText('logarithmic\nintensity')
        self.logCheckBox.setChecked(False)
        self.killCheckBox=QCheckBox()
        self.killCheckBox.setText('apply kill map\nin main image')
        self.killCheckBox.setChecked(True)
        self.binCheckBox=QCheckBox()
        self.binCheckBox.setText('use binned data')
        self.binCheckBox.setChecked(True)
        # NOTE(review): the real Qt signature is "stateChanged(int)"; with
        # "stateChanged()" this old-style connect silently fails to connect --
        # consistent with the "signals are not working" remarks elsewhere.
        QObject.connect(self.binCheckBox,SIGNAL("stateChanged()"),self.fillimComboBox)
        self.bckndCheckBox=QCheckBox()
        self.bckndCheckBox.setText('subtract background')
        self.bckndCheckBox.setChecked(bckndexists)
        self.bckndCheckBox.setEnabled(bckndexists)
        self.drawbckndButton=QPushButton()
        self.drawbckndButton.setText('draw bcknd')
        if bckndexists:
            QObject.connect(self.drawbckndButton,SIGNAL("pressed()"),self.drawbcknd)
        else:
            def msg():
                print 'NO BACKND FOUNTD SO IGNORING PLOT REQUEST'
            QObject.connect(self.drawbckndButton,SIGNAL("pressed()"), msg)
        self.imComboBox=QComboBox()
        self.imComboBox.setToolTip('spec index of image to be plotted')
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        chiqButton=QPushButton()
        if self.chimapbin is None:
            chiqButton.setText('build chimapbin\nfor Chi-Q plot')
        else:
            chiqButton.setText('Chi-Q plot\n(time intensive)')
        QObject.connect(chiqButton,SIGNAL("pressed()"),self.chiqplot)
        rangelayout=QVBoxLayout()
        rangelabel=QLabel()
        rangelabel.setText('Range for cbar:')
        self.rangeLineEdit=QLineEdit()
        self.rangeLineEdit.setToolTip('two comma-delimited\nnumbers for min and max')
        rangelayout.addWidget(rangelabel)
        rangelayout.addWidget(self.rangeLineEdit)
        toplayout=QGridLayout()
        toplayout.addWidget(self.logCheckBox, 0, 0)
        toplayout.addWidget(self.killCheckBox, 0, 1)
        toplayout.addWidget(self.binCheckBox, 0, 2)
        toplayout.addWidget(self.bckndCheckBox, 0, 3)
        toplayout.addWidget(self.drawbckndButton, 0, 4)
        toplayout.addWidget(self.imComboBox, 1, 0)
        toplayout.addWidget(self.drawButton, 1, 1)
        toplayout.addLayout(rangelayout, 1, 2)
        toplayout.addWidget(self.saveButton, 1, 3)
        if self.bcknd=='minanom' and not self.navkill:
            self.banomButton=QPushButton()
            self.banomButton.setText('plot\nbanom')
            QObject.connect(self.banomButton,SIGNAL("pressed()"),self.drawbanom)
            toplayout.addWidget(self.banomButton, 0, 5)
        toplayout.addWidget(chiqButton, 1, 4)
        layout=QVBoxLayout()
        layout.addLayout(toplayout)
        self.imgLabel=QLabel()
        layout.addWidget(self.imgLabel)
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        layout.addWidget(self.plotw)
        # NOTE(review): 'toolbar' is never used after this call
        toolbar=self.plotw.gettoolbarinstance()
        self.savenavimageButton=QPushButton()
        self.savenavimageButton.setText('save .png\nnavigator')
        QObject.connect(self.savenavimageButton,SIGNAL("pressed()"),self.savenavimage)
        if self.navchoice==0:
            self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
        else:
            elstr=attrdict['elements']
            if self.navchoice==1:
                infotype='DPmolfracALL'
            else:
                infotype='XRFmolfracALL'
            self.elstrlist, self.compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=infotype)
            if self.compsarr is None:
                # fall back to the positional navigator if compositions fail
                print 'NO COMPOSITION NAVIGATOR WINDOW BECAUSE PROBLEM CALCULATING COMPOSITIONS'
                self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
            else:
                print 'COMPS:', self.compsarr
                self.navw = compnavigatorwidget(self, self.compsarr, self.elstrlist)
        QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
        if self.navkill:
            # kill-map editing mode: add the full set of editing controls
            self.savekillmapimageButton=QPushButton()
            self.savekillmapimageButton.setText('save .png\nkillmap')
            QObject.connect(self.savekillmapimageButton,SIGNAL("pressed()"),self.savekillmapimage)
            self.savekillmapButton=QPushButton()
            self.savekillmapButton.setText('save kill map\nfor analysis')
            QObject.connect(self.savekillmapButton,SIGNAL("pressed()"),self.savekillmap)
            self.clearkillButton=QPushButton()
            self.clearkillButton.setText('clear\nkill map')
            QObject.connect(self.clearkillButton,SIGNAL("pressed()"),self.clearkill)
            self.clickkillButton=QPushButton()
            self.clickkillButton.setText("click kill\nregions")
            QObject.connect(self.clickkillButton,SIGNAL("pressed()"),self.clickkill)
            self.clickkillregionsSpinBox=QSpinBox()
            self.clickkillregionsSpinBox.setValue(1)
            self.clickkillregionsSpinBox.setRange(1, 10)
            self.radkillButton=QPushButton()
            self.radkillButton.setText("rad kill\nbeyond mm")
            QObject.connect(self.radkillButton,SIGNAL("pressed()"),self.radkill)
            self.radkillmmSpinBox=QSpinBox()
            self.radkillmmSpinBox.setValue(173)
            self.radkillmmSpinBox.setRange(1, 173)
            radkilllayout=QHBoxLayout()
            radkilllayout.addWidget(self.radkillButton)
            radkilllayout.addWidget(self.radkillmmSpinBox)
            clickkilllayout=QHBoxLayout()
            clickkilllayout.addWidget(self.clickkillButton)
            clickkilllayout.addWidget(self.clickkillregionsSpinBox)
#            killcontrollayout.addWidget(self.savekillmapimageButton)
#            killcontrollayout.addWidget(self.savekillmapButton)
#            killcontrollayout.addWidget(self.clearkillButton)
#            killcontrollayout.addWidget(self.clickkillButton)
#            killcontrollayout.addWidget(self.clickkillapplyCheckBox)
#            navcontrollayout=QHBoxLayout()
            self.savepointlistButton=QPushButton()
            self.savepointlistButton.setText('save image set\nfor analysis')
            QObject.connect(self.savepointlistButton,SIGNAL("pressed()"),self.savepointlist)
            self.removeedgeButton=QPushButton()
            self.removeedgeButton.setText('remove images at\nsubstrate edge')
            QObject.connect(self.removeedgeButton,SIGNAL("pressed()"),self.removeedge)
            self.togglepointButton=QPushButton()
            self.togglepointButton.setText(' \n ')
            QObject.connect(self.togglepointButton,SIGNAL("pressed()"),self.togglepoint)
            self.toggleaction=-1 #=0->in lis, action is to remove from list, =1->vice versa
#            navcontrollayout.addWidget(self.savenavimageButton)
#            navcontrollayout.addWidget(self.savepointlistButton)
#            navcontrollayout.addWidget(self.removeedgeButton)
#            navcontrollayout.addWidget(self.togglepointButton)
#            navcontrollayout.addWidget(self.killCheckBox)
            killnavbuttonlayout=QVBoxLayout()
            killnavbuttonlayout.addLayout(radkilllayout)
            killnavbuttonlayout.addLayout(clickkilllayout)
            killnavbuttonlayout.addWidget(self.clearkillButton)
            killnavbuttonlayout.addWidget(self.savekillmapimageButton)
            killnavbuttonlayout.addWidget(self.savekillmapButton)
            killnavbuttonlayout.addWidget(self.savenavimageButton)
            killnavbuttonlayout.addWidget(self.savepointlistButton)
            killnavbuttonlayout.addWidget(self.removeedgeButton)
            killnavbuttonlayout.addWidget(self.togglepointButton)
            leftlayout=QVBoxLayout()
            QObject.connect(self.plotw, SIGNAL("clicksdone"), self.clickkillcont)
#            leftlayout.addLayout(navcontrollayout)
#            leftlayout.addLayout(killcontrollayout)
            self.killw = plotwidget(self, width=3, height=3, dpi=100, showcolbar=False)
            leftlayout.addWidget(self.killw)
            leftlayout.addWidget(self.navw)
            xlayout=QHBoxLayout()
            xlayout.addLayout(leftlayout)
            xlayout.addLayout(killnavbuttonlayout)
            xlayout.addLayout(layout)
            self.setLayout(xlayout)
            self.drawkillmap()
        else:
            leftlayout=QVBoxLayout()
            leftlayout.addWidget(self.savenavimageButton)
            leftlayout.addWidget(self.navw)
            xlayout=QHBoxLayout()
            xlayout.addLayout(leftlayout)
            xlayout.addLayout(layout)
            self.setLayout(xlayout)
        self.killbool=False
        self.navw.plotpoints(self.pointlist, list(set(self.imnumlist)-set(self.pointlist)))
        self.chiqplotbool=False
        self.fillimComboBox()
        self.imname=unicode(self.imComboBox.currentText())
        try:
            self.imnum=eval(self.imname)
        except:
            print 'abortng plot2d because some error in point selections'
            return
def fillimComboBox(self):
self.imComboBox.clear()
if len(self.imnamelist)>0:
for name in self.imnamelist:
self.imComboBox.insertItem(999, name)
else:
self.imComboBox.insertItem(0, 'err')
self.imComboBox.setCurrentIndex(0)
def chiqplot(self):
idialog=chiqDialog(self, self.qgrid, self.chigrid)
if idialog.exec_():
self.chiq_imagebin=idialog.imagebinSpinBox.value()
self.chiq_chibin=idialog.qbinSpinBox.value()
self.chiq_qbin=idialog.chibinSpinBox.value()
self.chiq_solidanglebool=idialog.solidangleCheckBox.isChecked()
self.chiqplotbool=True
self.draw()
self.chiqplotbool=False
    def draw(self):
        """Render the selected detector image into the main plot widget.

        Honors the log/kill/bin/background checkboxes and, when
        self.chiqplotbool is set (via chiqplot()), re-maps the image onto a
        Chi-Q grid before plotting. Also builds self.savename2 so save()
        produces a descriptive filename.
        """
        self.binbool=self.binCheckBox.isChecked()
        self.bckndbool=self.bckndCheckBox.isChecked()
        # background subtraction already zeroes killed pixels, so the kill map
        # is only applied separately when not subtracting
        self.killbool=self.killCheckBox.isChecked() and (not self.bckndbool)
#        if self.navkill:
#            self.killbool=self.killCheckBox.isChecked()
        # NOTE(review): 'range' shadows the builtin for the rest of this method,
        # and eval() of the free-text colorbar range is trusting the user input
        rangestr=unicode(self.rangeLineEdit.text())
        try:
            range=eval(rangestr)
            if isinstance(range,(int,float)):
                range=(0., 1.*range)
            if len(range)==1:
                range=(0., range[0])
        except:
            range=None
        self.imname=unicode(self.imComboBox.currentText())
        self.imnum=eval(self.imname)
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        if self.binbool:
            h5arr=h5file['/'.join((self.h5groupstr, 'analysis/'+getxrdname(h5analysis)+'/countsbin%d' %self.bin))]
        else:
            h5arr=h5file['/'.join((self.h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
        plotarr=h5arr[self.imnum, :, :]
        h5file.close()
        if self.bckndbool:
            if self.binbool:
                if self.bckndarrbin is None:
                    QMessageBox.warning(self,"failed", "binned background not found")
                else:
                    if self.bcknd=='minanom':
                        # bminanomf[:,0]<0 flags points whose minanom fit failed
                        if self.bminanomf[self.imnum, 0]<0:
                            QMessageBox.warning(self,"failed", "minanom background not available and will not be calculated with binning\n try again without binning but it will take while")
                            self.bckndbool=False
                        else:
                            h5file=h5py.File(self.h5path, mode='r')
                            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                            banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                            h5file.close()
                            plotarr=bckndsubtract(plotarr, self.bckndarrbin, self.imapkillmapbin, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[self.imnum, 0], self.bminanomf[self.imnum, 1]))[0]
                    elif 'lin' in self.bcknd:
                        plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(self.bckndarrbin, self.imnum), self.imapkillmapbin, btype=self.bcknd, linweights=self.blinwts[self.imnum])[0]
                    else:
                        plotarr=bckndsubtract(plotarr, self.bckndarrbin, self.imapkillmapbin, btype=self.bcknd)[0]
            else:
                if self.bckndarr is None:
                    QMessageBox.warning(self,"failed", "background not found")
                    self.bckndbool=False
                else:
                    if self.bcknd=='minanom':
                        if self.bminanomf[self.imnum, 0]<0:
                            # unbinned path can compute banom on the fly (slow)
                            print 'WARNING: calculating bminanom background (for histogram analysis) on the fly: INEFFICIENT'
                            temp=bckndsubtract(plotarr, self.bckndarr, self.imapkillmap, btype=self.bcknd, banomcalc=self.banomcalc)
                            plotarr=temp[0]
                        else:
                            h5file=h5py.File(self.h5path, mode='r')
                            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                            banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                            h5file.close()
                            plotarr=bckndsubtract(plotarr, self.bckndarr, self.imapkillmap, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[self.imnum, 0], self.bminanomf[self.imnum, 1]))[0]
                    elif 'lin' in self.bcknd:
                        plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(self.bckndarr, self.imnum), self.imapkillmap, btype=self.bcknd, linweights=self.blinwts[self.imnum])[0]
                    else:
                        plotarr=bckndsubtract(plotarr, self.bckndarr, self.imapkillmap, btype=self.bcknd)[0]
        elif self.killbool:
            if self.binbool:
                plotarr*=self.imapkillmapbin
            else:
                plotarr*=self.imapkillmap
        if self.chiqplotbool:
            # re-map the detector image onto a (q, chi) grid using the index maps
            if self.binbool:
                imap=self.imapbin
                chimap=self.chimapbin
                killmap=self.imapkillmapbin
            else:
                imap=self.imap
                chimap=self.chimap
                killmap=self.imapkillmap
            if self.chiq_imagebin>1:
                killmap=binboolimage(killmap, bin=self.chiq_imagebin)
                chimap=binimage(chimap, zerokill=True, bin=self.chiq_imagebin, mapbin=self.chiq_chibin)
                imap=binimage(imap, zerokill=True, bin=self.chiq_imagebin, mapbin=self.chiq_qbin)
                plotarr=binimage(plotarr, bin=self.chiq_imagebin)*killmap
            else:
                chimap=mapbin(chimap, mapbin=self.chiq_chibin)
                imap=mapbin(imap, mapbin=self.chiq_qbin)
            qgrid=bingrid_grid(self.qgrid, mapbin=self.chiq_qbin)
            chigrid=bingrid_grid(self.chigrid, mapbin=self.chiq_chibin)
            chivals=q_qgrid_ind(chigrid, range(numpy.max(chimap)))
            qvals=q_qgrid_ind(qgrid, range(numpy.max(imap)))
            # datamask marks (q, chi) cells that have at least one contributing pixel
            datamask=numpy.bool_([[(ch in chimap) and (i in imap) for ch in xrange(1, numpy.max(chimap)+1)] for i in xrange(1, numpy.max(imap)+1)])
            plotarr=numpy.array([[(plotarr[(chimap==ch)&(imap==i)]).mean(dtype='float32') for ch in xrange(1, numpy.max(chimap)+1)] for i in xrange(1, numpy.max(imap)+1)], dtype=plotarr.dtype)
            plotarr*=datamask
            if self.chiq_solidanglebool:
                plotarr=numpy.array([row/(1.0*powdersolidangle_q(qvals[count], self.L, self.wl, psize=self.psize)) for count, row in enumerate(plotarr)], dtype=plotarr.dtype)
            self.plotw.performplot(plotarr, upperorigin=False, axesformat='chiq', qvals=qvals, chivals=chivals, log=self.logCheckBox.isChecked(), colrange=range)
            self.savename2=''.join(('ChiQ', self.imname))
        else:
            self.plotw.performplot(plotarr, log=self.logCheckBox.isChecked(), colrange=range)
            self.savename2=self.imname
        self.plotimagewidth=plotarr.shape[0]
        self.plotw.fig.canvas.draw()
        if self.binbool:
            t1=', binned'
            self.savename2=''.join((self.savename2, '_bin'))
        else:
            t1=''
        if self.bckndbool:
            t2=', background subtracted'
            self.savename2=''.join((self.savename2, '_b', self.bcknd))
        else:
            t2=''
        if self.killbool:
            t3=', kill pixels ->0'
            self.savename2=''.join((self.savename2, '_kill'))
        else:
            t3=''
        self.imgLabel.setText(''.join(('plot of image ',self.imname, t1, t2, t3)))
def drawbcknd(self):
self.binbool=self.binCheckBox.isChecked()
self.killbool=self.killCheckBox.isChecked()
self.imname=unicode(self.imComboBox.currentText())
self.imnum=eval(self.imname)
if self.bcknd=='minanom':
self.binbool=True
self.killbool=True
if self.bminanomf[self.imnum, 0]<0:
QMessageBox.warning(self,"failed", "banom not available for this image")
else:
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
h5file.close()
plotarr=(self.bckndarrbin*self.bminanomf[self.imnum, 0]+banom*self.bminanomf[self.imnum, 0])*self.imapkillmapbin
elif 'lin' in self.bcknd:
if self.binbool:
plotarr=combineimageswithwieghts(self.blinwts[self.imnum], constructbckndarr_linbyposn(self.bckndarrbin, self.imnum))
else:
plotarr=combineimageswithwieghts(self.blinwts[self.imnum], constructbckndarr_linbyposn(self.bckndarr, self.imnum))
if self.killbool:
if self.binbool:
plotarr*=self.imapkillmapbin
else:
plotarr*=self.imapkillmap
else:
if self.binbool:
plotarr=self.bckndarrbin
else:
plotarr=self.bckndarr
if self.killbool:
if self.binbool:
plotarr*=self.imapkillmapbin
else:
plotarr*=self.imapkillmap
self.plotw.performplot(plotarr, log=self.logCheckBox.isChecked())
self.plotimagewidth=plotarr.shape[0]
self.repaint()
self.savename2=self.bcknd
if self.bcknd=='minanom':
self.savename2=''.join((self.savename2, self.imname))
t1=''.join((' for ', self.imname))
elif self.binbool:
t1=', binned'
self.savename2=''.join((self.savename2, '_bin'))
else:
t1=''
self.imgLabel.setText(''.join(('plot of ',self.bcknd,' background image', t1)))
self.plotw.fig.canvas.draw()
def drawbanom(self):
self.imname=unicode(self.imComboBox.currentText())
temp=self.imname
while temp.startswith('0'):
temp=temp[1:]
if temp=='':
temp='0'
self.imnum=eval(temp)
if self.bminanomf[self.imnum, 0]<0:
QMessageBox.warning(self,"failed", "banom not available for this image")
else:
h5file=h5py.File(self.h5path, mode='r')
analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
h5file.close()
self.plotw.performplot(banom*self.imapkillmapbin, log=self.logCheckBox.isChecked())
self.plotimagewidth=banom.shape[0]
self.repaint()
self.savename2=''.join(('banom', self.imname))
self.imgLabel.setText(''.join(('plot of banom for ', self.imname)))
self.plotw.fig.canvas.draw()
def drawkillmap(self):
self.binbool=self.binCheckBox.isChecked()
if self.binbool:
self.killw.performplot(self.killmapbin)
else:
self.killw.performplot(self.killmap)
self.killw.fig.canvas.draw()
def picclickprocess(self, picnum):
picname='%d' %picnum
if picname in self.imnamelist:
for i in range(len(self.imnamelist)):
if self.imnamelist[i]==picname:
self.imComboBox.setCurrentIndex(i)
break
if self.navkill:
if picnum in self.pointlist:
self.toggleaction=0
self.togglepointButton.setText('exclude point\nfrom analysis')
else:
self.toggleaction=1
self.togglepointButton.setText('include point\nin analysis')
self.draw()
self.navw.plotpoints(self.pointlist, list(set(self.imnumlist)-set(self.pointlist)), select=[self.imnum])
self.navw.fig.canvas.draw()
def togglepoint(self):
if self.toggleaction>=0: #delete the point and then add it if it was supposed to be added - ensures no duplicates
pt=self.imnum
temp=[]
for i in self.pointlist:
if i!=pt:
temp+=[i]
self.pointlist=temp
if self.toggleaction==1:
self.pointlist+=[pt]
self.pointlist.sort()
self.navw.plotpoints(self.pointlist, list(set(self.imnumlist)-set(self.pointlist)), select=[pt])
self.navw.fig.canvas.draw()
    def save(self):
        """Save the current main plot as a .png under runpath."""
        self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2))).replace('\\','/').encode())
    def savekillmapimage(self):
        """Save the kill-map plot as a .png under runpath."""
        self.killw.save(os.path.join(self.runpath, ''.join((self.savename1, '_killmap'))).replace('\\','/').encode())
    def savenavimage(self):
        """Save the navigator plot as a .png under runpath."""
        self.navw.save(os.path.join(self.runpath, ''.join((self.savename1, '_points'))).replace('\\','/').encode())
    def savekillmap(self):
        """Persist the edited kill map as a new numbered dataset in the CHESS run
        file and point this experiment's 'killmapstr' attribute at it."""
        h5file=h5py.File(self.h5path, mode='r+')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        killmapgrpstr=h5analysis.attrs['killmapstr']
        chessh5grpstr=killmapgrpstr.rpartition('/')[0]
        h5chess=CHESSRUNFILE('r+')
        h5chesskillgrp=h5chess[chessh5grpstr]
        # find the highest existing 'killmap<N>' index so the new one is N+1
        maxkill=0
        for dset in h5chesskillgrp.iterobjects():
            if isinstance(dset, h5py.Dataset) and (dset.name.rpartition('/')[2]).startswith('killmap') and (dset.name.rpartition('/')[2]).partition('killmap')[2].isdigit():
                maxkill=max(maxkill, eval((dset.name.rpartition('/')[2]).partition('killmap')[2]))
        print 'maxkill', maxkill
        newkillname='killmap%d' %(maxkill+1)
        dset=h5chesskillgrp.create_dataset(newkillname, data=self.killmap)
        dset.attrs['h5createdpath']=str(self.h5path)
        # the binned copy is stored alongside under 'killmap<N>bin<bin>'
        h5chesskillgrp.create_dataset(newkillname+'bin%d' %self.bin,data=self.killmapbin)
        h5chess.close()
        h5analysis.attrs['killmapstr']='/'.join((chessh5grpstr, newkillname))
        updatelog(h5analysis, ''.join(('new killmap created. finished ', time.ctime())))
        h5file.close()
def clearkill(self):
shape=self.killmap.shape
shapebin=self.killmapbin.shape
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5chess=CHESSRUNFILE()
self.killmap=readh5pyarray(h5chess[getxrdname(h5analysis)+'killmap'])
self.killmapbin=readh5pyarray(h5chess[getxrdname(h5analysis)+('killmapbin%d' %self.bin)])
h5chess.close()
h5file.close()
self.imapkillmap=self.killmap*(self.imap!=0)
self.imapkillmapbin=self.killmapbin*(self.imapbin!=0)
self.drawkillmap()
    def radkill(self):
        """Zero the kill map for every pixel whose radius (mm, from the stored
        radius map) exceeds the spin-box value, then rebuild and redraw."""
        radmm=self.radkillmmSpinBox.value()
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5chess=CHESSRUNFILE()
        radmap=readh5pyarray(h5chess[getxrdname(h5analysis)+'radiusmap'])
        h5chess.close()
        h5file.close()
        self.killmap[radmap>radmm]=0
        self.killmapbin=binboolimage(self.killmap, bin=self.bin)
        self.imapkillmap=self.killmap*(self.imap!=0)
        self.imapkillmapbin=self.killmapbin*(self.imapbin!=0)
        self.drawkillmap()
def clickkill(self):
clicks=self.clickkillregionsSpinBox.value()*2
self.plotw.countclicks(clicks)
QMessageBox.information(self, 'INSTRUCTIONS', ''.join(("Click center and then radius of each\ncircle you want to add to kill map.\nTotal of ", "%d" %clicks, " clicks needed.")))
    def clickkillcont(self, ptlist):
        """Rasterize each clicked (center, radius-point) pair into a filled circle
        of False pixels in the full-resolution kill map, then rebuild the binned
        and imap-masked variants and redraw.

        ptlist comes from the plot widget in displayed-image pixel units, so it
        is rescaled by imagewidth/plotimagewidth back to detector pixels.
        """
        clicklist=numpy.round(numpy.float32(ptlist)*self.imagewidth/self.plotimagewidth)
        print clicklist
        # NOTE(review): these list initializations are immediately overwritten
        # inside the loop; 'temp' below accumulates killed-pixel counts per row
        # but is never read afterwards
        cen=[]
        rad=[]
        for i in range(clicklist.shape[0]//2):
            cen=[clicklist[2*i, 0], clicklist[2*i, 1]]
            rad=numpy.uint16(numpy.ceil(numpy.sqrt((clicklist[2*i, 0]-clicklist[2*i+1, 0])**2+(clicklist[2*i, 1]-clicklist[2*i+1, 1])**2)))
            temp=0
            # scan rows of the circle; for each row compute the half-chord d and
            # clear the in-bounds span [d1, d2)
            for pix in range(2*rad+1):
                if 0<=cen[0]+pix-rad<self.imagewidth:
                    d=numpy.uint16(numpy.sqrt(rad**2-(pix-rad)**2))
                    d1=max(0,cen[1]-d)
                    d2=min(self.imagewidth,cen[1]+d+1)
                    self.killmap[cen[0]+pix-rad,d1:d2]=False
                    temp+=d2-d1
        self.killmapbin=binboolimage(self.killmap, bin=self.bin)
        self.imapkillmap=self.killmap*(self.imap!=0)
        self.imapkillmapbin=binboolimage(self.imapkillmap, bin=self.bin)
        self.drawkillmap()
#
#    def clicklogger(self, posn):
#        #posn is list [x,y] pixels wrt to top left pixel=(0,0) and x,y are fractions of image width
#        if self.countclicks:
#            clicklist+=[[posn[0]*self.imagewidth, posn[1]*self.imagewidth]]
#
def savepointlist(self):
h5file=h5py.File(self.h5path, mode='r+')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5analysis.attrs['pointlist']=self.pointlist
updatelog(h5analysis, ''.join(('user-defined pointlist saved. finished ', time.ctime())))
h5file.close()
def removeedge(self):
temp=[]
for pt in self.imnumlist:
if pt in self.pointlist and (self.xcoords[pt]**2+self.zcoords[pt]**2)<self.critradius**2:
temp+=[pt]
self.pointlist=temp
self.navw.plotpoints(self.pointlist, list(set(self.imnumlist)-set(self.pointlist)), select=[self.imnum])
self.navw.fig.canvas.draw()
class depprofDialog(QDialog,
    ui_dep_prof.Ui_DepProfDialog):
    """Dialog for entering per-gun deposition-profile parameters (element,
    rate, voltage, profile choice) and computing resputter coefficients.

    elstr may be a concatenated element string (parsed by DecipherElementStr),
    an explicit sequence of element symbols, or None for an empty start.
    """
    def __init__(self, parent, elstr=None):
        super(depprofDialog, self).__init__(parent)
        self.setupUi(self)
        # per-gun widget arrays; list index 0-3 corresponds to guns 1-4
        self.elLineEdit=[self.lineEditgun0, self.lineEditgun1, self.lineEditgun2, self.lineEditgun3]
        self.rateSpinBox=[self.doubleSpinBoxrate0, self.doubleSpinBoxrate1, self.doubleSpinBoxrate2, self.doubleSpinBoxrate3]
        self.voltSpinBox=[self.doubleSpinBoxvolt0, self.doubleSpinBoxvolt1, self.doubleSpinBoxvolt2, self.doubleSpinBoxvolt3]
        self.dpComboBox=[self.comboBoxdp0, self.comboBoxdp1, self.comboBoxdp2, self.comboBoxdp3]
        self.respLineEdit=[self.lineEditresp0, self.lineEditresp1, self.lineEditresp2, self.lineEditresp3]
        self.fracSpinBox=[self.doubleSpinBoxfrac0, self.doubleSpinBoxfrac1, self.doubleSpinBoxfrac2, self.doubleSpinBoxfrac3]
        for le in self.elLineEdit:
            le.setText(' ')#to make sure the upcoming update counts as "changed"
        QObject.connect(self.lineEditgun0,SIGNAL("textChanged()"),self.elchanged0)
        QObject.connect(self.lineEditgun1,SIGNAL("textChanged()"),self.elchanged1)
        QObject.connect(self.lineEditgun2,SIGNAL("textChanged()"),self.elchanged2)
        QObject.connect(self.lineEditgun3,SIGNAL("textChanged()"),self.elchanged3)
        QObject.connect(self.pushButtonRespCoef,SIGNAL("pressed()"),self.CalcRespCoef)
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
        self.readdepprof() #important that this comes first
        self.propdict={}
        if isinstance(elstr, str):
            elsymlist=self.DecipherElementStr(elstr)
        elif elstr is None:
            # BUGFIX: the default elstr=None previously fell through to
            # zip(None, ...) below, raising TypeError; treat None as "no
            # elements supplied"
            elsymlist=[]
        else:
            elsymlist=elstr
        for elsym, le in zip(elsymlist, self.elLineEdit):
            le.setText(elsym)
        #the above signals are not working so for now at least call the functions for an intial run
        self.elchanged0()
        self.elchanged1()
        self.elchanged2()
        self.elchanged3()
def DecipherElementStr(self, elstr):
#elsymbols=[Elemental.table[i].symbol for i in range(len(Elemental.table))]+['X', 'x']
elsymbols=['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Uub', 'Uut', 'Uuq', 'Uup', 'Uuh', 'Uus', 'Uuo', 'X', 'x']
foundel=[[el, elstr.find(el)] for el in elsymbols if el in elstr]
#this next section says if 2 elements matched at the same place take the longer named element. i/e/ if P and Pt found, use Pt
startinds=set([fe[1] for fe in foundel])
def strlencmp(a,b):
return (len(a)>len(b))*2-1
temp=[]
for si in startinds:
temp+=[sorted([fe for fe in foundel if fe[1]==si],key=operator.itemgetter(0),cmp=strlencmp, reverse=True)[0]]
foundel=temp
foundel=sorted(foundel,key=operator.itemgetter(1))
fourelstr=[]
for i in range(4):
if i<len(foundel) and not (foundel[i][0] in ['X', 'x']):
fourelstr+=[foundel[i][0]]
else:
fourelstr+=['']
return fourelstr
    def CalcRespCoef(self):
        """Gather the current gun settings into self.propdict, compute the
        sorted resputter coefficients, and show them in the result widgets."""
        elstrlist=[str(le.text()) for le in self.elLineEdit]
        for k, v in GunPropertyDict(elstrlist,True).iteritems():
            self.propdict[k]=v
        # only guns listed in propdict['guninds'] contribute parameters
        self.propdict['ProfileParams']=[self.profiles[cbox.currentIndex()][1] for i, cbox in enumerate(self.dpComboBox) if i in self.propdict['guninds']]
        self.propdict['voltages']=[sb.value() for i, sb in enumerate(self.voltSpinBox) if i in self.propdict['guninds']]
        self.propdict['CenterMolRates']=[sb.value() for i, sb in enumerate(self.rateSpinBox) if i in self.propdict['guninds']]
        print 'propdict'
        print self.propdict
        self.propdict['RespAgunBgunCoef']=SortedRespCoef(self.propdict)
        # each entry is (gun a, gun b, coefficient c, fraction f)
        for le, sb, (a, b, c, f) in zip(self.respLineEdit, self.fracSpinBox, self.propdict['RespAgunBgunCoef']): #will only write as many as are there and only 6 if there are more
            le.setText('%s by %s : %.2f' %(self.propdict['symbol'][self.propdict['guninds'].index(a)], self.propdict['symbol'][self.propdict['guninds'].index(b)], c))
            sb.setValue(f)
    def ExitRoutine(self):
        """Finalize self.propdict when the dialog is accepted.

        If resputter coefficients were already computed, only the (possibly
        user-edited) fractions are re-read; otherwise all gun settings are
        gathered fresh with an empty coefficient list.
        """
        self.propdict['DepTime']=self.doubleSpinBoxdeptime.value()
        if 'RespAgunBgunCoef' in self.propdict.keys(): #if resputter coef calculations done then don't re-read info even if it has been changed
            for i, (sb, notused) in enumerate(zip(self.fracSpinBox, self.propdict['RespAgunBgunCoef'])):
                self.propdict['RespAgunBgunCoef'][i][3]=sb.value()
            return
        self.propdict['RespAgunBgunCoef']=[]
        elstrlist=[str(le.text()) for le in self.elLineEdit]
        for k, v in GunPropertyDict(elstrlist,True).iteritems():
            self.propdict[k]=v
        self.propdict['ProfileParams']=[self.profiles[cbox.currentIndex()][1] for i, cbox in enumerate(self.dpComboBox) if i in self.propdict['guninds']]
        self.propdict['voltages']=[sb.value() for i, sb in enumerate(self.voltSpinBox) if i in self.propdict['guninds']]
        self.propdict['CenterMolRates']=[sb.value() for i, sb in enumerate(self.rateSpinBox) if i in self.propdict['guninds']]
    def readdepprof(self):
        """Load deposition profiles from the profiles text file into
        self.profiles and populate every profile combo box.

        Each line is tab-delimited; the first three fields form the profile
        name and the next three eval() to its numeric parameters. Lines that
        fail to parse are skipped.
        """
        f=DEPPROFILETXT()
        lines=f.readlines()
        self.profiles=[]
        for l in lines:
            EGDabc=[]
            c=l
            # peel off the first five tab-separated fields; the remainder
            # (cleaned of stray characters) becomes the sixth entry
            for temp in range(5):
                a,b,c=c.partition('\t')
                EGDabc+=[a]
            EGDabc+=[stripbadcharsfromnumstr(c)]
            nam='_'.join(EGDabc[:3])
            try:
                self.profiles+=[[nam, [eval(EGDabc[3]), eval(EGDabc[4]), eval(EGDabc[5])]]]
                for cbox in self.dpComboBox:
                    cbox.insertItem(99, nam)
            except:
                continue
        f.close()
    # element-line-edit change handlers: one per gun, each just re-selects the
    # best-matching deposition profile for that gun
    def elchanged0(self):
        self.pickprofile(0)
    def elchanged1(self):
        self.pickprofile(1)
    def elchanged2(self):
        self.pickprofile(2)
    def elchanged3(self):
        self.pickprofile(3)
    def pickprofile(self, gunind):
        """Auto-select a deposition profile for gun index gunind (0-3) based on
        the element symbol currently typed into that gun's line edit.

        Searches profile names for '<el>_<gun>_' with a per-gun preference
        order; falls back to a 'none' profile for an empty element.
        """
        elstr=str(self.elLineEdit[gunind].text())
        if elstr=='':
            temp=[i for i, prof in enumerate(self.profiles) if 'none' in prof[0]]
            if len(temp)>0:
                self.dpComboBox[gunind].setCurrentIndex(temp[0])
            return
        if gunind==0:
            gunpref=[1, 3] #gun pref uses gun1to4 not the index 0 to 3
        elif gunind==1:
            gunpref=[1, 3]
        elif gunind==2:
            gunpref=[3, 1]
        else:
            gunpref=[4]
        # NOTE(review): 'Pt_%d_' is a hard-coded fallback (any Pt profile on the
        # preferred gun) before the fully generic '_%d_' fallback -- presumably
        # intentional for this deposition system; confirm
        searchstr=['%s_%d_' %(elstr, gp) for gp in gunpref]+['Pt_%d_' %gunpref[0], '_%d_' %gunpref[0]]
        for sstr in searchstr:
            temp=[[i, prof[0].partition(sstr)[2]] for i, prof in enumerate(self.profiles) if sstr in prof[0]]
            if len(temp)>0:
                temp=sorted(temp, key=operator.itemgetter(1))
                self.dpComboBox[gunind].setCurrentIndex(temp[0][0])
                return
class mini_program_dialog(QDialog,
    ui_mini_program_dialog.Ui_mini_program_dialog):
    """Dialog for assembling a 'mini program' from snippets stored in a text
    database; blank lines in the file separate individual programs."""
    def __init__(self, parent, qgrid=None):
        super(mini_program_dialog, self).__init__(parent)
        self.setupUi(self)
        self.cmdtext=''                 # accumulated program text
        self.txtpath=MINIPROGRAMpath()  # path of the program database .txt
        self.initfromtxt()
    @pyqtSignature("")
    def on_appendPushButton_clicked(self):
        """Append the selected program to cmdtext, newline-separated."""
        if self.cmdtext=='':
            self.cmdtext=str(self.programComboBox.currentText())
        else:
            self.cmdtext='\n'.join((self.cmdtext, str(self.programComboBox.currentText())))
    @pyqtSignature("")
    def on_opentxtPushButton_clicked(self):
        """Let the user pick a different program database file and reload."""
        temp=mygetopenfile(self, xpath=self.txtpath, markstr='.txt file of mini program database')
        if temp!='':
            self.txtpath=temp
            self.initfromtxt()
    def initfromtxt(self):
        """(Re)populate the program combo box from the database file; programs
        are separated by blank lines."""
        fin = open(self.txtpath, "r")
        lines=fin.readlines()
        fin.close()
        self.programComboBox.clear()
        currentprogram=''
        for l in lines:
            if l.startswith('\n'):
                self.programComboBox.insertItem(99,currentprogram)
                currentprogram=''
            else:
                currentprogram+=l
        # flush the last program if the file does not end with a blank line
        if currentprogram!='':
            self.programComboBox.insertItem(99,currentprogram)
class waveset1dparamDialog(QDialog,
    ui_waveset1d_params.Ui_waveset1d_params_Dialog):
    """1D wavelet-set parameter dialog; seeds the q-range spin boxes from the
    supplied q grid when one is given."""
    def __init__(self, parent, qgrid=None):
        super(waveset1dparamDialog, self).__init__(parent)
        self.setupUi(self)
        if not (qgrid is None):
            defintpar=minmaxint_qgrid(qgrid)   # (qmin, qmax, qinterval)
            self.qminSpinBox.setValue(defintpar[0])
            self.qmaxSpinBox.setValue(defintpar[1])
            self.qintSpinBox.setValue(defintpar[2])
class intparamDialog(QDialog,
    ui_int_params.Ui_intparamDialog):
    """Integration parameter dialog; seeds the q-range spin boxes with the
    module-default integration parameters."""
    def __init__(self, parent):
        super(intparamDialog, self).__init__(parent)
        self.setupUi(self)
        defintpar=integration_params()   # (qmin, qmax, qinterval)
        self.qminSpinBox.setValue(defintpar[0])
        self.qmaxSpinBox.setValue(defintpar[1])
        self.qintSpinBox.setValue(defintpar[2])
class chiparamDialog(QDialog,
    ui_chi_params.Ui_chiparamDialog):
    """Chi-range parameter dialog; seeds the chi min/max spin boxes from the
    CHESS h5 group's stored limits."""
    def __init__(self, parent, chessh5grpstr):
        super(chiparamDialog, self).__init__(parent)
        self.setupUi(self)
        chimin, chimax=getchiminmax(chessh5grpstr)
        self.chiminSpinBox.setValue(chimin)
        self.chimaxSpinBox.setValue(chimax)
class qqparamDialog(QDialog,
    ui_qq_params.Ui_qqparamDialog):
    """Q-Q analysis parameter dialog; seeds the q range from qgrid and fills a
    type combo box from the caller-supplied option list."""
    def __init__(self, parent, qgrid, opts, optslabel):
        super(qqparamDialog, self).__init__(parent)
        self.setupUi(self)
        a, b, c=minmaxint_qgrid(qgrid)   # (qmin, qmax, qinterval)
        self.qminSpinBox.setValue(a)
        self.qmaxSpinBox.setValue(b)
        self.qintSpinBox.setValue(c)
        self.typeLabel.setText(optslabel)
        for item in opts:
            self.typeComboBox.insertItem(99,item)
class XRDSuiteDialog(QDialog,
    ui_XRDSuite_params.Ui_XRDSuite_params):
    """XRDSuite export parameter dialog: q range plus two labeled combo boxes
    (x-axis type and image type) filled from the supplied option lists."""
    def __init__(self, parent, xtypelist, xtypelabel, imtypelist, imtypelabel, qlow, qhigh):
        super(XRDSuiteDialog, self).__init__(parent)
        self.setupUi(self)
        self.qminSpinBox.setValue(qlow)
        self.qmaxSpinBox.setValue(qhigh)
        self.xtypeLabel.setText(xtypelabel)
        for item in xtypelist:
            self.xtypeComboBox.insertItem(99,item)
        self.imtypeLabel.setText(imtypelabel)
        for item in imtypelist:
            self.imtypeComboBox.insertItem(99,item)
class wavepeak1dDialog(QDialog,
    ui_wavepeak_1d.Ui_wavepeak1dDialog):
    """Dialog for 1-d wavelet peak-search parameters.

    parent    : parent widget
    opts      : iterable of option strings for the type combo box
    optslabel : label text for the type combo box
    defvals   : default values, indexed as (min ridge length, min ridge
                weight sum, wavelet noise cutoff, max q sigma)
    """
    # FIX: default changed from a list to an equal-valued tuple so the shared
    # default can never be mutated across instantiations (mutable-default
    # pitfall); it is only indexed here, so callers are unaffected.
    def __init__(self, parent, opts, optslabel, defvals=(2, 100., 20., 1.5)):
        super(wavepeak1dDialog, self).__init__(parent)
        self.setupUi(self)
        self.typeLabel.setText(optslabel)
        self.minridgelength_spinBox.setValue(defvals[0])
        self.minridgewtsum_spinBox.setValue(defvals[1])
        self.wavenoisecutoff_spinBox.setValue(defvals[2])
        self.maxqs_spinBox.setValue(defvals[3])
        # index 99 is past the end of the combo box, so each insert appends
        for item in opts:
            self.typeComboBox.insertItem(99,item)
class h5fileinfoDialog(QDialog,
    ui_h5file_info.Ui_h5infoDialog):
    """Dialog that mirrors the hierarchy of one h5 group as a tree (datasets,
    subgroups, optionally their attributes) and shows the group's
    'modifiedlog' attribute in a text browser."""
    def __init__(self, parent, h5path, h5groupstr, showattrs=True):
        # h5path: path to the .h5 file; h5groupstr: group to display;
        # showattrs: when True, each node's attributes become child tree items
        super(h5fileinfoDialog, self).__init__(parent)
        self.setupUi(self)
        self.showattrs=showattrs
        h5file=h5py.File(h5path, mode='r')
        h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
        h5root=h5file[h5groupstr]
        # h5mar is opened but not used below — NOTE(review): possibly vestigial
        h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
        self.treeWidget=QTreeWidget() #added without knowing if it is necessary
        mainitem=QTreeWidgetItem([h5groupstr], 0)
        self.treeWidget.addTopLevelItem(mainitem)
        self.createTree(h5root, mainitem)
        self.logBrowser.setText(unicode(h5analysis.attrs['modifiedlog']))
        h5file.close()
        self.logLabel.setText(''.join(('log of modifications on ', h5groupstr)))
    def createTree(self, startnode, parentitem):
        """Recursively add the h5 hierarchy under startnode as children of parentitem."""
        # debug output left in by the original author
        print startnode
        print startnode.listobjects()
        for node in startnode.iterobjects():
            if isinstance(node, h5py.Dataset):
                # dataset items show the leaf name followed by the array shape (backtick repr)
                item=QTreeWidgetItem([node.name.rpartition('/')[2]+`node.shape`], 0)
                parentitem.addChild(item)
                if self.showattrs:
                    for attrname, attrval in node.attrs.iteritems():
                        attritem=QTreeWidgetItem([self.attrstring(attrname, attrval)], 0)
                        item.addChild(attritem)
            elif isinstance(node, h5py.Group):
                item=QTreeWidgetItem([node.name.rpartition('/')[2]], 0)
                parentitem.addChild(item)
                # recurse into the subgroup before listing its attributes
                self.createTree(node, item)
                if self.showattrs:
                    for attrname, attrval in node.attrs.iteritems():
                        attritem=QTreeWidgetItem([self.attrstring(attrname, attrval)], 0)
                        item.addChild(attritem)
    def attrstring(self, attrname, attrval):
        """Return a one-line "'name':value" summary of an attribute.

        Long strings and large arrays are abbreviated; on any failure the
        summary falls back to the attribute's type name.
        """
        s="'"+attrname+"':"
        try:
            if isinstance(attrval, str):
                if len(attrval)>100:
                    # abbreviate long strings to first/last 20 characters
                    s+=attrval[:20]+' ... '+attrval[-20:]
                else:
                    s+=attrval
            elif isinstance(attrval, int) or isinstance(attrval, float):
                s+=self.numfmt(attrval)
            elif isinstance(attrval, list) or isinstance(attrval, numpy.ndarray):
                # descend through first/last elements to determine nesting depth
                temp=attrval
                temp2=attrval
                ndim=0
                while isinstance(temp, list) or isinstance(temp, numpy.ndarray):
                    if len(temp)==0 or len(temp2)==0:
                        s+='contains empty list'
                        return s
                    temp=temp[0]
                    temp2=temp2[-1]
                    ndim+=1
                if isinstance(temp, str):
                    # string arrays: use the repr with its wrapper trimmed off
                    attrvalstr=`attrval`
                    attrvalstr=attrvalstr.partition('(')[2].rpartition(',')[0]
                    if len(attrvalstr)>100:
                        s+=attrvalstr[:20]+' ... '+attrvalstr[-20:]
                    else:
                        s+=attrvalstr
                    return s
                if ndim==1:
                    if len(attrval)<10:
                        s+='['+','.join([self.numfmt(attrel) for attrel in attrval])+']'
                    else:
                        # long 1-d array: show only the first and last values
                        s+= '['+',...,'.join([self.numfmt(attrel) for attrel in [temp, temp2]])+']'
                elif ndim==2:
                    s+= '[]'+',..][..,'.join([self.numfmt(attrel) for attrel in [temp, temp2]])+']]'
                else:
                    s+='%d' %ndim +' dimmension structure with first value of '+self.numfmt(temp)
            else:
                # unrecognized type: deliberately trip the except clause below
                raise
        except:
            s+='type is '+`type(attrval)`
        return s
    def numfmt(self, num):
        """Format a number compactly: ints verbatim, moderate floats with 4
        decimals, everything else via the project's exponent formatter."""
        if isinstance(num, int):
            s='%d' %num
        elif num==0.:
            s='0.0'
        elif numpy.abs(num)<100 and numpy.abs(num)>=.01:
            s='%.4f' %num
        else:
            s=myexpformat(num)
        return s
class plotimapwindow(QDialog):
    """Dialog for plotting the integration map (imap) of a diffraction dataset,
    with optional texture (chi-map) analysis plots alongside."""
    def __init__(self, parent, h5path, h5groupstr, runpath, texture=False):
        # h5path/h5groupstr: data location; runpath: directory for saved .png;
        # texture=True adds the chi-map/texture half of the window
        super(plotimapwindow, self).__init__(parent)
        self.texturebool=texture
        self.h5path=h5path
        self.h5groupstr=h5groupstr
        self.runpath=runpath
        # savename1: "<h5 file stem>_<group>_" prefix for saved images
        self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        # NOTE(review): 'getattr' here cannot be the builtin (a str has no such
        # attribute) — presumably a project-level helper shadowing it; confirm.
        attrdict=getattr(self.h5path, self.h5groupstr)
        self.bin=getbin(h5analysis)
        self.pointlist=h5analysis.attrs['pointlist']
        # killmap / imap in both full-resolution and binned form
        self.killmap=getkillmap(h5analysis.attrs['killmapstr'])
        self.killmapbin=getkillmap(h5analysis.attrs['killmapstr'], bin=self.bin)
        #for display killmap also takes out pixels not in imap - for editing killmap, don't involve imap
        self.imap, self.qgrid=getimapqgrid(h5analysis.attrs['imapstr'])
        self.imapbin=getimapqgrid(h5analysis.attrs['imapstr'], qgrid=False, bin=self.bin)
        self.imapkillmap=self.killmap*(self.imap!=0)
        self.imapkillmapbin=self.killmapbin*(self.imapbin!=0)
        if self.texturebool:
            # chi maps and the per-pixel dq*dchi weights used by texture binning
            self.chimap, self.chigrid=getchimapchigrid(h5analysis.attrs['chimapstr'])
            self.chimapbin=getchimapchigrid(h5analysis.attrs['chimapstr'], chigrid=False, bin=self.bin)
            self.dqchiimage=getdqchiimage(h5analysis.attrs['dqchiimagestr'])
            self.dqchiimagebin=getdqchiimage(h5analysis.attrs['dqchiimagestr'], bin=self.bin)
        # load the background arrays appropriate to the background type
        self.bcknd=attrdict['bcknd']
        if 'lin' in self.bcknd:
            self.bckndarr, self.blinwts=readblin(h5mar)
            self.bckndarrbin, self.blinwts=readblin(h5mar, bin=self.bin)
        else:
            bstr=''.join(('b', self.bcknd[:3]))
            self.bckndarr=readh5pyarray(h5mar[bstr])
            bstr=''.join((bstr, 'bin%d' %self.bin))
            self.bckndarrbin=readh5pyarray(h5mar[bstr])
            if self.bcknd=='minanom':
                if 'bimap' in h5mar:
                    bimap=readh5pyarray(h5mar['bimap'])
                    bqgrid=h5mar['bimap'].attrs['bqgrid']
                else:
                    bimap=None
                    bqgrid=None
                # arguments needed for on-the-fly banom calculation in draw()
                self.banomcalc=(self.imapbin, self.qgrid, attrdict, bimap, bqgrid)
                self.bminanomf=readh5pyarray(h5mar['bminanomf'])
        self.imnumlist=self.pointlist[:]
        # image choices: the numbered points plus any 2-d 'b*' background datasets
        self.imnamelist=['%d' %p for p in self.pointlist]
        for dset in h5mar.iterobjects():
            if isinstance(dset, h5py.Dataset) and len(dset.shape)==2 and (dset.name.rpartition('/')[2]).startswith('b'):
                self.imnamelist+=[dset.name.rpartition('/')[2]]
        h5file.close()
        # ---- GUI construction ----
        self.setWindowTitle('Plot integration mapping')
        self.bckndCheckBox=QCheckBox()
        self.bckndCheckBox.setText('subtract background\napply killmap')
        self.bckndCheckBox.setChecked(True)
        self.binCheckBox=QCheckBox()
        self.binCheckBox.setText('use binned data')
        self.binCheckBox.setChecked(True)
        self.drawimapButton=QPushButton()
        self.drawimapButton.setText('draw imap')
        QObject.connect(self.drawimapButton,SIGNAL("pressed()"),self.drawimap)
        self.imComboBox=QComboBox()
        lolabel=QLabel()
        lolabel.setText('low q')
        hilabel=QLabel()
        hilabel.setText('high q')
        # the q-range spin boxes are stepped/bounded by the q-grid
        qmin, qmax, qint=minmaxint_qgrid(self.qgrid)
        self.lowbinSpinBox=QDoubleSpinBox()
        self.lowbinSpinBox.setDecimals(2)
        self.lowbinSpinBox.setSingleStep(qint)
        self.lowbinSpinBox.setValue(qmin)
        self.lowbinSpinBox.setRange(qmin, qmax)
        self.highbinSpinBox=QDoubleSpinBox()
        self.highbinSpinBox.setDecimals(2)
        self.highbinSpinBox.setSingleStep(qint)
        self.highbinSpinBox.setValue(qmax)
        self.highbinSpinBox.setRange(qmin, qmax)
        spinlayout=QGridLayout()
        spinlayout.addWidget(lolabel, 0, 0)
        spinlayout.addWidget(hilabel, 1, 0)
        spinlayout.addWidget(self.lowbinSpinBox, 0, 1)
        spinlayout.addWidget(self.highbinSpinBox, 1, 1)
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        toplayout=QHBoxLayout()
        toplayout.addWidget(self.bckndCheckBox)
        toplayout.addWidget(self.binCheckBox)
        toplayout.addWidget(self.drawimapButton)
        toplayout.addWidget(self.imComboBox)
        toplayout.addLayout(spinlayout)
        buttonlayout=QVBoxLayout()
        buttonlayout.addWidget(self.drawButton)
        buttonlayout.addWidget(self.saveButton)
        toplayout.addLayout(buttonlayout)
        layout=QVBoxLayout()
        layout.addLayout(toplayout)
        self.imgLabel=QLabel()
        layout.addWidget(self.imgLabel)
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        toolbar=self.plotw.gettoolbarinstance()
        layout.addWidget(self.plotw)
        if self.texturebool:
            # ---- texture half of the window ----
            layout2=QHBoxLayout()
            texturelayout=QVBoxLayout()
            texbuttonlayout=QGridLayout()
            drawchimapButton=QPushButton()
            drawchimapButton.setText('draw chimap')
            QObject.connect(drawchimapButton,SIGNAL("pressed()"),self.drawchimap)
            genpeakButton=QPushButton()
            genpeakButton.setText('list peaks')
            QObject.connect(genpeakButton,SIGNAL("pressed()"),self.fillpeakSpinBox)
            self.peakComboBox=QComboBox()
            peaklabel=QLabel()
            peaklabel.setText('peak q, counts ')
            self.qwidthSpinBox=QDoubleSpinBox()
            self.qwidthSpinBox.setValue(0.2)
            widthlabel=QLabel()
            widthlabel.setText('annulus q-width')
            self.fulltexplotComboBox=QComboBox()
            self.fulltexplotComboBox.clear()
            self.fulltexplotComboBox.insertItem(0, 'LHS and RHS')
            self.fulltexplotComboBox.insertItem(1, 'ave LHS+RHS')
            self.fulltexplotComboBox.insertItem(2, 'only LHS')
            self.fulltexplotComboBox.insertItem(3, 'only RHS')
            self.fulltexplotComboBox.setCurrentIndex(0)
            self.overlayCheckBox=QCheckBox()
            self.overlayCheckBox.setText('overlay')
            self.overlayCheckBox.setChecked(False)
            self.rawplotCheckBox=QCheckBox()
            self.rawplotCheckBox.setText('Plot raw')
            self.rawplotCheckBox.setChecked(False)
            texdrawButton=QPushButton()
            texdrawButton.setText('draw texture')
            QObject.connect(texdrawButton,SIGNAL("pressed()"),self.drawtexture)
            texdrawfromfileButton=QPushButton()
            texdrawfromfileButton.setText('draw texture\nfrom file')
            QObject.connect(texdrawfromfileButton,SIGNAL("pressed()"),self.drawtexturefromfile)
            texgrpLabel=QLabel()
            texgrpLabel.setText('saved texture name, index')
            self.texgrpComboBox=QComboBox()
            QObject.connect(self.texgrpComboBox,SIGNAL("activated(QString)"),self.filltexgrpcombobox)
            self.fromfileimComboBox=QComboBox()
            texsaveButton=QPushButton()
            texsaveButton.setText('save .png')
            QObject.connect(texsaveButton,SIGNAL("pressed()"),self.savetexpng)
            texbuttonlayout.addWidget(drawchimapButton, 0, 0, 2, 1)
#            texbuttonlayout.addWidget(texgrpLabel, 0, 1, 1, 2)
#            texbuttonlayout.addWidget(self.texgrpComboBox, 1, 1, 1, 2)
#            texbuttonlayout.addWidget(self.fromfileimComboBox, 1, 2, 1, 1)
            texbuttonlayout.addWidget(texdrawfromfileButton, 0, 1, 2, 1)
            texgrplayout=QGridLayout()
            texgrplayout.addWidget(texgrpLabel, 0, 0, 1, 2)
            texgrplayout.addWidget(self.texgrpComboBox, 1, 0, 1, 1)
            texgrplayout.addWidget(self.fromfileimComboBox, 1, 1, 1, 1)
            texbuttonlayout.addLayout(texgrplayout, 0, 2, 2, 1)
            texbuttonlayout.addWidget(genpeakButton, 0, 3, 2, 1)
            texbuttonlayout.addWidget(self.peakComboBox, 1, 4)
            texbuttonlayout.addWidget(peaklabel, 0, 4)
            texbuttonlayout.addWidget(self.qwidthSpinBox, 1, 5)
            texbuttonlayout.addWidget(widthlabel, 0, 5)
            texbuttonlayout.addWidget(self.fulltexplotComboBox, 0, 6, 2, 1)
            texbuttonlayout.addWidget(self.overlayCheckBox, 0, 7, 1, 1)
            texbuttonlayout.addWidget(self.rawplotCheckBox, 1, 7, 1, 1)
            texbuttonlayout.addWidget(texdrawButton, 0, 8, 1, 1)
            texbuttonlayout.addWidget(texsaveButton, 1, 8, 1, 1)
            texturelayout.addLayout(texbuttonlayout)
            self.texplotw = plotwidget(self, width=5, height=5, dpi=100)
            texturelayout.addWidget(self.texplotw)
            layout2.addLayout(layout)
            layout2.addLayout(texturelayout)
            self.setLayout(layout2)
            self.peakComboBox.clear()
            self.peakComboBox.insertItem(999, 'from2D')
            # re-open the file to list saved texture groups (those with 'icounts')
            h5file=h5py.File(self.h5path, mode='r')
            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
            h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
            anyfromfile=False
            if 'texture' in h5mar:
                h5tex=h5mar['texture']
                for grp in h5tex.iterobjects():
                    if isinstance(grp, h5py.Group) and 'icounts' in grp:
                        self.texgrpComboBox.insertItem(999, grp.name.rpartition('/')[2])
                        anyfromfile=True
            h5file.close()
            if anyfromfile:
                self.texgrpComboBox.setCurrentIndex(0)
                self.filltexgrpcombobox()
            else:
                # no saved texture groups: disable the from-file controls
                self.texgrpComboBox.setDisabled(True)
                self.fromfileimComboBox.setDisabled(True)
                texdrawfromfileButton.setDisabled(True)
        else:
            self.setLayout(layout)
        self.fillimComboBox()
        self.imname=unicode(self.imComboBox.currentText())
        if self.imname.isdigit():
            self.imnum=eval(self.imname)
        else:
            QMessageBox.warning(self,"failed", "did not find any diffraction images")
            return
        # NOTE(review): redundant — self.imnum was already set in the branch above
        self.imnum=eval(self.imname)
def fillimComboBox(self):
self.imComboBox.clear()
if len(self.imnamelist)>0:
for name in self.imnamelist:
self.imComboBox.insertItem(999, name)
else:
self.imComboBox.insertItem(0, 'err')
self.imComboBox.setCurrentIndex(0)
def fillpeakSpinBox(self):
if self.imname.isdigit():
self.imnum=eval(self.imname)
else:
QMessageBox.warning(self,"failed", "cannot extract peaks for that type of image")
return
self.peakComboBox.clear()
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'pkcounts' in h5mar:
peaks, garb, heights=peakinfo_pksavearr(h5mar['pkcounts'][self.imnum, :,:])
for tup in zip(peaks, heights):
self.peakComboBox.insertItem(999, '%.2f,%.0f' %tup)
h5file.close()
self.peakComboBox.insertItem(999, 'from2D')
def filltexgrpcombobox(self):
self.fromfileimComboBox.clear()
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5tex=h5mar['texture']
h5texgrp=h5tex[str(self.texgrpComboBox.currentText())]
pointlist=h5texgrp.attrs['pointlist']
#counts=readh5pyarray(h5texgrp['icounts'])
h5file.close()
for ind in pointlist:
self.fromfileimComboBox.insertItem(999, '%d' %ind)
    def drawtexturefromfile(self):
        """Plot a previously saved texture curve (icounts vs chi) for the point
        selected in the from-file combo box, restoring the bin/bcknd/q-range
        settings it was saved with."""
        self.imname=unicode(self.fromfileimComboBox.currentText())
        try:
            self.imComboBox.setCurrentIndex(self.imnamelist.index(self.imname))
            pointind=eval(self.imname)#could support bin images but not yet supported
        except:
            QMessageBox.warning(self,"failed", "cannot find that image")
            return
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        h5tex=h5mar['texture']
        h5texgrp=h5tex[str(self.texgrpComboBox.currentText())]
        # restore the state the texture was computed with
        self.binCheckBox.setChecked(h5texgrp.attrs['bin']>1)
        self.bckndCheckBox.setChecked(h5texgrp.attrs['bckndbool']>0)
        q=h5texgrp.attrs['q_peaks'][pointind]
        dq=h5texgrp.attrs['qhalfwidth'][pointind]
        self.highbinSpinBox.setValue(q+dq)
        self.lowbinSpinBox.setValue(q-dq)
        # 32767 marks unused slots in the saved 2-d index arrays
        ind2d=h5texgrp['ind2d'][pointind, :, :]
        ind2dinds=numpy.where((ind2d[0, :]!=32767)&(ind2d[1, :]!=32767))[0]
        ind2d=(ind2d[0, ind2dinds], ind2d[1, ind2dinds])
        self.draw(ind2d=ind2d)
        # order is important here: self.chivals and self.countvals are set in
        # self.draw() and then overwritten below with the saved values
        self.chivals=numpy.float32(q_qgrid_ind(h5texgrp.attrs['chigrid']))
        self.countvals=h5texgrp['icounts'][pointind, :]
        # drop NaN counts and the matching chi values
        countinds=numpy.where(numpy.logical_not(numpy.isnan(self.countvals)))
        self.countvals=self.countvals[countinds]
        self.chivals=self.chivals[countinds]
        h5file.close()
        self.texplotw.performplot([self.chivals, self.countvals], overlay=self.overlayCheckBox.isChecked(), formstr='k-')
        self.texplotw.fig.canvas.draw()
    def drawtexture(self):
        """Compute and plot the texture curve (intensity vs chi) for the selected
        peak and annulus width, for LHS/RHS/average per the plot-type combo."""
        texplotind=self.fulltexplotComboBox.currentIndex()
        kstr=unicode(self.peakComboBox.currentText())
        if kstr!='from2D':
            # set the q window to peak position +/- half the annulus width
            kind=ind_qgrid_q(self.qgrid, eval(kstr.partition(',')[0]))
            sideind=max([1, numpy.uint16(numpy.round(self.qwidthSpinBox.value()/2.0/self.qgrid[1]))])
            self.highbinSpinBox.setValue(q_qgrid_ind(self.qgrid, index=kind+sideind))
            self.lowbinSpinBox.setValue(q_qgrid_ind(self.qgrid, index=kind-sideind))
        # lambda maps combo index 0,1 -> bothnegpos 0 (both sides), 2 -> 1 (LHS), 3 -> 2 (RHS)
        self.draw(bothnegpos=(lambda x: (x<=1 and (0,) or (x-1,))[0])(texplotind))
        if self.rawplotCheckBox.isChecked():
            # raw scatter of the unbinned (chi, counts) values set by draw()
            self.texplotw.performplot([self.chivals, self.countvals], overlay=self.overlayCheckBox.isChecked(), formstr='k.')
#        bins=numpy.uint16(range(numpy.uint16(numpy.round(min(self.chivals))), numpy.uint16(numpy.round(max(self.chivals)))+1))
#        chivalsint=numpy.uint16(numpy.round(self.chivals))
#        binnedchidata=numpy.float32([[chi, self.countvals[chivalsint==chi].mean()] for chi in bins if chi in chivalsint]).T
        sortedchivals=list(set(self.chivals))
        sortedchivals.sort()
        # debug output: per-chi sample counts and max dqchi weights
        print [self.dqchivals[self.chivals==chi].size for chi in sortedchivals]
        print 'max', [numpy.max(self.dqchivals[self.chivals==chi]) for chi in sortedchivals]
        # dqchi-weighted mean of counts per distinct chi value -> 2 x nchi array
        binnedchidata=numpy.float32([[chi, (self.countvals[self.chivals==chi]*self.dqchivals[self.chivals==chi]).sum()/self.dqchivals[self.chivals==chi].sum()] for chi in sortedchivals if self.dqchivals[self.chivals==chi].sum()>0]).T
        poschiind=numpy.where(binnedchidata[0, :]>0)
        negchiind=numpy.where(binnedchidata[0, :]<0)
        if texplotind==0:
            # plot both sides; negative chi is mirrored to positive for comparison
            self.texplotw.performplot([-1.0*binnedchidata[0][negchiind], binnedchidata[1][negchiind]], overlay=(self.overlayCheckBox.isChecked() or self.rawplotCheckBox.isChecked()))
            self.texplotw.performplot([binnedchidata[0][poschiind], binnedchidata[1][poschiind]], overlay=True)
        elif texplotind==1:
            # average LHS and RHS at each |chi|
            abschi=numpy.abs(binnedchidata[0][:])
            abschireduced=sorted(list(set(abschi)))
            abschidata=numpy.float32([[chi, binnedchidata[1][abschi==chi].sum()/(abschi==chi).sum()] for chi in abschireduced]).T
            print numpy.float32([(abschi==chi).sum() for chi in abschireduced])
            self.texplotw.performplot([abschidata[0][:], abschidata[1][:]], overlay=(self.overlayCheckBox.isChecked() or self.rawplotCheckBox.isChecked()))
        elif texplotind==2:
            self.texplotw.performplot([-1.0*binnedchidata[0][negchiind], binnedchidata[1][negchiind]], overlay=(self.overlayCheckBox.isChecked() or self.rawplotCheckBox.isChecked()))
        else:
            self.texplotw.performplot([binnedchidata[0][poschiind], binnedchidata[1][poschiind]], overlay=(self.overlayCheckBox.isChecked() or self.rawplotCheckBox.isChecked()))
        #for splitting >90 and <90
#        ind90=myargmax(binnedchidata[0, :]//90)
#        self.texplotw.performplot([binnedchidata[0, :ind90], binnedchidata[1, :ind90]], overlay=(self.overlayCheckBox.isChecked() or self.rawplotCheckBox.isChecked()))
#        self.texplotw.performplot([180-binnedchidata[0,ind90:], binnedchidata[1,ind90:]], overlay=True)
        self.texplotw.fig.canvas.draw()
    def draw(self, ind2d=None, bothnegpos=0):#bothnegpos should be 0 for both neative and positive chiinds, 1 for negative only and 2 for positive only, if ind2d is passed then bothnegpos is not used
        """Plot the selected 2-d image, optionally background-subtracted and
        limited to the q window; in texture mode also set self.chivals /
        self.countvals / self.dqchivals for the selected pixels and zero the
        rest of the plotted array."""
        self.bckndbool=self.bckndCheckBox.isChecked()
        self.binbool=self.binCheckBox.isChecked()
        self.imname=unicode(self.imComboBox.currentText())
        if not self.imname.isdigit():
            # non-numeric name: a stored 2-d array (e.g. a background dataset)
            h5file=h5py.File(self.h5path, mode='r')
            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
            h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
            plotarr=readh5pyarray(h5mar[self.imname])
            h5file.close()
            # pick full-res or binned imap by matching the array shape
            if plotarr.shape==self.imap.shape:
                imap=self.imap*self.killmap
            elif plotarr.shape==self.imapbin.shape:
                imap=self.imapbin*self.killmapbin
            else:
                QMessageBox.warning(self,"failed", "cannot draw because array shape does nto match with imap or imapbin")
                return
        else:
            # numeric name: load the raw (or binned) counts image for that point
            self.imnum=eval(self.imname)
            h5file=h5py.File(self.h5path, mode='r')
            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
            if self.binbool:
                h5arr=h5file['/'.join((self.h5groupstr, 'analysis/'+getxrdname(h5analysis)+'/countsbin%d' %self.bin))]
                imap=self.imapbin*self.killmapbin
            else:
                h5arr=h5file['/'.join((self.h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
                imap=self.imap*self.killmap
            plotarr=h5arr[self.imnum, :, :]
            h5file.close()
        if self.bckndbool:
            # background subtraction; each bcknd type has its own call signature
            if self.binbool:
                if self.bckndarrbin is None:
                    # NOTE(review): unlike the unbinned branch, this does not
                    # reset self.bckndbool, and plotarr stays unsubtracted
                    QMessageBox.warning(self,"failed", "binned background not found")
                else:
                    if self.bcknd=='minanom':
                        if self.bminanomf[self.imnum, 0]<0:
                            QMessageBox.warning(self,"failed", "minanom background not available and will not be calculated with binning\n try again without binning but it will take while")
                            self.bckndbool=False
                        else:
                            h5file=h5py.File(self.h5path, mode='r')
                            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                            banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                            h5file.close()
                            plotarr=bckndsubtract(plotarr, self.bckndarrbin, self.imapkillmapbin, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[self.imnum, 0], self.bminanomf[self.imnum, 1]))[0]
                    elif 'lin' in self.bcknd:
                        plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(self.bckndarrbin, self.imnum), self.imapkillmapbin, btype=self.bcknd, linweights=self.blinwts[self.imnum])[0]
                    else:
                        plotarr=bckndsubtract(plotarr, self.bckndarrbin, self.imapkillmapbin, btype=self.bcknd)[0]
            else:
                if self.bckndarr is None:
                    QMessageBox.warning(self,"failed", "background not found")
                    self.bckndbool=False
                else:
                    if self.bcknd=='minanom':
                        if self.bminanomf[self.imnum, 0]<0:
                            # no stored banom for this image: compute it now
                            print 'WARNING: calculating bminanom background (for imap plotting) on the fly: INEFFICIENT'
                            temp=bckndsubtract(plotarr, self.bckndarr, self.imapkillmap, btype=self.bcknd, banomcalc=self.banomcalc)
                            plotarr=temp[0]
                        else:
                            h5file=h5py.File(self.h5path, mode='r')
                            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                            banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                            h5file.close()
                            plotarr=bckndsubtract(plotarr, self.bckndarr, self.imapkillmap, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[self.imnum, 0], self.bminanomf[self.imnum, 1]))[0]
                    elif 'lin' in self.bcknd:
                        plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(self.bckndarr, self.imnum), self.imapkillmap, btype=self.bcknd, linweights=self.blinwts[self.imnum])[0]
                    else:
                        plotarr=bckndsubtract(plotarr, self.bckndarr, self.imapkillmap, btype=self.bcknd)[0]
        # q window in grid-index units
        qminind=ind_qgrid_q(self.qgrid, self.lowbinSpinBox.value(), fractional=False)
        qmaxind=ind_qgrid_q(self.qgrid, self.highbinSpinBox.value(), fractional=False)
        if self.texturebool:
            if self.binbool:
                chimap=self.chimapbin
                dqchiimage=self.dqchiimagebin
            else:
                chimap=self.chimap
                dqchiimage=self.dqchiimage
            if ind2d is None:
                # select pixels inside the q window, on the requested chi side(s)
                if bothnegpos==1:
                    ind2d=numpy.where((imap>=qminind)&(imap<=qmaxind)&(chimap<0))
                elif bothnegpos==2:
                    ind2d=numpy.where((imap>=qminind)&(imap<=qmaxind)&(chimap>0))
                else:
                    ind2d=numpy.where((imap>=qminind)&(imap<=qmaxind)&(chimap!=0)) #as long as the bin vals are not zero this checks for killmap because imap contains killmap, per a few lines above. the chimap!=0 is just to be safe
            # chimap stores 1-based signed indices into chigrid; recover signed chi
            self.chivals=q_qgrid_ind(self.chigrid, numpy.abs(chimap[ind2d])-1)*numpy.sign(chimap[ind2d])
            self.countvals=plotarr[ind2d]
            self.dqchivals=dqchiimage[ind2d]
            # zero everything outside the selected pixels before plotting
            plotarrcpy=copy.copy(plotarr)
            plotarr=numpy.zeros(plotarr.shape, dtype='float32')
            plotarr[ind2d]=plotarrcpy[ind2d]
        #plotarr[(imap>=qminind)|(imap<=qmaxind)]=0
        self.plotw.performplot(plotarr)
        self.savename2=self.imname
        t1='%.2f' %(self.lowbinSpinBox.value())
        t2='%.2f' %(self.highbinSpinBox.value())
        self.savename2=''.join((self.savename2, '_q', t1, ' to ', t2))
        self.imgLabel.setText(''.join(('plot of image ',self.savename2)))
        self.plotw.fig.canvas.draw()
        #print 'stopping', ASDGADF
def drawimap(self):
self.binbool=self.binCheckBox.isChecked()
self.bckndbool=self.bckndCheckBox.isChecked()
if self.binbool:
if self.bckndbool:
self.plotw.performplot(self.imapbin*self.killmapbin)
else:
self.plotw.performplot(self.imapbin)
else:
if self.bckndbool:
self.plotw.performplot(self.imap*self.killmap)
else:
self.plotw.performplot(self.imap)
self.repaint()
self.savename2='imap'
self.imgLabel.setText('plot of imap')
self.plotw.fig.canvas.draw()
def drawchimap(self):
self.binbool=self.binCheckBox.isChecked()
if self.binbool:
self.plotw.performplot(self.chimapbin)
else:
self.plotw.performplot(self.chimap)
self.repaint()
self.savename2='chimap'
self.imgLabel.setText('plot of chimap')
self.plotw.fig.canvas.draw()
def save(self):
self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2))).replace('\\','/').encode())
def savetexpng(self):
self.texplotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2, '_texture'))).replace('\\','/').encode())
class plot1dintwindow(QDialog):
    """Dialog for plotting 1-d integrated intensity vs scattering vector (or vs
    chi for texture data), with a sample navigator and optional background-edit
    / add-peak / remove-peak modes."""
    def __init__(self, parent, h5path, h5groupstr, runpath, navchoice, bckndedit=False, addpeaks=False, removepeaks=False, type='h5mar:icounts'):
        # navchoice: 0 = x/z position navigator, 1/2 = composition navigator
        #            (diffraction- or XRF-derived mole fractions)
        # bckndedit/addpeaks/removepeaks: mutually-exclusive editing modes
        # type: 'h5mar:...' for q-space icounts, 'h5tex:<grpname>' for texture
        super(plot1dintwindow, self).__init__(parent)
        self.parent=parent
        self.h5path=h5path
        self.h5groupstr=h5groupstr
        self.runpath=runpath
        self.navchoice=navchoice
        self.bckndedit=bckndedit
        self.addpeaks=addpeaks
        self.removepeaks=removepeaks
        # savename1: "<h5 file stem>_<group>_" prefix for saved images
        self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
        self.imnamelist=[]
        self.type=type
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if 'h5mar' in type:
            # q-space data straight from the xrd analysis group
            self.h5datagrpstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))
            #qgridtemp=getimapqgrid(h5analysis.attrs['imapstr'], imap=False)
            self.pointlist=h5analysis.attrs['pointlist']
            self.overlayifcountsbool='ifcounts' in h5mar
#            self.countsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'icounts'))
#            self.processedcountsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'ifcounts'))
            self.qgrid=h5mar['icounts'].attrs['qgrid']
        elif 'h5tex' in type:
            # texture data: the x-axis "qgrid" is actually the chi grid
            h5grpname=type.partition(':')[2]
            h5tex=h5mar['texture']
            h5texgrp=h5tex[h5grpname]
            self.h5datagrpstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname))
            #qgridtemp=h5texgrp.attrs['chigrid']
            self.overlayifcountsbool=False
#            self.countsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'texture', h5grpname, 'icounts'))
#            self.processedcountsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'texture', h5grpname, 'ifcounts'))
            self.pointlist=h5texgrp.attrs['pointlist']
            self.qgrid=h5texgrp.attrs['chigrid']
        # NOTE(review): 'getattr' here cannot be the builtin (a str has no such
        # attribute) — presumably a project-level helper shadowing it; confirm.
        self.attrdict=getattr(self.h5path, self.h5groupstr)
        self.qvals=q_qgrid_ind(self.qgrid)
        # build the image-name list: per-point i/if/id/im entries plus any
        # other 1-d 'i*' datasets stored in the data group
        self.imnamelist=[]
        if 'icounts' in h5file[self.h5datagrpstr]:
            self.imnamelist+=['i%d' %p for p in self.pointlist]
        if 'ifcounts' in h5file[self.h5datagrpstr]:
            self.imnamelist+=['if%d' %p for p in self.pointlist]
        if 'idcounts' in h5file[self.h5datagrpstr]:
            self.imnamelist+=['id%d' %p for p in self.pointlist]
        if 'imcounts' in h5file[self.h5datagrpstr]:
            self.imnamelist+=['im%d' %p for p in self.pointlist]
        for node in h5file[self.h5datagrpstr].iterobjects():
            if (node.name.rpartition('/')[2]).startswith('i') and isinstance(node, h5py.Dataset) and len(node.shape)==1:
                self.imnamelist+=[node.name.rpartition('/')[2]]
        if 'additionalpeaks' in h5file[self.h5datagrpstr]:
            # saved manually-added peaks -> tab-separated text for the browser
            self.additionalpeaks=list(readh5pyarray(h5file[self.h5datagrpstr]['additionalpeaks']))
            txt=''
            for peak in self.additionalpeaks:
                txt+='%d\t%.2f\t%.2f\n' %(int(round(peak[0])), peak[1], peak[2])
            addpeaktxt=txt
        else:
            self.additionalpeaks=[]
            addpeaktxt=''
        h5file.close()
        # calibration values for converting q to 2theta / d-spacing / pixels
        L=self.attrdict['cal'][2]
        wl=self.attrdict['wavelength']
        psize=self.attrdict['psize']
        self.tvals=twotheta_q(self.qvals, wl)
        self.dvals=d_q(self.qvals)
        self.pvals=pix_q(self.qvals, L, wl, psize=psize)
        self.wl=wl
        self.L=L
        self.psize=psize
        if len(self.imnamelist)==0:
            print 'NO 1D IMAGES FOUND!'
            return
        # ---- GUI construction ----
        self.setWindowTitle('Plot intensity vs scattering vector')
        self.savenavimageButton=QPushButton()
        self.savenavimageButton.setText('save .png\nnavigator')
        QObject.connect(self.savenavimageButton,SIGNAL("pressed()"),self.savenavimage)
        self.xgrid=self.attrdict['xgrid']
        self.zgrid=self.attrdict['zgrid']
        self.xcoords=self.attrdict['x']
        self.zcoords=self.attrdict['z']
        if self.navchoice==0:
            self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
        else:
            # composition navigator; falls back to the position navigator when
            # the compositions cannot be calculated
            elstr=self.attrdict['elements']
            if self.navchoice==1:
                infotype='DPmolfracALL'
            else:
                infotype='XRFmolfracALL'
            self.elstrlist, self.compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=infotype)
            if self.compsarr is None:
                print 'NO COMPOSITION NAVIGATOR WINDOW BECAUSE PROBLEM CALCULATING COMPOSITIONS'
                self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
            else:
                print 'COMPS:', self.compsarr
                self.navw = compnavigatorwidget(self, self.compsarr, self.elstrlist)
        QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
        self.saveplotsoButton=QPushButton()
        self.saveplotsoButton.setText('save selected\nimage as plotso')
        QObject.connect(self.saveplotsoButton,SIGNAL("pressed()"),self.toplotso)
        self.logCheckBox=QCheckBox()
        self.logCheckBox.setText('logarithmic\nintensity')
        self.logCheckBox.setChecked(False)
        self.overlayCheckBox=QCheckBox()
        self.overlayCheckBox.setText('overlay on\nexisting plots')
        self.overlayCheckBox.setChecked(False)
        self.xaxisComboBox=QComboBox()
        self.xaxisComboBox.clear()
        if 'h5mar' in type:
            # inserted at index 0 each time, so the final order is reversed:
            # q, 2th, d, pixels
            self.xaxisComboBox.insertItem(0, 'pixels')
            self.xaxisComboBox.insertItem(0, 'd (nm)')
            self.xaxisComboBox.insertItem(0, '2th (deg)')
            self.xaxisComboBox.insertItem(0, 'q 1/nm')
        elif 'h5tex' in type:
            self.xaxisComboBox.insertItem(0, 'PHI (deg)')
        self.xaxisComboBox.setCurrentIndex(0)
        self.imComboBox=QComboBox()
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.drawpeaksButton=QPushButton()
        self.drawpeaksButton.setText('draw w/ peaks')
        QObject.connect(self.drawpeaksButton,SIGNAL("pressed()"),self.drawwithpeaks)
        genpeakButton=QPushButton()
        genpeakButton.setText('list peaks')
        QObject.connect(genpeakButton,SIGNAL("pressed()"),self.fillpeakComboBox)
        self.peakComboBox=QComboBox()
        peaklabel=QLabel()
        peaklabel.setText('peak q, counts')
        peakslayout=QVBoxLayout()
        peakslayout.addWidget(peaklabel)
        peakslayout.addWidget(self.peakComboBox)
        plotfitpeakButton=QPushButton()
        plotfitpeakButton.setText('overlay\nfitted peak')
        QObject.connect(plotfitpeakButton,SIGNAL("pressed()"),self.plotfitpeak)
        self.addpdfButton=QPushButton()
        self.addpdfButton.setText('add PDF peaks')
        QObject.connect(self.addpdfButton,SIGNAL("pressed()"),self.drawpdfpeaks)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        toplayout=QHBoxLayout()
        spaceLabel=QLabel()
        spaceLabel.setText(' ')
        toplayout.addWidget(spaceLabel)
        toplayout.addWidget(spaceLabel)
        toplayout.addWidget(spaceLabel)
        toplayout.addWidget(self.saveplotsoButton)
        toplayout.addWidget(self.savenavimageButton)
        toplayout.addWidget(self.logCheckBox)
        toplayout.addWidget(self.overlayCheckBox)
        toplayout.addWidget(self.xaxisComboBox)
        toplayout.addWidget(self.imComboBox)
        toplayout.addWidget(self.drawButton)
        toplayout.addWidget(self.drawpeaksButton)
        toplayout.addWidget(genpeakButton)
        toplayout.addLayout(peakslayout)
        toplayout.addWidget(plotfitpeakButton)
        toplayout.addWidget(self.addpdfButton)
        toplayout.addWidget(self.saveButton)
        layout=QVBoxLayout()
        leftlayout=QVBoxLayout()
        rightlayout=QVBoxLayout()
        lefttoplayout=QGridLayout()
        plotlayout=QHBoxLayout()
        self.zeroSpinBox=QSpinBox()
        self.zeroSpinBox.setValue(0)
        self.zeroSpinBox.setRange(0,1000000 )
        self.offsetSpinBox=QSpinBox()
        self.offsetSpinBox.setValue(0)
        self.offsetSpinBox.setRange(0,1000000 )
        self.zerolineCheckBox=QCheckBox()
        self.zerolineCheckBox.setText('draw zero line\nfor overlays')
        self.zerolineCheckBox.setChecked(False)
        self.logcutSpinBox=QSpinBox()
        self.logcutSpinBox.setValue(101)
        self.logcutSpinBox.setRange(0,1000000 )
        self.imgLabel=QLabel()
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        # lab0-lab3 get mode-specific text in the branches below
        lab0=QLabel()
        lab1=QLabel()
        lab2=QLabel()
        lab3=QLabel()
        if self.bckndedit:
            # background-edit mode: 4 colour-coded rows of Q-list/point/sigma controls
            self.newadditionfrom1dbckndsubtraction=numpy.zeros(self.qgrid[2], dtype='float32')
            self.calc1dbckndButton=QPushButton()
            self.calc1dbckndButton.setText('calc+plot\nnew bcknd')
            QObject.connect(self.calc1dbckndButton,SIGNAL("pressed()"),self.calc1dbcknd)
            lefttoplayout.addWidget(self.calc1dbckndButton, 0, 0)
            self.save1dbckndButton=QPushButton()
            self.save1dbckndButton.setText('save\nnew bcknd')
            QObject.connect(self.save1dbckndButton,SIGNAL("pressed()"),self.save1dbcknd)
            lefttoplayout.addWidget(self.save1dbckndButton, 0, 1)
            self.revert1dbckndButton=QPushButton()
            self.revert1dbckndButton.setText('revert to as\nintegrated icounts')
            QObject.connect(self.revert1dbckndButton,SIGNAL("pressed()"),self.revert1dbcknd)
            lefttoplayout.addWidget(self.revert1dbckndButton, 0, 2)
            lab3.setText('index interval\nfor interp pts')
            self.bckndindexintervalSpinBox=QSpinBox()
            self.bckndindexintervalSpinBox.setValue(2)
            self.bckndindexintervalSpinBox.setRange(1,1000)
            lefttoplayout.addWidget(lab3, 1, 1)
            lefttoplayout.addWidget(self.bckndindexintervalSpinBox, 1, 2)
            lab0.setText('list of Q\n(comma-delim)')
            lab1.setText('image\nindex')
            lab2.setText('num sigma\nkill length')
            lefttoplayout.addWidget(lab0, 2, 0)
            lefttoplayout.addWidget(lab1, 2, 1)
            lefttoplayout.addWidget(lab2, 2, 2)
            self.bckndLineEditlist=[]
            self.bckndComboBoxlist=[]
            self.bckndSpinBoxlist=[]
            self.bckndcolors=['b','g', 'c', 'y']
            fullnamestemp=['blue', 'green', 'cyan', 'yellow']
            for i in range(4):
                ComboBox=QComboBox()
                self.fillimbckndComboBox(ComboBox)
                LineEdit=QLineEdit()
                SpinBox=QDoubleSpinBox()
                SpinBox.setValue(3.5)
                SpinBox.setRange(0.0,100.0)
                Label=QLabel()
                Label.setText(fullnamestemp[i])
                lefttoplayout.addWidget(LineEdit, i+3, 0)
                lefttoplayout.addWidget(ComboBox, i+3, 1)
                lefttoplayout.addWidget(SpinBox, i+3, 2)
                lefttoplayout.addWidget(Label, i+3, 3)
                self.bckndLineEditlist+=[LineEdit]
                self.bckndComboBoxlist+=[ComboBox]
                self.bckndSpinBoxlist+=[SpinBox]
        elif self.addpeaks:
            # add-peak mode: scale/position spin boxes and a read-only peak list
            lab1.setText('click plot->add peak @ position\nclick nav point->add peak to its list')
            lab2.setText('q-scale of new peak')
            lab3.setText('q-posn of new peak')
            self.addpeakclearButton=QPushButton()
            self.addpeakclearButton.setText('clear the entire add\npeak list (all points)')
            QObject.connect(self.addpeakclearButton,SIGNAL("pressed()"),self.addpeakclear)
            self.addpeaksaveButton=QPushButton()
            self.addpeaksaveButton.setText('save add peak list\nand update icounts')
            QObject.connect(self.addpeaksaveButton,SIGNAL("pressed()"),self.addpeaksave)
            self.addpeakscaleSpinBox=QDoubleSpinBox()
            self.addpeakscaleSpinBox.setValue(.5)
            self.addpeakscaleSpinBox.setRange(0.1,100.0)
            self.addpeakposnSpinBox=QDoubleSpinBox()
            self.addpeakposnSpinBox.setValue(50)
            self.addpeakposnSpinBox.setRange(q_qgrid_ind(self.qgrid, 0), q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
            self.addpeakTextBrowser=QTextBrowser()
            self.addpeakTextBrowser.setReadOnly(True)
            self.addpeakTextBrowser.setPlainText(addpeaktxt)
            lefttoplayout.addWidget(lab1, 0, 0, 1, 2)
            lefttoplayout.addWidget(self.addpeakclearButton, 1, 0)
            lefttoplayout.addWidget(self.addpeaksaveButton, 1, 1)
            lefttoplayout.addWidget(lab2, 2, 0)
            lefttoplayout.addWidget(lab3, 2, 1)
            lefttoplayout.addWidget(self.addpeakscaleSpinBox, 3, 0)
            lefttoplayout.addWidget(self.addpeakposnSpinBox, 3, 1)
            lefttoplayout.addWidget(self.addpeakTextBrowser, 4, 0, 3, 2)
        elif self.removepeaks:
            # remove-peak mode: click-to-remove toggle and a removed-count display
            lab1.setText('click peak->remove peak @ position\nclick nav point->remove nearest peak in its list')
            self.activeremoveCheckBox=QCheckBox()
            self.activeremoveCheckBox.setText('remove peaks with clicks is active')
            self.activeremoveCheckBox.setChecked(True)
            self.peaksremoved=QSpinBox()
            self.peaksremoved.setValue(0)
            self.peaksremoved.setDisabled(True)
            lab2.setText('number of peaks removed')
            lefttoplayout.addWidget(lab1, 0, 0)
            lefttoplayout.addWidget(self.activeremoveCheckBox, 1, 0)
            lefttoplayout.addWidget(lab2, 2, 0)
            lefttoplayout.addWidget(self.peaksremoved, 3, 0)
            self.qvalueofpeakremoval=None
        else:
            # plain plotting mode: log cutoff / axis floor / overlay offset
            lab1.setText('cutoff intensity\nfor log plots')
            lab2.setText('intensity axis\nlower limit')
            lab3.setText('offset for\noverlays')
            lefttoplayout.addWidget(lab1, 0, 0)
            lefttoplayout.addWidget(lab2, 0, 1)
            lefttoplayout.addWidget(lab3, 0, 2)
            lefttoplayout.addWidget(self.logcutSpinBox, 1, 0)
            lefttoplayout.addWidget(self.zeroSpinBox, 1, 1)
            lefttoplayout.addWidget(self.offsetSpinBox, 1, 2)
            lefttoplayout.addWidget(self.zerolineCheckBox, 1, 3)
        leftlayout.addLayout(lefttoplayout)
        rightlayout.addWidget(self.imgLabel)
        toolbar=self.plotw.gettoolbarinstance()
        leftlayout.addWidget(self.navw)
        rightlayout.addWidget(self.plotw)
        plotlayout.addLayout(leftlayout)
        plotlayout.addLayout(rightlayout)
        layout.addLayout(toplayout)
        layout.addLayout(plotlayout)
        self.setLayout(layout)
        self.fillimComboBox()
        self.numpdflabels=0
        self.offset=0
        self.savecount=0
        self.selectlist=[]
        self.plotpeaklist=None
        self.imnum=0
        # parse the numeric point index out of the initial image name
        # (prefixes 'if'/'id'/'im' checked before the plain 'i' prefix)
        self.imname=unicode(self.imComboBox.currentText())
        if self.imname.startswith('if') and self.imname[2:].isdigit():
            self.imnum=eval(self.imname[2:])
        elif self.imname.startswith('id') and self.imname[2:].isdigit():
            self.imnum=eval(self.imname[2:])
        elif self.imname.startswith('im') and self.imname[2:].isdigit():
            self.imnum=eval(self.imname[2:])
        elif self.imname.startswith('i') and self.imname[1:].isdigit():
            self.imnum=eval(self.imname[1:])
        self.navw.plotpoints(self.pointlist, [])
        QObject.connect(self.plotw, SIGNAL("genericclickonplot"), self.clickhandler)
def clickhandler(self, clickxy):
if self.addpeaks:
self.addpeakposnSpinBox.setValue(clickxy[0])
self.addpeak()
if self.removepeaks and self.activeremoveCheckBox.isChecked():
self.qvalueofpeakremoval=clickxy[0]
self.removepeak()
def fillimComboBox(self):
self.imComboBox.clear()
if len(self.imnamelist)>0:
for name in self.imnamelist:
self.imComboBox.insertItem(999, name)
else:
self.imComboBox.insertItem(0, 'err')
self.imComboBox.setCurrentIndex(0)
def fillimbckndComboBox(self, box):
box.clear()
box.insertItem(0, 'notused')
for pointind in self.pointlist:
box.insertItem(999, '%d' %pointind)
box.setCurrentIndex(0)
    def drawwithpeaks(self):
        """Load fitted peak positions/heights for the selected image and overlay them via draw().

        Reads the 'pkcounts' (or 'pk<name>') dataset for the current combo-box
        selection, sorts peaks by q, warns about peaks outside the q-grid or
        perfectly overlapping peaks, converts positions to the chosen x-axis,
        then stores [xvals, heights] in self.plotpeaklist and calls draw().
        """
        self.imname=unicode(self.imComboBox.currentText())
        # strip the image-name prefix ('if'/'id'/'im'/'i') to get the numeric part
        if self.imname.startswith('if'):
            temp=self.imname[2:]
        elif self.imname.startswith('id'):
            temp=self.imname[2:]
        elif self.imname.startswith('im'):
            temp=self.imname[2:]
        else:
            temp=self.imname[1:]
        if temp.isdigit():
            self.imnum=eval(temp)
            pkcmd="h5file[self.h5datagrpstr]['pkcounts'][self.imnum,:,:]"
        else:
            # non-numeric suffix: a named per-image peak dataset 'pk<name>'
            pkcmd="h5file[self.h5datagrpstr]['pk'+temp][:,:]"
        h5file=h5py.File(self.h5path, mode='r')
        try:
            # NOTE(review): eval of a fixed template string, not user input
            peaks=eval(pkcmd)
        except:
            h5file.close()
            print 'abort: problem getting peak data for ', self.imname
            return
        qvals, garb, heights=peakinfo_pksavearr(peaks)
        # sort peaks by ascending q so neighbor comparisons below are meaningful
        sortind=numpy.argsort(qvals)
        qvals=qvals[sortind]
        heights=heights[sortind]
        a, b, c=minmaxint_qgrid(self.qgrid)
        withinqgridinds=numpy.where((qvals>a)&(qvals<b))[0]
        if len(withinqgridinds)!=len(qvals):
            QMessageBox.warning(self,"warning", "some peaks positions beyond edges of dataset")
        if not self.imname.startswith('if'):
            # for non-fit images, replace fitted heights with the measured icounts
            # at each peak's nearest grid index
            pkinds=numpy.uint16(numpy.round(ind_qgrid_q(self.qgrid, qvals)))
            pkinds=pkinds[withinqgridinds]
            # detect peaks rounding to the same grid index (h5py fancy indexing
            # cannot take repeated indices)
            cmpneighbor=pkinds[:-1]==pkinds[1:]
            if numpy.any(cmpneighbor):
                QMessageBox.warning(self,"warning", "some peaks perfectly overlap, only plotting one of the overlaps with the correct height")
                cmpneighbor=numpy.logical_not(numpy.append(cmpneighbor, numpy.bool_([False])))
                pkinds=pkinds[cmpneighbor]
                withinqgridinds=withinqgridinds[cmpneighbor]
            if len(pkinds)>0:
                heights[withinqgridinds]=h5file[self.h5datagrpstr]['icounts'][self.imnum, pkinds]#if icounts then heights of peaks plotted as the ictouns value, except if the posn is beyond the limits
        h5file.close()
        # convert peak q-positions to the user-selected x-axis
        xtype=unicode(self.xaxisComboBox.currentText())
        if 'pix' in xtype:
            xvals=pix_q(qvals, self.L, self.wl, psize=self.psize)
        elif '(nm)' in xtype:
            xvals=d_q(qvals)
        elif '2' in xtype:
            xvals=twotheta_q(qvals, self.wl)
        else:
            xvals=qvals
        self.plotpeaklist=[xvals, heights]
        self.draw()
    def draw(self):
        """Plot the currently selected 1-d pattern, handling log scale, overlays and peak markers.

        Reads the matching counts dataset (if/id/im/icounts or a named dataset),
        converts the x-axis per the combo-box choice, applies the log cutoff and
        overlay offset, draws user-added peaks (q-axis only), and updates the
        navigator selection and the save-name label.
        """
        h5file=h5py.File(self.h5path, mode='r')
        self.imname=unicode(self.imComboBox.currentText())
        # pick the dataset matching the image-name prefix
        if self.imname.startswith('if'):
            temp=self.imname[2:]
            h5counts=h5file[self.h5datagrpstr]['ifcounts']
        elif self.imname.startswith('id'):
            temp=self.imname[2:]
            h5counts=h5file[self.h5datagrpstr]['idcounts']
        elif self.imname.startswith('im'):
            temp=self.imname[2:]
            h5counts=h5file[self.h5datagrpstr]['imcounts']
        else:
            temp=self.imname[1:]
            h5counts=h5file[self.h5datagrpstr]['icounts']
        if temp.isdigit():
            self.imnum=eval(temp)
            icmd="h5counts[self.imnum,:]"
        else:
            # non-numeric suffix: read the dataset stored under the full name
            icmd="h5file[self.h5datagrpstr][self.imname][:]"
        try:
            plotarr=eval(icmd)
        except:
            h5file.close()
            print 'abort: problem getting data for ', self.imname
            self.plotpeaklist=None
            return
        h5file.close()
        # choose the x-axis values; xtransformed flags any axis other than q/PHI
        xtype=unicode(self.xaxisComboBox.currentText())
        xtransformed=True
        if 'pix' in xtype:
            xvals=self.pvals
            t1='pix'
        elif '(nm)' in xtype:
            xvals=self.dvals
#            plotarr=numpy.array([plotarr[-1*i-1] for i in range(plotarr.size)])
            t1='d'
        elif '2' in xtype:
            xvals=self.tvals
            t1='2th'
        else:
            xvals=self.qvals
            if 'PHI' in xtype:
                t1='PHI'
            else:
                t1='q'
            xtransformed=False
        # drop NaN intensity points before plotting
        notnaninds=numpy.where(numpy.logical_not(numpy.isnan(plotarr)))
        xvals=xvals[notnaninds]
        plotarr=plotarr[notnaninds]
        if self.logCheckBox.isChecked():
            # clamp below-cutoff values so the log plot stays finite
            plotarr[plotarr<self.logcutSpinBox.value()]=self.logcutSpinBox.value()
        if self.overlayCheckBox.isChecked():
            if self.logCheckBox.isChecked():
                # multiplicative offset for log plots (seeded to 1 on first overlay)
                self.offset+=(self.offset==0)
                self.offset*=self.offsetSpinBox.value()
                plotarr*=self.offset
            else:
                # additive offset for linear plots
                self.offset+=self.offsetSpinBox.value()
                plotarr+=self.offset
            if self.zerolineCheckBox.isChecked():
                # prepend two points at the offset level to draw a baseline
                xvals=numpy.concatenate((numpy.array([xvals[-1], xvals[0]]), xvals))
                plotarr=numpy.concatenate((numpy.array([self.offset, self.offset]), plotarr))
        else:
            self.offset=0
            self.selectlist=[]
        if not self.imname.startswith('ib'):
            # background images ('ib...') are not tied to a navigator point
            self.selectlist+=[self.imnum]
        if (len(self.selectlist)+self.imname.startswith('ib'))==1:
            # first plotted image: start a fresh save name
            self.savename2=''.join(('_Ivs', t1,'_', self.imname))
        else:
            # overlays: append this image's name to the running save name
            self.savename2=''.join((self.savename2,'_', self.imname))
        ylowlim=self.zeroSpinBox.value()
        if ylowlim==0:
            ylowlim=None
        if self.bckndedit:
            # preview the pending 1d-background alteration
            plotarr+=self.newadditionfrom1dbckndsubtraction
#        self.plotw.axes.plot(xvals, plotarr,'k-', linewidth=2)
        if not self.plotpeaklist is None:
            # shift the pending peak overlay by the same offset as the data
            self.plotpeaklist=[self.plotpeaklist[0], self.plotpeaklist[1]+self.offset]
        self.plotw.performplot([xvals, plotarr], overlay=self.overlayCheckBox.isChecked(), log=self.logCheckBox.isChecked(), ylowlimit=ylowlim, peaklist=self.plotpeaklist)
        self.plotpeaklist=None
        if self.addpeaks:
            if xtransformed:
                print 'added peaks will only be plotted for q-axis'
            else:
                # draw a vertical red line at each user-added peak for this image
                ylim=self.plotw.axes.get_ylim()
                for peak in self.additionalpeaks:
                    if self.imnum==peak[0]:
                        self.plotw.axes.plot([peak[2], peak[2]], [ylim[0], ylim[1]], 'r-')
        self.navw.plotpoints(self.pointlist, [], select=self.selectlist)
        self.imgLabel.setText(self.savename2)
        self.plotw.fig.canvas.draw()
        self.navw.fig.canvas.draw()
    def toplotso(self):
        """Write the currently selected 1-d pattern to disk via writeplotso.

        Reads the same dataset that draw() plots (if/id/im/icounts, or a named
        1-d dataset), converts the x-axis per the combo-box choice, strips NaN
        points, and hands xvals/intensity to writeplotso with a descriptive name.
        """
        self.imname=unicode(self.imComboBox.currentText())
        # select the read command matching the image-name prefix
        if self.imname.startswith('if'):
            temp=self.imname[2:]
            icmd="h5file[self.h5datagrpstr]['ifcounts'][self.imnum,:]"
        elif self.imname.startswith('id'):
            temp=self.imname[2:]
            icmd="h5file[self.h5datagrpstr]['idcounts'][self.imnum,:]"
        elif self.imname.startswith('im'):
            temp=self.imname[2:]
            icmd="h5file[self.h5datagrpstr]['imcounts'][self.imnum,:]"
        else:
            temp=self.imname[1:]
            icmd="h5file[self.h5datagrpstr]['icounts'][self.imnum,:]"
        if temp.isdigit():
            self.imnum=eval(temp)
        else:
            # non-numeric suffix: read the dataset stored under that name instead
            icmd="h5file[self.h5datagrpstr][temp][:]"
        h5file=h5py.File(self.h5path, mode='r')
        try:
            plotarr=eval(icmd)
        except:
            h5file.close()
            print 'abort: problem getting data for ', self.imname
            self.plotpeaklist=None
            return
        h5file.close()
        # x-axis conversion mirrors draw()
        xtype=unicode(self.xaxisComboBox.currentText())
        if 'pix' in xtype:
            xvals=self.pvals
            t1='pix'
        elif '(nm)' in xtype:
            xvals=self.dvals
#            plotarr=numpy.array([plotarr[-1*i-1] for i in range(plotarr.size)])
            t1='d'
        elif '2' in xtype:
            xvals=self.tvals
            t1='2th'
        else:
            xvals=self.qvals
            t1='q'
        # drop NaN intensity points before export
        notnaninds=numpy.where(numpy.logical_not(numpy.isnan(plotarr)))
        xvals=xvals[notnaninds]
        plotarr=plotarr[notnaninds]
        writeplotso(self.runpath, xvals, plotarr, self.attrdict, t1, ''.join((self.savename1, '_Ivs', t1, '_', self.imname)))
def picclickprocess(self, picnum):
picname='i%d' %picnum #set selection to innn but then if ifnnn exists, set it to that instead
if picname in self.imnamelist:
for i in range(len(self.imnamelist)):
if self.imnamelist[i]==picname:
self.imComboBox.setCurrentIndex(i)
break
picname='if%d' %picnum
if picname in self.imnamelist:
for i in range(len(self.imnamelist)):
if self.imnamelist[i]==picname:
self.imComboBox.setCurrentIndex(i)
break
# if not self.overlayCheckBox.isChecked():
# self.selectlist=[]
#
# self.selectlist+=[self.imnum]
self.draw()
self.navw.plotpoints(self.pointlist, [], select=self.selectlist)
if self.addpeaks:
self.addpeak()
if self.removepeaks and self.activeremoveCheckBox.isChecked() and not (self.qvalueofpeakremoval is None):
self.removepeak()
self.navw.fig.canvas.draw()
    def drawpdfpeaks(self):
        """Open the PDF-search dialog to overlay reference diffraction peaks on the plot.

        For texture data ('h5tex' in self.type) a texture database file is used
        with an identity q-conversion; otherwise the dialog's defaults apply.
        """
        if 'h5tex' in self.type:
            idialog=pdfsearchDialog(self.parent, self.plotw, self.offset, filename='TextureDatabase.txt', cvtfcn=lambda x:x)
        else:
            idialog=pdfsearchDialog(self.parent, self.plotw, self.offset)
        idialog.exec_()
        # legacy manual PDF-overlay path, kept for reference:
#        idialog=pdfDialog(self)
#        if idialog.exec_():
#            label=unicode(idialog.labellineEdit.text())
#            colstr=unicode(idialog.colorlineEdit.text())
#            if colstr=='':
#                colstr='r'
#            pdf=idialog.pdflist[idialog.pdfcomboBox.currentIndex()]
#            h=idialog.heightSpinBox.value()
#            self.plotw.axes.hold(True)
#            for q, height in pdf:
#                if h!=0:
#                    height=h
#                else:
#                    height*=(self.plotw.axes.get_ylim()[1]-self.offset)*0.8
#                self.plotw.axes.plot([q, q], [self.offset, self.offset+height], colstr)
#            if label!='':
#                for garbage in range(self.numpdflabels):
#                    label=''.join((' ', label))
#                self.numpdflabels+=1
#                ylim=self.plotw.axes.get_ylim()
#                xlim=self.plotw.axes.get_xlim()
#                print xlim, ylim
#                self.plotw.axes.text(xlim[0]+.05*(xlim[1]-xlim[0]), ylim[1]-.05*(ylim[1]-ylim[0]), label, color=colstr, fontsize=14)
#            self.plotw.fig.canvas.draw()
def calc1dbcknd(self): #only supported for 'h5mar' type
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
ibmin=h5mar['ibmin']
self.plotw.axes.clear()
self.plotw.axes.hold(True)
self.alteredbcknd=ibmin[:]
self.newadditionfrom1dbckndsubtraction=ibmin[:]*0.0
self.savedictbcknd1d={}
self.savedictbcknd1d['imageindeces']=[]
self.savedictbcknd1d['peakexclusionwidths']=[]
self.savedictbcknd1d['interpindexinterval']=self.bckndindexintervalSpinBox.value()
self.savedictbcknd1d['enteredqvals']=[]
self.savedictbcknd1d['enteredexclusionwidths']=[]
imnum_sig_col=[]
enteredqvals_sig_col=[]
for i in range(4):
imnumstr=unicode(self.bckndComboBoxlist[i].currentText())
lestr=str(self.bckndLineEditlist[i].text())
if len(lestr)>0:
try:
eqv=numpy.float32(eval('['+lestr+']'))
if len(eqv.shape)!=1:
raise
enteredqvals_sig_col+=[(eqv, self.bckndSpinBoxlist[i].value(), self.bckndcolors[i])]
continue
except:
print 'FORMAT ERROR ON ENTERED Q-VALS. should be comma delimited Q-vals.'
if imnumstr.isdigit():
imnum_sig_col+=[(eval(imnumstr), self.bckndSpinBoxlist[i].value(), self.bckndcolors[i])]
if len(imnum_sig_col)==0 and len(enteredqvals_sig_col)==0:
return
bckndinds=set(range(int(round(self.qgrid[2]))))
for qvals, sigwidth, col in enteredqvals_sig_col:
self.savedictbcknd1d['enteredqvals']+=list(qvals)
self.savedictbcknd1d['enteredexclusionwidths']+=[sigwidth]*len(qvals)
peakposn=ind_qgrid_q(self.qgrid, qvals, fractional=True)
s=sigwidth/self.qgrid[1]
for p in peakposn:
bckndinds-=set(range(int(round(p-s)), int(round(p+s))+1))
for imnum, sigwidth, col in imnum_sig_col:
self.savedictbcknd1d['imageindeces']+=[imnum]
self.savedictbcknd1d['peakexclusionwidths']+=[sigwidth]
counts=h5mar['ifcounts'][imnum][:]
peakposn, peaksig, garb=peakinfo_pksavearr(h5mar['pkcounts'] [imnum, :, :])
peakposn=ind_qgrid_q(self.qgrid, peakposn, fractional=True)
peaksig=sigwidth*peaksig/self.qgrid[1]
for p, s in zip(peakposn, peaksig):
bckndinds-=set(range(int(round(p-s)), int(round(p+s))+1))
self.plotw.axes.plot(self.qvals, counts, col)
bckndinds=sorted(list(bckndinds))
self.alteredbcknd=fillgapswithinterp(range(int(round(self.qgrid[2]))), bckndinds, ibmin[bckndinds], indexinterval_fitinds=self.bckndindexintervalSpinBox.value())
self.plotw.axes.plot(self.qvals, ibmin, 'k')
self.plotw.axes.plot(self.qvals, self.alteredbcknd, 'r')
self.newadditionfrom1dbckndsubtraction=ibmin-self.alteredbcknd
self.plotw.fig.canvas.draw()
self.savename2='1dbckndalteration'
self.imgLabel.setText(self.savename2)
h5file.close()
    def save1dbcknd(self):
        """Apply the calculated 1d-background alteration to icounts and record it in the h5 file.

        Backs up the current icounts as 'asintegratedicounts' (with the
        calculation parameters attached as attrs), adds the background
        difference to every point's icounts, and stores the addition as
        'ibckndadd' plus an updated 'ibminnew' background curve.
        """
        h5file=h5py.File(self.h5path, mode='r+')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        icountspoint=h5mar['icounts']
        if 'asintegratedicounts' in h5mar:
            del h5mar['asintegratedicounts']
            print 'WARNING:There should not have been an existing icounts_asintegrated but it is being overwritten anyway'
        icountsasint=h5mar.create_dataset('asintegratedicounts', data=icountspoint[:, :])
        icountsasint.attrs['bcknd1daddition']=self.newadditionfrom1dbckndsubtraction
        for key, val in self.savedictbcknd1d.iteritems():
            # empty lists are skipped -- presumably to avoid writing useless
            # (or unwritable) empty attrs; TODO confirm against h5py behavior
            if isinstance(val, list) and len(val)==0:
                continue
            icountsasint.attrs[key]=val
        # apply the background addition to every measured point
        for pointind in self.pointlist:
            icountspoint[pointind, :]+=self.newadditionfrom1dbckndsubtraction[:]
        # delete-then-create so repeated saves replace the stored datasets
        if 'ibckndadd' in h5mar:
            del h5mar['ibckndadd']
        h5mar.create_dataset('ibckndadd', data=self.newadditionfrom1dbckndsubtraction)
        if 'ibminnew' in h5mar:
            del h5mar['ibminnew']
        if 'ibmin' in h5mar:
            h5mar.create_dataset('ibminnew', data=h5mar['ibmin'][:]-self.newadditionfrom1dbckndsubtraction[:])
        h5file.close()
def revert1dbcknd(self):
h5file=h5py.File(self.h5path, mode='r+')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'asintegratedicounts' in h5mar:
icountspoint=h5mar['icounts']
asintegratedpoint=h5mar['asintegratedicounts']
for pointind in self.pointlist:
icountspoint[pointind, :]=asintegratedpoint[pointind, :]
del h5mar['asintegratedicounts']
h5file.close()
def addpeak(self):
self.additionalpeaks+=[[self.imnum, self.addpeakscaleSpinBox.value(), self.addpeakposnSpinBox.value()]]
txt=''
for peak in self.additionalpeaks:
txt+='%d\t%.2f\t%.2f\n' %(int(round(peak[0])), peak[1], peak[2])
self.addpeakTextBrowser.setPlainText(txt)
def addpeakclear(self):
h5file=h5py.File(self.h5path, mode='r+')
if 'additionalpeaks' in h5file[self.h5datagrpstr]:
del h5file[self.h5datagrpstr]['additionalpeaks']
else:
self.additionalpeaks=[]
h5file.close()
def addpeaksave(self):
h5file=h5py.File(self.h5path, mode='r+')
if 'additionalpeaks' in h5file[self.h5datagrpstr]:
del h5file[self.h5datagrpstr]['additionalpeaks']
else:
grp=h5file[self.h5datagrpstr].create_dataset('additionalpeaks', data=numpy.float32(self.additionalpeaks))
grp.attrs['usedinfitting']=0
h5file.close()
    def removepeak(self):
        """Remove the fitted peak nearest self.qvalueofpeakremoval from the current image's pkcounts.

        The peak's entries in the 3-d 'pkcounts' dataset are overwritten with
        NaNs rather than deleted, so the array shape stays fixed. The prints are
        debug output showing the before/after peak positions. Increments the
        removed-peak counter spinbox.
        """
        h5file=h5py.File(self.h5path, mode='r+')
        pkqvals=h5file[self.h5datagrpstr]['pkcounts'][self.imnum, 0, :]
        # index of the peak whose q-position is closest to the clicked q-value
        ind=myargmin((pkqvals-self.qvalueofpeakremoval)**2)
        print self.qvalueofpeakremoval
        print (pkqvals-self.qvalueofpeakremoval)**2
        print h5file[self.h5datagrpstr]['pkcounts'][self.imnum, 0, :]
        # NaN out every stored parameter of the chosen peak (presumably
        # position/width/height along axis 1 -- TODO confirm)
        h5file[self.h5datagrpstr]['pkcounts'][self.imnum, :, ind]=numpy.float32([numpy.nan]*h5file[self.h5datagrpstr]['pkcounts'].shape[1])
        print self.imnum, ind
        print h5file[self.h5datagrpstr]['pkcounts'][self.imnum, 0, :]
        h5file.close()
        self.peaksremoved.setValue(1+self.peaksremoved.value())
def fillpeakComboBox(self):
self.imname=unicode(self.imComboBox.currentText())
if self.imname.startswith('if'):
temp=self.imname[2:]
else:
temp=self.imname[1:]
if temp.isdigit():
self.imnum=eval(temp)
self.peakComboBox.clear()
h5file=h5py.File(self.h5path, mode='r')
if 'pkcounts' in h5file[self.h5datagrpstr]:
peaks, garb, heights=peakinfo_pksavearr(h5file[self.h5datagrpstr]['pkcounts'][self.imnum, :,:])
for tup in zip(peaks, heights):
self.peakComboBox.insertItem(999, '%.2f,%.0f' %tup)
h5file.close()
self.peakComboBox.insertItem(999, 'sum of all')
    def plotfitpeak(self):
        """Overlay the fitted peak shape(s) for the current image on the existing plot.

        Only supported when the x-axis is q or PHI. Plots either the single peak
        selected in the combo box (evaluated within +-3 sigma) or, for
        'sum of all', the superposition of every fitted peak over the full
        q-range, using the peak-shape function named in the pkcounts attrs.
        """
        if not ('q' in unicode(self.xaxisComboBox.currentText()) or 'PHI' in unicode(self.xaxisComboBox.currentText())):
            print 'overlay fitted peaks only available for plotting vs q'
            return
        h5file=h5py.File(self.h5path, mode='r')
        q_pk, sig_pk, ht_pk=peakinfo_pksavearr(h5file[self.h5datagrpstr]['pkcounts'][self.imnum, :,:]) #this could be done more somply but this is safest
        # NOTE(review): eval resolves the stored shape name to a function in this
        # module's namespace -- trusts the h5 file's contents
        peakfcn=eval(h5file[self.h5datagrpstr]['pkcounts'].attrs['peakshape'])
        h5file.close()
        if unicode(self.peakComboBox.currentText())=='sum of all':
            qvals=self.qvals
            gaussvals=numpy.zeros(qvals.size, dtype='float32')
            for q, sig, ht in zip(q_pk, sig_pk, ht_pk):
                gaussvals+=peakfcn([q, sig, ht], qvals)#ht*numpy.exp(-0.5*((qvals-q)/sig)**2)
        else:
            # combo-box order matches the peak arrays' order
            pkindex=self.peakComboBox.currentIndex()
            q_pk=q_pk[pkindex]
            sig_pk=sig_pk[pkindex]
            ht_pk=ht_pk[pkindex]
            # evaluate only within +-3 sigma of the selected peak
            qvals=self.qvals[(self.qvals>=q_pk-3.0*sig_pk)&(self.qvals<=q_pk+3.0*sig_pk)]
            gaussvals=peakfcn([q_pk, sig_pk, ht_pk], qvals)#ht_pk*numpy.exp(-1.0*((qvals-q_pk)/sig_pk)**2)
        self.plotw.axes.hold(True)
        #self.plotw.axes.plot(qvals, gaussvals, 'r--', linewidth=3)
        self.plotw.performplot([qvals, gaussvals], overlay=True)
        self.plotw.fig.canvas.draw()
def save(self):
self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2))).replace('\\','/').encode())
def savenavimage(self):
self.navw.save(os.path.join(self.runpath, ''.join((self.savename1, '_IntPlotPoints', '%d' %self.savecount))).replace('\\','/').encode())
self.savecount+=1
#class associationtree(QDialog,
# ui_associationtree.Ui_associationtreeForm):
#
# def __init__(self, parent, maingrp):
# super(associationtree, self).__init__(parent)
# self.setupUi(self)
# dergrp=maingrp.Derived
# pointlist=maingrp._f_getAttr('pointlist')
# qgrid=dergrp.imap._f_getAttr('qgrid')
# qgrid_qq=dergrp.qq._f_getAttr('qgrid')
# numstrlist=['%03d' %num for num in pointlist]
#
# qqpkspoint=dergrp.qqpks
# qqpks=numpy.empty(qqpkspoint.shape, dtype=numpy.uint16)
# qqpks[:, :]=qqpkspoint[:, :]
#
# kindsets_innn_qqind=[[set([]) for temp in range(len(pointlist))] for temp2 in range(qqpks.shape[0])]
# pointcount=-1
# for numstr in numstrlist:
# pointcount+=1
##for this routine keep h5file open in read only the whole time so just use the pointers
# atabnnn=eval(''.join(('dergrp.atab', numstr)))
# annn=eval(''.join(('dergrp.a', numstr)))
# knnn=eval(''.join(('dergrp.k', numstr)))
#
## annnpoint=eval(''.join(('dergrp.a', numstr)))
## annn=numpy.empty(annnpoint.shape, dtype=numpy.int32)
## annn[:, :]=annnpoint[:, :]
##
## knnnpoint=eval(''.join(('dergrp.k', numstr)))
## knnn=numpy.empty(knnnpoint.shape, dtype=numpy.float32)
## knnn[:]=knnnpoint[:]
#
# kindsets_qqind=kindsets_qqind_atab(atabnnn, qqpks.shape[0])
# qqindsets_kind, unassoc=readannn(annn)
# mainitemA=QTreeWidgetItem([numstr], 0)
# mainitemB=QTreeWidgetItem([numstr], 0)
# self.treeAWidget.addTopLevelItem(mainitemA)
# self.treeBWidget.addTopLevelItem(mainitemB)
# count=-1
# for s in qqindsets_kind:
# count+=1
# if len(s)>0:
# item=QTreeWidgetItem(['k%d(%.2f)' %(count, q_qgrid_ind(qgrid, knnn[count]))], 0)
# mainitemA.addChild(item)
# for qqind in s:
# subitem=QTreeWidgetItem(['qq%d(%.2f,%.2f)' %(qqind, q_qgrid_ind(qgrid_qq, qqpks[qqind, 0]), q_qgrid_ind(qgrid_qq, qqpks[qqind, 1]))], 0)
# item.addChild(subitem)
# for kind in unassoc:
# item=QTreeWidgetItem(['k%d(%.2f)' %(kind, q_qgrid_ind(qgrid, knnn[kind]))], 0)
# mainitemA.addChild(item)
# count=-1
# for s in kindsets_qqind:
# count+=1
# if len(s)>0:
# item=QTreeWidgetItem(['qq%d(%.2f,%.2f)' %(count, q_qgrid_ind(qgrid_qq, qqpks[count, 0]), q_qgrid_ind(qgrid_qq, qqpks[count, 1]))], 0)
# mainitemA.addChild(item)
# for kind in s:
# subitem=QTreeWidgetItem(['k%d(%.2f)' %(kind, q_qgrid_ind(qgrid, knnn[kind]))], 0)
# item.addChild(subitem)
# kindsets_innn_qqind[count][pointcount]|=s
# count_qq=-1
# for list_point in kindsets_innn_qqind:
# count_qq+=1
# mainitemC=QTreeWidgetItem(['qq%d(%.2f,%.2f)' %(count_qq, q_qgrid_ind(qgrid_qq, qqpks[count_qq, 0]), q_qgrid_ind(qgrid_qq, qqpks[count_qq, 1]))], 0)
# self.treeCWidget.addTopLevelItem(mainitemC)
# count_point=-1
# for s in list_point:
# count_point+=1
# if len(s)>0:
# item=QTreeWidgetItem([numstrlist[count_point]], 0)
# knnn=eval(''.join(('dergrp.k', numstrlist[count_point])))
# mainitemC.addChild(item)
# for kind in s:
# subitem=QTreeWidgetItem(['k%d(%.2f)' %(kind, q_qgrid_ind(qgrid, knnn[kind]))], 0)
# item.addChild(subitem)
class plotqqwindow(QDialog):
    """Dialog for plotting the scattering-vector correlation ('qq') data of a run.

    Shows per-point qq images, the aggregate qq, and optionally ('qqnorm',
    'qqanlzd') derived images, with a navigator widget for point selection.
    The displaytrees path is dead (treewidgetbool is hard-coded False).
    """
    def __init__(self, parent, h5path, h5groupstr, runpath, navchoice, displaytrees=False):
        """Load qq data from the h5 file and build the plotting UI.

        navchoice: 0 -> x/z position navigator, 1/2 -> composition navigator
        (diffraction- or XRF-derived mole fractions). Aborts with a warning box
        if the 'qq' dataset is missing.
        """
        super(plotqqwindow, self).__init__(parent)
        self.h5path=h5path
        self.h5groupstr=h5groupstr
        self.runpath=runpath
        self.navchoice=navchoice
        # '<h5 filename without .h5>_<group>_' prefix for saved images
        self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
        self.imnamelist=[]
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if not ('qq' in h5mar):
            QMessageBox.warning(self,"failed", 'aborted qqplot because cannot find qq')
            h5file.close()
            return
        self.qq=readh5pyarray(h5mar['qq'])
        self.qgrid_qq=h5mar['qq'].attrs['qgrid']
        # NOTE(review): this looks like a project helper shadowing the builtin
        # getattr (a path string has no attribute named after the group) -- confirm
        attrdict=getattr(self.h5path, self.h5groupstr)
        self.pointlist=h5analysis.attrs['pointlist']
        self.qgrid=h5mar['icounts'].attrs['qgrid']
        self.qvals=q_qgrid_ind(self.qgrid)
        self.imnamelist=[]
        if 'qqcounts' in h5mar:#this shouldn't be necessary
            self.imnamelist+=['%d' %p for p in self.pointlist]
        #commenting on April 2009 because do not have atab stuff figured out yet
#        testlist=['qq','qqpktab']
#        testlist+=['a%03d' %picnum for picnum in self.pointlist]
#        testlist+=['atab%03d' %picnum for picnum in self.pointlist]
#        testlist+=['k%03d' %picnum for picnum in self.pointlist]
#        boollist=[not st in nodenames for st in testlist]
#        treewidgetbool=numpy.sum(boollist)==0
        treewidgetbool=False
        self.qqnormexists='qqnorm' in h5mar
        self.qqanlzdexists='qqpktab' in h5mar
        h5file.close()
        self.setWindowTitle('Plot scattering vector correlation (qq)')
        self.savenavimageButton=QPushButton()
        self.savenavimageButton.setText('save .png\nnavigator')
        QObject.connect(self.savenavimageButton,SIGNAL("pressed()"),self.savenavimage)
        self.xgrid=attrdict['xgrid']
        self.zgrid=attrdict['zgrid']
        self.xcoords=attrdict['x']
        self.zcoords=attrdict['z']
        if self.navchoice==0:
            # position-based navigator
            self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
        else:
            # composition-based navigator; fall back to positions on failure
            elstr=attrdict['elements']
            if self.navchoice==1:
                infotype='DPmolfracALL'
            else:
                infotype='XRFmolfracALL'
            self.elstrlist, self.compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=infotype)
            if self.compsarr is None:
                print 'NO COMPOSITION NAVIGATOR WINDOW BECAUSE PROBLEM CALCULATING COMPOSITIONS'
                self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
            else:
                print 'COMPS:', self.compsarr
                self.navw = compnavigatorwidget(self, self.compsarr, self.elstrlist)
        QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
        self.logCheckBox=QCheckBox()
        self.logCheckBox.setText('logarithmic\nintensity')
        self.logCheckBox.setChecked(False)
        self.imComboBox=QComboBox()
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        toplayout=QHBoxLayout()
        toplayout.addWidget(self.savenavimageButton)
        toplayout.addWidget(self.logCheckBox)
        toplayout.addWidget(self.imComboBox)
        toplayout.addWidget(self.drawButton)
        toplayout.addWidget(self.saveButton)
        layout=QVBoxLayout()
        #leftlayout=QVBoxLayout()
        rightlayout=QVBoxLayout()
        #lefttoplayout=QGridLayout()
        plotlayout=QHBoxLayout()
        self.imgLabel=QLabel()
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        rightlayout.addWidget(self.imgLabel)
        rightlayout.addWidget(self.plotw)
        plotlayout.addWidget(self.navw)
        plotlayout.addLayout(rightlayout)
        layout.addLayout(toplayout)
        layout.addLayout(plotlayout)
        if displaytrees and treewidgetbool:
            # dead branch while treewidgetbool is hard-coded False (see above)
            superlayout=QHBoxLayout()
            superlayout.addLayout(layout)
            treelabelsLayout=QHBoxLayout()
            for msg in ['1d spectrum->instanced qq peak->associated 1d peaks', '1d spectrum->1d peak->associated qq peaks', 'qq peaks->1d spectrum containing peak->1d peaks']:
                aLabel=QLabel()
                aLabel.setText(msg)
                treelabelsLayout.addWidget(aLabel)
            treeLayout=QHBoxLayout()
            self.treeAWidget=QTreeWidget()
            self.treeBWidget=QTreeWidget()
            self.treeCWidget=QTreeWidget()
            treeLayout.addWidget(self.treeAWidget)
            treeLayout.addWidget(self.treeBWidget)
            treeLayout.addWidget(self.treeCWidget)
            treebuttonLayout=QHBoxLayout()
            treeAbutton=QPushButton()
            treeAbutton.setText('plot selection\n(select either type of peak)')
            QObject.connect(treeAbutton,SIGNAL("pressed()"),self.drawtreeA)
            treeBbutton=QPushButton()
            treeBbutton.setText('plot selection\n(select either type of peak)')
            QObject.connect(treeBbutton,SIGNAL("pressed()"),self.drawtreeB)
            treeCbutton=QPushButton()
            treeCbutton.setText('plot selection\n(select either type of peak)')
            QObject.connect(treeCbutton,SIGNAL("pressed()"),self.drawtreeC)
            treebuttonLayout.addWidget(treeAbutton)
            treebuttonLayout.addWidget(treeBbutton)
            treebuttonLayout.addWidget(treeCbutton)
            fulltreeLayout=QVBoxLayout()
            fulltreeLayout.addLayout(treelabelsLayout)
            fulltreeLayout.addLayout(treeLayout)
            fulltreeLayout.addLayout(treebuttonLayout)
            superlayout.addLayout(fulltreeLayout)
            #superlayout.addWidget(associationtree(self, grp))
            #h5file=tables.openFile(self.h5path, mode='r')
            # NOTE(review): self.fullgrpstr is never assigned and h5file was
            # closed above -- these lines would fail if this branch ever ran
            grp=eval(self.fullgrpstr)
            self.fillintrees(grp)
            h5file.close()
            self.setLayout(superlayout)
        else:
            self.setLayout(layout)
        self.fillimComboBox()
        # sentinel imnums: 999=aggregate qq, 998=qqnorm, 997=qqanlzd
        self.imname=unicode(self.imComboBox.currentText())
        if self.imname=='qq':
            self.imnum=999
        elif self.imname=='qqnorm':
            self.imnum=998
        elif self.imname=='qqanlzd':
            self.imnum=997
        else:
            self.imnum=eval(self.imname)
        self.navw.plotpoints(self.pointlist, [])
        self.navw.fig.canvas.draw()
    def fillintrees(self, maingrp):#April 2009 this doesn't work because qqpktab and other stuff not worked out yet
        """Populate the three association trees (BROKEN: references undefined dergrp/numstrlist -- see the author's comment above)."""
        qqpkinds=numpy.uint16([[arow['qqindhigh'], arow['qqindlow']] for arow in dergrp.qqpktab])
        # per-qq-peak, per-point sets of associated 1d-peak indices
        kindsets_innn_qqind=[[set([]) for temp in range(len(self.pointlist))] for temp2 in range(qqpkinds.shape[0])]
        pointcount=-1
        for numstr in numstrlist:
            pointcount+=1
#for this routine keep h5file open in read only the whole time so just use the pointers
            atabnnn=eval(''.join(('dergrp.atab', numstr)))
            annn=eval(''.join(('dergrp.a', numstr)))
            knnn=eval(''.join(('dergrp.k', numstr)))
#            annnpoint=eval(''.join(('dergrp.a', numstr)))
#            annn=numpy.empty(annnpoint.shape, dtype=numpy.int32)
#            annn[:, :]=annnpoint[:, :]
#
#            knnnpoint=eval(''.join(('dergrp.k', numstr)))
#            knnn=numpy.empty(knnnpoint.shape, dtype=numpy.float32)
#            knnn[:]=knnnpoint[:]
            kindsets_qqind=kindsets_qqind_atab(atabnnn, qqpkinds.shape[0])
            qqindsets_kind, unassoc=readannn(annn)
            mainitemA=QTreeWidgetItem([numstr], 0)
            mainitemB=QTreeWidgetItem([numstr], 0)
            self.treeAWidget.addTopLevelItem(mainitemA)
            self.treeBWidget.addTopLevelItem(mainitemB)
            # tree A: 1d peak -> associated qq peaks
            for count, s in enumerate(qqindsets_kind):
                if len(s)>0:
                    item=QTreeWidgetItem(['k%d(%.2f)' %(count, q_qgrid_ind(self.qgrid, knnn[count]))], 0)
                    mainitemA.addChild(item)
                    for qqind in s:
                        subitem=QTreeWidgetItem(['qq%d(%.2f,%.2f)' %(qqind, q_qgrid_ind(self.qgrid_qq, qqpkinds[qqind, 0]), q_qgrid_ind(self.qgrid_qq, qqpkinds[qqind, 1]))], 0)
                        item.addChild(subitem)
            for kind in unassoc:
                item=QTreeWidgetItem(['k%d(%.2f)' %(kind, q_qgrid_ind(self.qgrid, knnn[kind]))], 0)
                mainitemA.addChild(item)
            # tree B: qq peak -> associated 1d peaks (also accumulates tree C data)
            for count, s in enumerate(kindsets_qqind):
                if len(s)>0:
                    item=QTreeWidgetItem(['qq%d(%.2f,%.2f)' %(count, q_qgrid_ind(self.qgrid_qq, qqpkinds[count, 0]), q_qgrid_ind(self.qgrid_qq, qqpkinds[count, 1]))], 0)
                    mainitemB.addChild(item)
                    for kind in s:
                        subitem=QTreeWidgetItem(['k%d(%.2f)' %(kind, q_qgrid_ind(self.qgrid, knnn[kind]))], 0)
                        item.addChild(subitem)
                    kindsets_innn_qqind[count][pointcount]|=s
        # tree C: qq peak -> point -> associated 1d peaks
        for count_qq, list_point in enumerate(kindsets_innn_qqind):
            mainitemC=QTreeWidgetItem(['qq%d(%.2f,%.2f)' %(count_qq, q_qgrid_ind(self.qgrid_qq, qqpkinds[count_qq, 0]), q_qgrid_ind(self.qgrid_qq, qqpkinds[count_qq, 1]))], 0)
            self.treeCWidget.addTopLevelItem(mainitemC)
            count_point=-1
            for s in list_point:
                count_point+=1
                if len(s)>0:
                    item=QTreeWidgetItem([numstrlist[count_point]], 0)
                    knnn=eval(''.join(('dergrp.k', numstrlist[count_point])))
                    mainitemC.addChild(item)
                    for kind in s:
                        subitem=QTreeWidgetItem(['k%d(%.2f)' %(kind, q_qgrid_ind(self.qgrid, knnn[kind]))], 0)
                        item.addChild(subitem)
    def fillimComboBox(self):
        """Populate the image combo box with per-point entries plus qq/qqnorm/qqanlzd."""
        self.imComboBox.clear()
        if len(self.imnamelist)>0:
            for name in self.imnamelist:
                # NOTE(review): entries are plain digit strings ('%d' %p), so
                # slicing off two chars looks like a holdover from prefixed
                # names and mangles indices -- confirm against picclickprocess
                self.imComboBox.insertItem(999, name[2:])
        else:
            self.imComboBox.insertItem(0, 'err')
        self.imComboBox.insertItem(999, 'qq')
        if self.qqnormexists:
            self.imComboBox.insertItem(999, 'qqnorm')
        if self.qqanlzdexists:
            self.imComboBox.insertItem(999, 'qqanlzd')
    def drawtreeA(self):
        """Plot the selection from tree A: a qq peak with its parent 1d peak, or a 1d peak with all its qq children."""
        temp=self.treeAWidget.selectedItems()
        if len(temp)>0:
            item=temp[0]
            # labels look like 'k3(1.23)' or 'qq5(1.23,4.56)'; the parenthesized
            # part is parsed back out with str.partition
            if unicode(item.text(0)).startswith('qq'):
                qqlist=[eval(''.join(('[', unicode(item.text(0)).partition('(')[2].partition(')')[0], ']')))]
                klist=[eval(unicode(item.parent().text(0)).partition('(')[2].partition(')')[0])]
            elif unicode(item.text(0)).startswith('k'):
                klist=[eval(unicode(item.text(0)).partition('(')[2].partition(')')[0])]
                qqlist=[]
                for chnum in range(item.childCount()):
                    qqlist+=[eval(''.join(('[', unicode(item.child(chnum).text(0)).partition('(')[2].partition(')')[0], ']')))]
            self.drawfromtree(klist, qqlist)
    def drawtreeB(self):
        """Plot the selection from tree B: a 1d peak with its parent qq peak, or a qq peak with all its 1d children."""
        temp=self.treeBWidget.selectedItems()
        if len(temp)>0:
            item=temp[0]
            if unicode(item.text(0)).startswith('k'):
                klist=[eval(unicode(item.text(0)).partition('(')[2].partition(')')[0])]
                qqlist=[eval(''.join(('[', unicode(item.parent().text(0)).partition('(')[2].partition(')')[0], ']')))]
            elif unicode(item.text(0)).startswith('qq'):
                qqlist=[eval(''.join(('[', unicode(item.text(0)).partition('(')[2].partition(')')[0], ']')))]
                klist=[]
                for chnum in range(item.childCount()):
                    klist+=[eval(unicode(item.child(chnum).text(0)).partition('(')[2].partition(')')[0])]
            self.drawfromtree(klist, qqlist)
    def drawtreeC(self):
        """Plot the selection from tree C: a single 1d peak or a single qq peak."""
        temp=self.treeCWidget.selectedItems()
        if len(temp)>0:
            item=temp[0]
            if unicode(item.text(0)).startswith('k'):
                klist=[eval(unicode(item.text(0)).partition('(')[2].partition(')')[0])]
                qqlist=[]
            elif unicode(item.text(0)).startswith('qq'):
                qqlist=[eval(''.join(('[', unicode(item.text(0)).partition('(')[2].partition(')')[0], ']')))]
                klist=[]
            self.drawfromtree(klist, qqlist)
    def drawfromtree(self, klist, qqlist):
        """Overlay the given 1d-peak q-values (red) and qq-peak q-pairs (blue) on the qq image."""
        if len(klist)==0:
            redindarr=None
        else:
            # NOTE(review): converts with qgrid_qq even though klist holds 1d-peak
            # q-values -- confirm whether self.qgrid was intended here
            redindarr=ind_qgrid_q(self.qgrid_qq, numpy.array(klist))
        if len(qqlist)==0:
            blueind2darr=None
        else:
            blueind2darr=ind_qgrid_q(self.qgrid_qq, numpy.array(qqlist))
        self.plotw.performqqtreeplot(self.qq.T, redindarr, blueind2darr, self.qvals)
        self.savename2=''.join(('_qqAssociations'))
        self.navw.plotpoints(self.pointlist, [])
        self.plotw.fig.canvas.draw()
        self.navw.fig.canvas.draw()
        self.imgLabel.setText(self.savename2)
    def draw(self):
        """Draw the selected qq image: aggregate qq (999), qqanlzd (997), or a per-point qqcounts slice."""
        self.imname=unicode(self.imComboBox.currentText())
        if self.imname=='qq':
            self.imnum=999
            self.imname=''
            select=[]
        elif self.imname=='qqnorm':
            self.imnum=998
            self.imname='norm'
            select=[]
        elif self.imname=='qqanlzd':
            self.imnum=997
            self.imname='anlzd'
            select=[]
        else:
            self.imnum=eval(self.imname)
            select=[self.imnum]
        if self.imnum==997:#April 2009 this doesn't work becauseqqpktab and other stuff not worked out yet
            #h5file=tables.openFile(self.h5path, mode='r')
            # NOTE(review): self.fulldergrpstr is never assigned and h5file is
            # not open here -- this branch would fail if qqanlzd were selected
            dergrp=eval(self.fulldergrpstr)
            plotarrtup=makeqqnormpeakplotimage(self.qq, qqpktuplist_h5qqpktab(dergrp.qqpktab))
            h5file.close()
            # transpose each RGB channel so the image orientation matches performplot
            temp=numpy.empty(plotarrtup[0].shape)
            for i in [0, 1, 2]:
                temp[:, :, i]=plotarrtup[0][:, :, i].T
            self.plotw.performqqnormpeakplot(temp, qvals=self.qvals)
        elif self.imnum==999:
            self.plotw.performplot(self.qq.T, upperorigin=False, axesformat='qq', qvals=self.qvals)
        else:
            h5file=h5py.File(self.h5path, mode='r')
            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
            h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
            plotarr=h5mar['qqcounts'][self.imnum, :, :]
            h5file.close()
            self.plotw.performplot(plotarr.T, upperorigin=False, axesformat='qq', qvals=self.qvals)
        self.savename2=''.join(('_qq', self.imname))
        self.navw.plotpoints(self.pointlist, [], select=select)
        self.plotw.fig.canvas.draw()
        self.navw.fig.canvas.draw()
        self.imgLabel.setText(self.savename2)
    def picclickprocess(self, picnum):
        """Handle a navigator click: select the clicked point's qq image and redraw."""
        picname='%d' %picnum
        if picname in self.imnamelist:
            for i in range(len(self.imnamelist)):
                if self.imnamelist[i]==picname:
                    self.imComboBox.setCurrentIndex(i)
                    break
        self.draw()
    def save(self):
        """Save the current qq plot image under runpath."""
        self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2))).replace('\\','/').encode())
    def savenavimage(self):
        """Save the navigator image under runpath."""
        self.navw.save(os.path.join(self.runpath, ''.join((self.savename1, '_qqpoint'))).replace('\\','/').encode())
class plotdatwindow(QDialog):
    """Dialog for plotting raw square detector images read from binary files."""
    def __init__(self, parent, runpath):
        """Build the minimal UI: log toggle, draw/save buttons, image label, plot widget."""
        super(plotdatwindow, self).__init__(parent)
        self.runpath=runpath
        self.setWindowTitle('Plot images from binary files')
        self.logCheckBox=QCheckBox()
        self.logCheckBox.setText('logarithmic\nintensity')
        self.logCheckBox.setChecked(False)
        self.drawButton=QPushButton()
        self.drawButton.setText('select and draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        toplayout=QHBoxLayout()
        toplayout.addWidget(self.logCheckBox)
        toplayout.addWidget(self.drawButton)
        toplayout.addWidget(self.saveButton)
        rightlayout=QVBoxLayout()
        self.imgLabel=QLabel()
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        rightlayout.addLayout(toplayout)
        rightlayout.addWidget(self.imgLabel)
        rightlayout.addWidget(self.plotw)
        self.setLayout(rightlayout)
        self.datpath=self.runpath  # start file browsing at the run directory
    def draw(self):
        """Prompt for a binary image file, reshape it into a square array and plot it."""
        temp = mygetopenfile(self, xpath=self.datpath,markstr='XRD binary image')
        if temp!='':
            self.datpath=temp
            self.savename=os.path.splitext(os.path.split(self.datpath)[1])[0]
            data = numpy.fromfile(self.datpath, dtype='uint16') #TODO: make the data type less constrictive
            # bug fix: shape dimensions must be ints -- numpy.sqrt returns a
            # float, which numpy rejects when assigned to .shape
            side=int(round(numpy.sqrt(len(data))))
            data.shape = (side, side)
            self.plotw.performplot(data, log=self.logCheckBox.isChecked())
            self.plotw.fig.canvas.draw()
            self.imgLabel.setText(self.savename)
    def save(self):
        """Save the current plot as <savename>.png in the run directory."""
        self.plotw.save(os.path.join(self.runpath, ''.join((self.savename, '.png'))).replace('\\','/').encode())
class plothistwindow(QDialog):
    """Dialog for plotting histograms of single-pixel counts from XRD images in an
    HDF5 file, with an x,z or ternary-composition navigator for picking sample
    points, optional kill-map masking and background subtraction.
    """
    def __init__(self, parent, h5path, h5groupstr, runpath, navchoice):
        # navchoice: 0 -> x,z coordinate navigator; 1 -> DP mole-fraction ternary
        # navigator; otherwise XRF mole-fraction ternary navigator.
        super(plothistwindow, self).__init__(parent)
        self.h5path=h5path
        self.h5groupstr=h5groupstr
        self.runpath=runpath
        self.navchoice=navchoice
        # filename prefix for saved images: "<h5 file stem>_<group>_"
        self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
        self.imnamelist=[]
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        h5marcounts=h5file['/'.join((self.h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
        self.bin=getbin(h5analysis)
        # NOTE(review): 'getattr' is a project helper (shadowing the builtin) that
        # returns the attribute dict for this h5 group — see uses of self.attrdict.
        self.attrdict=getattr(self.h5path, self.h5groupstr)
        self.pointlist=h5analysis.attrs['pointlist']
        self.qgrid=h5mar['icounts'].attrs['qgrid']
        self.qvals=q_qgrid_ind(self.qgrid)
        self.imnamelist=[]
        # NOTE(review): this loop only assigns gname, which is never used again —
        # appears to be dead code left over from an earlier version.
        for group in h5mar:
            if isinstance(group, h5py.Group):
                gname=group.name.rpartition('/')[2]
        # Build the list of plottable dataset names: 2-D datasets matching the
        # number of sample points get one entry; 3-D per-point stacks get one
        # entry per sample point.
        for node in h5mar.iterobjects():
            if isinstance(node, h5py.Dataset):
                if len(node.shape)==2 and node.shape[0]==h5marcounts.shape[0]:
                    self.imnamelist+= [node.name.rpartition('/')[2]]
                # NOTE(review): comparing shape[0] (an int) to h5marcounts.shape
                # (a tuple) is always False — confirm the intended condition.
                elif len(node.shape)==3 and node.shape[0]==h5marcounts.shape: #this isn't exclusive enough but oh well
                    self.imnamelist+=[node.name.rpartition('/')[2]+'_%d' %p for p in self.pointlist]
        self.imnamelist+=['raw-%d' %p for p in self.pointlist]
        self.killmap=getkillmap(h5analysis.attrs['killmapstr'])
        self.killmapbin=getkillmap(h5analysis.attrs['killmapstr'], bin=self.bin)
        #for display killmap also takes out pixels not in imap - for editing killmap, don't involve imap
        self.imap, self.qgrid=getimapqgrid(h5analysis.attrs['imapstr'])
        self.imapbin=getimapqgrid(h5analysis.attrs['imapstr'], qgrid=False, bin=self.bin)
        self.killmap*=(self.imap!=0)
        self.killmapbin*=(self.imapbin!=0)
        # Load the background arrays (full-resolution and binned) named after the
        # first three letters of the background type.
        self.bcknd=self.attrdict['bcknd']
        bstr=''.join(('b', self.bcknd[:3]))
        self.bckndarr=readh5pyarray(h5mar[bstr])
        bstr=''.join((bstr, 'bin%d' %self.bin))
        self.bckndarrbin=readh5pyarray(h5mar[bstr])
        if self.bcknd=='minanom':
            # minanom background needs the anomaly-calculation inputs as well
            if 'bimap' in h5mar:
                bimap=readh5pyarray(h5mar['bimap'])
                bqgrid=h5mar['bimap'].attrs['bqgrid']
            else:
                bimap=None
                bqgrid=None
            self.banomcalc=(self.imapbin, self.qgrid, self.attrdict, bimap, bqgrid)
            self.bminanomf=readh5pyarray(h5mar['bminanomf'])
        h5file.close()
        self.imnamelist.sort()
        # --- widget construction ---
        self.killCheckBox=QCheckBox()
        self.killCheckBox.setText('apply kill map\nin main image')
        self.killCheckBox.setChecked(True)
        self.bckndCheckBox=QCheckBox()
        self.bckndCheckBox.setText('subtract background')
        self.bckndCheckBox.setChecked(True)
        self.setWindowTitle('Plot histogram of single pixel counts')
        self.fromdatButton=QPushButton()
        self.fromdatButton.setText('select .dat\nbinary file')
        QObject.connect(self.fromdatButton,SIGNAL("pressed()"),self.fromdat)
        self.savenavimageButton=QPushButton()
        self.savenavimageButton.setText('save .png\nnavigator')
        QObject.connect(self.savenavimageButton,SIGNAL("pressed()"),self.savenavimage)
        self.xgrid=self.attrdict['xgrid']
        self.zgrid=self.attrdict['zgrid']
        self.xcoords=self.attrdict['x']
        self.zcoords=self.attrdict['z']
        # choose the navigator widget; composition navigation falls back to the
        # x,z navigator when compositions cannot be computed
        if self.navchoice==0:
            self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
        else:
            elstr=self.attrdict['elements']
            if self.navchoice==1:
                infotype='DPmolfracALL'
            else:
                infotype='XRFmolfracALL'
            self.elstrlist, self.compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=infotype)
            if self.compsarr is None:
                print 'NO COMPOSITION NAVIGATOR WINDOW BECAUSE PROBLEM CALCULATING COMPOSITIONS'
                self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
            else:
                print 'COMPS:', self.compsarr
                self.navw = compnavigatorwidget(self, self.compsarr, self.elstrlist)
        QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
        self.savetxtButton=QPushButton()
        self.savetxtButton.setText('save selected\nimage as ASCII')
        QObject.connect(self.savetxtButton,SIGNAL("pressed()"),self.savetxt)
        self.overlayCheckBox=QCheckBox()
        self.overlayCheckBox.setText('overlay on\nexisting plots')
        self.overlayCheckBox.setChecked(False)
        self.imComboBox=QComboBox()
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        toplayout=QHBoxLayout()
        toplayout.addWidget(self.fromdatButton)
        toplayout.addWidget(self.killCheckBox)
        toplayout.addWidget(self.bckndCheckBox)
        toplayout.addWidget(self.savenavimageButton)
        toplayout.addWidget(self.overlayCheckBox)
        toplayout.addWidget(self.imComboBox)
        toplayout.addWidget(self.drawButton)
        toplayout.addWidget(self.saveButton)
        toplayout.addWidget(self.savetxtButton)
        layout=QVBoxLayout()
        leftlayout=QVBoxLayout()
        rightlayout=QVBoxLayout()
        lefttoplayout=QGridLayout()
        plotlayout=QHBoxLayout()
        # histogram binning controls: lowest counts value, bin width (0 = auto),
        # number of bins
        self.startSpinBox=QSpinBox()
        self.startSpinBox.setValue(0)
        self.startSpinBox.setRange(0,10000000 )
        self.intSpinBox=QSpinBox()
        self.intSpinBox.setValue(0)
        self.intSpinBox.setRange(0,10000000 )
        self.numSpinBox=QSpinBox()
        self.numSpinBox.setValue(1000)
        self.numSpinBox.setRange(0,10000000 )
        self.imgLabel=QLabel()
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        lab1=QLabel()
        lab2=QLabel()
        lab3=QLabel()
        lab1.setText('lowest counts')
        lab2.setText('width of counts bins\nzero->auto')
        lab3.setText('number of bins')
        lefttoplayout.addWidget(lab1, 0, 0)
        lefttoplayout.addWidget(lab2, 0, 1)
        lefttoplayout.addWidget(lab3, 0, 2)
        lefttoplayout.addWidget(self.startSpinBox, 1, 0)
        lefttoplayout.addWidget(self.intSpinBox, 1, 1)
        lefttoplayout.addWidget(self.numSpinBox, 1, 2)
        leftlayout.addLayout(lefttoplayout)
        rightlayout.addWidget(self.imgLabel)
        leftlayout.addWidget(self.navw)
        rightlayout.addWidget(self.plotw)
        plotlayout.addLayout(leftlayout)
        plotlayout.addLayout(rightlayout)
        layout.addLayout(toplayout)
        layout.addLayout(plotlayout)
        self.setLayout(layout)
        self.fillimComboBox()
        self.savecount=0
        self.selectlist=[]
        #self.imnum=0
        self.imname=unicode(self.imComboBox.currentText())
        self.navw.plotpoints(self.pointlist, [])
        self.killbool=False
        self.bckndbool=False
        self.binbool=False
        self.dat=False
        self.datpath=self.runpath
        # default circular kill maps from the CHESS run file, used by createhist
        # when no explicit kill map has already been applied
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5chess=CHESSRUNFILE()
        self.circkillmap=readh5pyarray(h5chess[getxrdname(h5analysis)+'killmap'])
        self.circkillmapbin=readh5pyarray(h5chess[getxrdname(h5analysis)+'killmapbin%d' %self.bin])
        h5chess.close()
        h5file.close()
    def fillimComboBox(self):
        """Populate the image selector with the discovered dataset names."""
        self.imComboBox.clear()
        if len(self.imnamelist)>0:
            for name in self.imnamelist:
                self.imComboBox.insertItem(999, name)
        else:
            self.imComboBox.insertItem(0, 'err')
        self.imComboBox.setCurrentIndex(0)
    def draw(self):
        """Load the selected image, apply kill map / background subtraction per the
        checkboxes, and plot its single-pixel counts histogram."""
        self.imname=unicode(self.imComboBox.currentText())
        self.dat=False
        h5file=h5py.File(self.h5path, mode='r+')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        h5marcounts=h5file['/'.join((self.h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
        # names like 'raw-7' or '<dataset>-7' refer to one slice of a per-point
        # stack; plain names refer to a whole 2-D dataset
        if '-' in self.imname:
            # NOTE(review): imnum stays a string here but is later used as an
            # array index and in self.bminanomf[imnum, ...] — confirm whether an
            # int conversion is expected.
            imtype, garb, imnum=self.imname.partition('-')
            if imtype=='raw':
                plotarr=h5marcounts[imnum, :, :]
            else:
                plotarr=h5mar[imtype][imnum, :, :]
        else:
            imtype=None
            plotarr=readh5pyarray(h5mar[self.imname])
        h5file.close()
        if not self.overlayCheckBox.isChecked():
            self.selectlist=[]
            self.selectlistnav=[]
        self.selectlist+=[self.imname]
        # accumulate the save name across overlaid histograms
        if len(self.selectlist)==1:
            self.savename2=''.join(('_hist', '_', self.imname))
        else:
            self.savename2=''.join((self.savename2,'_', self.imname))
        # NOTE(review): temp is not used after this assignment
        temp=self.imname[1:]
        self.killbool=False
        self.bckndbool=False
        self.binbool=False
        diffracbool=not (imtype is None)
        if diffracbool:
            self.selectlistnav+=[imnum]
            self.navw.plotpoints(self.pointlist, [], select=self.selectlistnav)
            self.killbool=self.killCheckBox.isChecked()
            self.bckndbool=self.bckndCheckBox.isChecked()
            self.binbool='bin' in imtype
        else:
            if not self.overlayCheckBox.isChecked():
                self.navw.plotpoints(self.pointlist, [], select=[])
            self.killbool=self.killCheckBox.isChecked()
        totpix=None
        if diffracbool:
            if self.bckndbool:
                if self.binbool:
                    if self.bckndarrbin is None:
                        QMessageBox.warning(self,"failed", "binned background not found")
                    else:
                        if self.bcknd=='minanom':
                            if self.bminanomf[imnum, 0]<0:
                                QMessageBox.warning(self,"failed", "minanom background not available and will not be calculated with binning\n try again without binning but it will take while")
                            else:
                                # NOTE(review): self.imnum is never assigned in this
                                # class (only imnum, the local string) — verify.
                                h5file=h5py.File(self.h5path, mode='r')
                                h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                                banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                                h5file.close()
                                plotarr=bckndsubtract(plotarr, self.bckndarrbin, self.killmapbin, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[imnum, 0], self.bminanomf[imnum, 1]))[0]
                        elif 'lin' in self.bcknd:
                            plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(self.bckndarrbin, imnum), self.killmapbin, btype=self.bcknd, linweights=self.blinwts[imnum])[0]
                        else:
                            plotarr=bckndsubtract(plotarr, self.bckndarrbin, self.killmapbin, btype=self.bcknd)[0]
                        totpix=self.killmapbin.sum()
                else:
                    if self.bckndarr is None:
                        QMessageBox.warning(self,"failed", "background not found")
                    else:
                        if self.bcknd=='minanom':
                            if self.bminanomf[imnum, 0]<0:
                                print 'WARNING: calculating bminanom background (for histogram analysis) on the fly: INEFFICIENT'
                                temp=bckndsubtract(plotarr, self.bckndarr, self.killmap, btype=self.bcknd, banomcalc=self.banomcalc)
                                plotarr=temp[0]
                            else:
                                h5file=h5py.File(self.h5path, mode='r')
                                h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                                banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                                h5file.close()
                                plotarr=bckndsubtract(plotarr, self.bckndarr, self.killmap, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[imnum, 0], self.bminanomf[imnum, 1]))[0]
                        elif 'lin' in self.bcknd:
                            plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(self.bckndarr, imnum), self.killmap, btype=self.bcknd, linweights=self.blinwts[imnum])[0]
                        else:
                            plotarr=bckndsubtract(plotarr, self.bckndarr, self.killmap, btype=self.bcknd)[0]
                        totpix=self.killmap.sum()
            elif self.killbool:
                if self.binbool:
                    plotarr*=self.killmapbin
                    totpix=self.killmapbin.sum()
                else:
                    plotarr*=self.killmap
                    totpix=self.killmap.sum()
        else:#bcknd image or killmap or something
            if self.killbool:
                # pick the kill map whose size matches the displayed array
                if plotarr.shape[0]==self.killmap.shape[0]:
                    plotarr*=self.killmap
                    totpix=self.killmap.sum()
                elif plotarr.shape[0]==self.killmapbin.shape[0]:
                    plotarr*=self.killmapbin
                    totpix=self.killmapbin.sum()
                else:
                    QMessageBox.warning(self,"failed", "killmap selected but neither killmap nor \n binned killmap are correct size")
        self.createhist(plotarr, totpix=totpix)
        self.navw.fig.canvas.draw()
        self.imgLabel.setText(''.join((self.savename2, ': ', self.histstr)))
    def savetxt(self):
        """Write the current histogram (bin centers and frequencies) to a text file."""
        self.imname=unicode(self.imComboBox.currentText())
        if self.dat:
            name=''.join((self.datsavename, '_hist'))
        else:
            name=''.join((self.savename1, self.savename2))
        header=''.join(('!histogram of counts. center values of bins and frequency given below. ', self.histstr))
        writenumtotxtfile(self.runpath, self.vals, self.counts, name, header=header)
    def picclickprocess(self, picnum):
        """Handle a navigator click: select the clicked point's raw image and redraw."""
        picname='raw-%d' %picnum
        if picname in self.imnamelist:
            for i in range(len(self.imnamelist)):
                if self.imnamelist[i]==picname:
                    self.imComboBox.setCurrentIndex(i)
                    break
            self.draw()
    def save(self):
        """Save the current histogram plot to an image file."""
        if self.dat:
            self.plotw.save(os.path.join(self.runpath, ''.join((self.datsavename, '_hist'))).replace('\\','/').encode())
        else:
            self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2))).replace('\\','/').encode())
    def savenavimage(self):
        """Save the navigator plot, numbering successive saves with savecount."""
        if self.dat:
            self.navw.save(os.path.join(self.runpath, ''.join((self.datsavename, '_HistPlotPoints', '%d' %self.savecount))).replace('\\','/').encode())
        else:
            self.navw.save(os.path.join(self.runpath, ''.join((self.savename1, '_HistPlotPoints', '%d' %self.savecount))).replace('\\','/').encode())
        self.savecount+=1
    def fromdat(self):
        """Read a raw binary .dat image (uint16, assumed square) and histogram it."""
        temp = mygetopenfile(self, xpath=self.datpath,markstr='XRD binary image')
        if temp!='':
            self.datpath=temp
            self.datsavename=os.path.splitext(os.path.split(self.datpath)[1])[0]
            data = numpy.fromfile(self.datpath, dtype='uint16')
            # NOTE(review): float shape elements from numpy.sqrt are rejected by
            # current NumPy versions — may need int() casts.
            data.shape = (numpy.sqrt(len(data)), numpy.sqrt(len(data)))
            self.dat=True
            self.createhist(data)
    def createhist(self, data, totpix=None):
        #if already applying a killmap, send the total # of pixels used. if not, then will apply the default ciruclar killmap
        # Histogram parameters from the spin boxes: a = lowest counts value,
        # b = bin width (0 -> auto from data max), c = number of bins.
        a=self.startSpinBox.value()
        b=self.intSpinBox.value()
        c=self.numSpinBox.value()
        if totpix is None:
            if self.circkillmap.shape==data.shape:
                kdata=data*self.circkillmap
                totpix=self.circkillmap.sum()
            elif self.circkillmapbin.shape==data.shape:
                kdata=data*self.circkillmapbin
                totpix=self.circkillmapbin.sum()
            else:
                # NOTE(review): self.circkillmap[0] here is a row of the array;
                # self.circkillmap.shape[0] looks like the intended divisor.
                self.circkillmapbin=binboolimage(self.circkillmap, bin=data.shape[0]/self.circkillmap[0])
                kdata=data*self.circkillmapbin
                totpix=self.circkillmapbin.sum()
        else:
            kdata=data
        if b==0:
            b=(kdata.max()-a)/(1.0*c)
        # bin centers and bin edges
        self.vals=numpy.array(range(c), dtype='float32')*b+a+b/2
        slots=numpy.array(range(c+1), dtype='float32')*b+a
        # per-bin frequency normalized by the number of live pixels
        self.counts=numpy.array([((kdata>slots[i])&(kdata<=slots[i+1])).sum() for i in range(c)])/(1.0*totpix)
        belowcounts=(kdata<=slots[0]).sum()-kdata.shape[0]**2+totpix #get rid of all the zeros from killmap
        abovecounts=(kdata>slots[-1]).sum()
        self.plotw.performplot([self.vals, self.counts], overlay=self.overlayCheckBox.isChecked())
        self.histstr=''.join(('%d'%belowcounts, 'pixels with counts <=', '%d'%slots[0],' and ','%d'%abovecounts, 'pixels with counts >', '%d'%slots[-1], '. Total pixels: ', '%d'%totpix))
        self.plotw.fig.canvas.draw()
class plotwavetrans1dwindow(QDialog):
    """Dialog for plotting 1-D wavelet transforms of XRD spectra (or texture chi
    scans), with ridge/peak overlays, interactive peak removal by clicking, and
    an x,z or composition navigator for selecting sample points.
    """
    def __init__(self, parent, h5path, h5groupstr, runpath, navchoice, type='h5mar:icounts'):
        # type selects the data source: 'h5mar:...' for the main 1-D spectra,
        # 'h5tex:<groupname>' for a texture analysis group.
        super(plotwavetrans1dwindow, self).__init__(parent)
        self.h5path=h5path
        self.h5groupstr=h5groupstr
        self.runpath=runpath
        self.navchoice=navchoice
        # filename prefix for saved images: "<h5 file stem>_<group>_"
        self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
        self.imnamelist=[]
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if 'h5mar' in type:
            self.wtgrpstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'wavetrans1d'))
            qgridtemp=getimapqgrid(h5analysis.attrs['imapstr'], imap=False)
            self.pointlist=h5analysis.attrs['pointlist']
            self.overlayifcountsbool='ifcounts' in h5mar
            self.countsarrstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'icounts'))
            self.processedcountsarrstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis),'ifcounts'))
        elif 'h5tex' in type:
            h5grpname=type.partition(':')[2]
            h5tex=h5mar['texture']
            h5texgrp=h5tex[h5grpname]
            self.wtgrpstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname, 'wavetrans1d'))
            qgridtemp=h5texgrp.attrs['chigrid']
            self.overlayifcountsbool=False
            self.countsarrstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname, 'icounts'))
            self.processedcountsarrstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname, 'ifcounts'))
            self.pointlist=h5texgrp.attrs['pointlist']
        wtgrp=h5file[self.wtgrpstr]
        # NOTE(review): 'getattr' is a project helper (shadowing the builtin) that
        # returns the attribute dict for this h5 group.
        self.attrdict=getattr(self.h5path, self.h5groupstr)
        self.qgrid=wtgrp.attrs['qgrid'] #use the wave trans qgrid as the qgrid because it is the union of it and the icounts qgrid
        self.qscalegrid=wtgrp.attrs['qscalegrid']
        self.qposngrid=wtgrp.attrs['qposngrid']
        # boolean mask selecting the counts-grid q values that appear in the
        # wavelet-transform qgrid
        self.icountsind=numpy.array([qval in q_qgrid_ind(self.qgrid) for qval in q_qgrid_ind(qgridtemp)])
        self.imnamelist=[]
        self.imnamelist+=['%d' %p for p in self.pointlist]
        # add any 2-D 'wt*' datasets from the wavetrans group
        for node in wtgrp.iterobjects():
            if (node.name.rpartition('/')[2]).startswith('wt') and isinstance(node, h5py.Dataset) and len(node.shape)==2:
                self.imnamelist+=[node.name.rpartition('/')[2]]
        h5file.close()
        if len(self.imnamelist)==0:
            print 'NO 1D IMAGES FOUND!'
            return
        self.setWindowTitle('Plot wavelet trnasform of 1d spectra')
        self.savenavimageButton=QPushButton()
        self.savenavimageButton.setText('save .png\nnavigator')
        QObject.connect(self.savenavimageButton,SIGNAL("pressed()"),self.savenavimage)
        self.xgrid=self.attrdict['xgrid']
        self.zgrid=self.attrdict['zgrid']
        self.xcoords=self.attrdict['x']
        self.zcoords=self.attrdict['z']
        # choose the navigator widget; composition navigation falls back to the
        # x,z navigator when compositions cannot be computed
        if self.navchoice==0:
            self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
        else:
            elstr=self.attrdict['elements']
            if self.navchoice==1:
                infotype='DPmolfracALL'
            else:
                infotype='XRFmolfracALL'
            self.elstrlist, self.compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=infotype)
            if self.compsarr is None:
                print 'NO COMPOSITION NAVIGATOR WINDOW BECAUSE PROBLEM CALCULATING COMPOSITIONS'
                self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
            else:
                print 'COMPS:', self.compsarr
                self.navw = compnavigatorwidget(self, self.compsarr, self.elstrlist)
        QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
        self.colridgesCheckBox=QCheckBox()
        self.colridgesCheckBox.setText('color ridges\nbyWT value')
        self.colridgesCheckBox.setChecked(True)
        self.peaksCheckBox=QCheckBox()
        self.peaksCheckBox.setText('include\npeaks')
        self.peaksCheckBox.setChecked(True)
        if self.overlayifcountsbool:
            self.ifcountsCheckBox=QCheckBox()
            self.ifcountsCheckBox.setText('use ifcounts\nprocessed data')
            self.ifcountsCheckBox.setChecked(False)
        self.plotComboBox=QComboBox()
        self.plotComboBox.clear()
        self.plotComboBox.insertItem(999, '2D W.T. w/ 1D data')
        self.plotComboBox.insertItem(999, '2D W.T. w/ WT@scale')
        self.plotComboBox.insertItem(999, 'overlay 1D data')
        self.plotComboBox.insertItem(999, 'overlay WT@scale')
        self.plotComboBox.setCurrentIndex(0)
        self.imComboBox=QComboBox()
        self.scaleComboBox=QComboBox()
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        # fitted-peak overlay is disabled (not implemented yet)
        if False:
            self.fittedpeaksButton=QPushButton()
            self.fittedpeaksButton.setText('overlay\nfitted peaks')
            QObject.connect(self.fittedpeaksButton,SIGNAL("pressed()"),self.drawfittedpeaks)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        toplayout=QHBoxLayout()
        toplayout.addWidget(self.savenavimageButton)
        toplayout.addWidget(self.colridgesCheckBox)
        toplayout.addWidget(self.peaksCheckBox)
        if self.overlayifcountsbool:
            toplayout.addWidget(self.ifcountsCheckBox)
        toplayout.addWidget(self.plotComboBox)
        toplayout.addWidget(self.imComboBox)
        toplayout.addWidget(self.scaleComboBox)
        toplayout.addWidget(self.drawButton)
        if False:
            toplayout.addWidget(self.fittedpeaksButton)
        toplayout.addWidget(self.saveButton)
        layout=QVBoxLayout()
        leftlayout=QVBoxLayout()
        rightlayout=QVBoxLayout()
        lefttoplayout=QGridLayout()
        plotlayout=QHBoxLayout()
        self.unusedSpinBox=QSpinBox()
        self.unusedSpinBox.setValue(0)
        self.unusedSpinBox.setRange(0,1000000 )
        self.imgLabel=QLabel()
        self.plotw=wavelet1dplotwidget(self, self.qgrid, self.qscalegrid, self.qposngrid)
        QObject.connect(self.plotw, SIGNAL("dataaxesclicked"), self.clickhandler)
        lab1=QLabel()
        lab2=QLabel()
        lab1.setText('click peak->remove peak @ position')
        self.activeremoveCheckBox=QCheckBox()
        self.activeremoveCheckBox.setText('remove peaks with clicks is active')
        self.activeremoveCheckBox.setChecked(False)
        # read-only counter of how many peaks have been removed this session
        self.peaksremoved=QSpinBox()
        self.peaksremoved.setValue(0)
        self.peaksremoved.setDisabled(True)
        lab2.setText('number of peaks removed')
        lefttoplayout.addWidget(self.activeremoveCheckBox, 0, 0, 1, 3)
        lefttoplayout.addWidget(lab1, 1, 0, 1, 3)
        lefttoplayout.addWidget(lab2, 2, 0, 1, 2)
        lefttoplayout.addWidget(self.peaksremoved, 2, 2, 1, 1)
        self.qvalueofpeakremoval=None
        leftlayout.addLayout(lefttoplayout)
        rightlayout.addWidget(self.imgLabel)
        leftlayout.addWidget(self.navw)
        rightlayout.addWidget(self.plotw)
        plotlayout.addLayout(leftlayout)
        plotlayout.addLayout(rightlayout)
        layout.addLayout(toplayout)
        layout.addLayout(plotlayout)
        self.setLayout(layout)
        self.fillimComboBox()
        self.fillscaleComboBox()
        self.savecount=0
        self.selectlist=[]
        self.imnum=0
        self.imname=unicode(self.imComboBox.currentText())
        self.navw.plotpoints(self.pointlist, [])
    def fillimComboBox(self):
        """Populate the image selector with the discovered spectrum names."""
        self.imComboBox.clear()
        if len(self.imnamelist)>0:
            for name in self.imnamelist:
                self.imComboBox.insertItem(999, name)
        else:
            self.imComboBox.insertItem(0, 'err')
        self.imComboBox.setCurrentIndex(0)
    def fillscaleComboBox(self):
        """Populate the scale selector with the wavelet scale values."""
        self.scaleComboBox.clear()
        for s in scale_scalegrid_ind(self.qscalegrid):
            self.scaleComboBox.insertItem(999, 'scale %.2f' %s)
        self.scaleComboBox.setCurrentIndex(0)
    def clickhandler(self, clickxy):
        """Handle a click on the data axes: remove the nearest peak if enabled."""
#        if self.addpeaks:
#            self.addpeakposnSpinBox.setValue(clickxy[0])
#            self.addpeak()
        if self.activeremoveCheckBox.isChecked():
            self.qvalueofpeakremoval=clickxy[0]
            self.removepeak()
    def removepeak(self):
        """Delete the stored peak closest to the last clicked q value, modifying the
        'peaks' dataset in the h5 file in place (32767 is the fill/sentinel value)."""
        h5file=h5py.File(self.h5path, mode='r+')
        wtgrp=h5file[self.wtgrpstr]
        if not 'peaks' in wtgrp:
            print "PEAKS HAVE NOT BEEN IDENTIFIED"
            h5file.close()
            return
        pkscaleind=wtgrp['peaks'][self.imnum, 0, :]
        pkposnind=wtgrp['peaks'][self.imnum, 1, :]
        # NOTE(review): the clicked coordinate is compared directly to position
        # indices here — confirm both are in the same units.
        pkqvals=numpy.float32(pkposnind[pkposnind!=32767])
        ind=myargmin((pkqvals-self.qvalueofpeakremoval)**2)
        print 'removing peak at ', self.qvalueofpeakremoval
        #print (pkqvals-self.qvalueofpeakremoval)**2
        print (numpy.append(numpy.append(pkscaleind[:ind],pkscaleind[ind+1:]),numpy.uint16([32767]))).dtype
        # drop element ind and pad with the sentinel so the row length is unchanged
        wtgrp['peaks'][self.imnum, 0, :]=numpy.append(numpy.append(pkscaleind[:ind],pkscaleind[ind+1:]),numpy.uint16([32767]))[:]
        wtgrp['peaks'][self.imnum, 1, :]=numpy.append(numpy.append(pkposnind[:ind],pkposnind[ind+1:]),numpy.uint16([32767]))[:]
        print self.imnum, ind
        h5file.close()
        self.peaksremoved.setValue(1+self.peaksremoved.value())
    def picclickprocess(self, picnum):
        """Handle a navigator click: select the clicked point's spectrum and redraw."""
        picname='%d' %picnum
        if picname in self.imnamelist:
            for i in range(len(self.imnamelist)):
                if self.imnamelist[i]==picname:
                    self.imComboBox.setCurrentIndex(i)
                    break
            self.draw()
    def save(self):
        """Save the current wavelet-transform plot to an image file."""
        self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.savename2))).replace('\\','/').encode())
    def savenavimage(self):
        """Save the navigator plot, numbering successive saves with savecount."""
        self.navw.save(os.path.join(self.runpath, ''.join((self.savename1, '_WT1dPlotPoints', '%d' %self.savecount))).replace('\\','/').encode())
        self.savecount+=1
    def drawfittedpeaks(self):
        """Placeholder for the fitted-peak overlay (feature not implemented)."""
        print 'not implemented yet'
    def draw(self):
        """Draw the wavelet transform / overlay for the selected spectrum according
        to the plot-type combo box."""
        self.imname=unicode(self.imComboBox.currentText())
        if self.imname.isdigit():
            self.imnum=eval(self.imname)
        else:
            print 'plotting wavetrans of auxiliary data is not yet supported'
        self.selectlist=[self.imnum]
        if self.colridgesCheckBox.isChecked():
            wtcmap=cm.jet
            ridgecmap=cm.gray
        else:
            wtcmap=cm.gray
            ridgecmap=None
        self.savename2=''.join(('_wavetrans1d_', self.imname))
        # w_o_c is either a dataset path (1-D data) or a scale index (WT@scale);
        # display_wavetrans1dcaller dispatches on its type
        plottype=self.plotComboBox.currentIndex()
        if plottype==0: #2D W.T. w/ 1D data
            overlay=False
            w_o_c=self.countsarrstr
            if self.overlayifcountsbool:
                if self.ifcountsCheckBox.isChecked():
                    w_o_c=self.processedcountsarrstr
        elif plottype==1: #2D W.T. w/ WT@scale
            overlay=False
            w_o_c=self.scaleComboBox.currentIndex()
        elif plottype==2: #overlay 1D data
            overlay=True
            w_o_c=self.countsarrstr
            if self.overlayifcountsbool:
                if self.ifcountsCheckBox.isChecked():
                    w_o_c=self.processedcountsarrstr
        elif plottype==3: #overlay WT@scale
            overlay=True
            w_o_c=self.scaleComboBox.currentIndex()
        else:
            QMessageBox.warning(self,"failed", 'ABORTED. PLOTTING NOT SUPPORTED:', unicode(self.plotComboBox.currentText()))
            return
        self.display_wavetrans1dcaller(w_o_c, title='', wtcmap=wtcmap, ridgecmap=ridgecmap, overlay1donly=overlay)
        self.navw.plotpoints(self.pointlist, [], select=self.selectlist)
        self.navw.fig.canvas.draw()
        self.imgLabel.setText(self.savename2)
    def display_wavetrans1dcaller(self, wavescaleind_or_countsname, wtcmap=cm.jet, ridgecmap=cm.gray, title='', overlay1donly=False):
        #datascaleind gives the index of the scale parameter to use in the 1D spectrum plot. if it is None the 1D data from icounts will be displayed
        # wavescaleind_or_countsname: str -> h5 path of the 1-D counts dataset;
        # int -> index into the wavelet scale grid. 32767 is the fill/sentinel
        # value in the ridges/peaks datasets.
        h5file=h5py.File(self.h5path, mode='r')
        wtgrp=h5file[self.wtgrpstr]
        wt=wtgrp['wavetrans'][self.imnum, :, :]
        if 'ridges' in wtgrp:
            ridges=wtgrp['ridges'][self.imnum, :, :]
            # keep only rows that are not entirely sentinel-filled
            ridges=ridges[ridges.mean(axis=1)!=32767, :]
        else:
            ridges=[]
        datapeakind=None
        if isinstance(wavescaleind_or_countsname, str):
            datascaleind=None
            print h5file[wavescaleind_or_countsname].shape, h5file[wavescaleind_or_countsname][self.imnum].shape, h5file[wavescaleind_or_countsname][self.imnum][self.icountsind].shape
            # restrict the 1-D data to the q values shared with the WT qgrid
            data=h5file[wavescaleind_or_countsname][self.imnum][self.icountsind]
            if ('peaks' in wtgrp) and self.peaksCheckBox.isChecked():
                datapeakind=wtgrp['peaks'][self.imnum, 1, :]
                datapeakind=datapeakind[datapeakind!=32767]
                # convert peak position-grid indices to (fractional) qgrid indices
                datapeakind=ind_qgrid_q(self.qgrid, q_qgrid_ind(self.qposngrid, datapeakind), fractional=True)
        else:
            datascaleind=wavescaleind_or_countsname
            data=wt[datascaleind, :]
            if ('ridges' in wtgrp) and self.peaksCheckBox.isChecked():
                # ridge columns are stored with the scale axis reversed
                ridgesatscale=ridges[:, wt.shape[0]-1-datascaleind]
                datapeakind=ridgesatscale[(ridgesatscale>=0)&(ridgesatscale!=32767)]
        h5file.close()
        if overlay1donly:
            self.plotw.plot1doverlay(data, datascaleind, datapeakind=datapeakind)
        else:
            self.plotw.display_wavetrans1d(wt, ridges, data, datascaleind=datascaleind, datapeakind=datapeakind, wtcmap=wtcmap, ridgecmap=ridgecmap, title='')
        self.plotw.fig.canvas.draw()
class plotinterpimageof1ddatawindow(QDialog):
def __init__(self, parent, h5path, h5groupstr, runpath, navchoice, style='interp', type='h5mar'):
super(plotinterpimageof1ddatawindow, self).__init__(parent)
self.type=type
self.texturestyle=False
if style=='interp' or style=='texture':
self.interpstyle=True
self.texturestyle= style=='texture'
self.infostyle=False
elif style=='info':
self.interpstyle=False
self.infostyle=True
else:
self.interpstyle=False
self.infostyle=False
print 'PLOTTING TYPE NOT UNDERSTOOD'
if style=='texture' and 'tex' in type:
QMessageBox.warning(self,"warning", "For interp plot, type should be 'h5mar' when style is 'texture'")
self.navchoice=navchoice
self.h5path=h5path
self.h5groupstr=h5groupstr
self.runpath=runpath
self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
self.bin=getbin(h5analysis)
if 'h5mar' in type:
self.h5datagrpstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))
qgridtemp=getimapqgrid(h5analysis.attrs['imapstr'], imap=False)
self.pointlist=h5analysis.attrs['pointlist']
self.overlayifcountsbool='ifcounts' in h5mar
# self.countsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'icounts'))
# self.processedcountsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'ifcounts'))
self.qgrid=h5mar['icounts'].attrs['qgrid']
elif 'h5tex' in type:
h5grpname=type.partition(':')[2]
h5tex=h5mar['texture']
h5texgrp=h5tex[h5grpname]
self.h5datagrpstr='/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis),'texture', h5grpname))
qgridtemp=h5texgrp.attrs['chigrid']
self.overlayifcountsbool=False
# self.countsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'texture', h5grpname, 'icounts'))
# self.processedcountsarrstr='/'.join((self.h5groupstr, 'analysis/mar345', 'texture', h5grpname, 'ifcounts'))
self.pointlist=h5texgrp.attrs['pointlist']
self.qgrid=h5texgrp.attrs['chigrid']
self.attrdict=getattr(self.h5path, self.h5groupstr)
self.qvals=q_qgrid_ind(self.qgrid)
self.sampleinfo, garbage=getpointinfo(self.h5path, self.h5groupstr)
self.headings=pointinfodictkeysort(self.sampleinfo)
if self.interpstyle:
self.xrdtypeComboBox=QComboBox()
self.xrdtypeComboBox.clear()
if 'icounts' in h5file[self.h5datagrpstr]:
self.xrdtypeComboBox.insertItem(999, 'icounts')
if 'ifcounts' in h5file[self.h5datagrpstr]:
self.xrdtypeComboBox.insertItem(999, 'ifcounts')
if 'idcounts' in h5file[self.h5datagrpstr]:
self.xrdtypeComboBox.insertItem(999, 'idcounts')
if 'imcounts' in h5file[self.h5datagrpstr]:
self.xrdtypeComboBox.insertItem(999, 'imcounts')
self.xrdtypeComboBox.setCurrentIndex(1)
if self.texturestyle:
self.killmap=getkillmap(h5analysis.attrs['killmapstr'])
self.killmapbin=getkillmap(h5analysis.attrs['killmapstr'], bin=self.bin)
self.imap, qgrid=getimapqgrid(h5analysis.attrs['imapstr'])
self.imapbin, qgrid=getimapqgrid(h5analysis.attrs['imapstr'], bin=self.bin)
self.imapkillmap=self.killmap*(self.imap!=0)
self.imapkillmapbin=self.killmapbin*(self.imapbin!=0)
self.chimap, self.chigrid=getchimapchigrid(h5analysis.attrs['chimapstr'])
self.chimapbin, self.chigrid=getchimapchigrid(h5analysis.attrs['chimapstr'], bin=self.bin)
self.imap*=self.killmap
self.imapbin*=self.killmapbin
self.chimap*=self.killmap
self.chimapbin*=self.killmapbin
self.dqchiimage=getdqchiimage(h5analysis.attrs['dqchiimagestr'])
self.dqchiimagebin=getdqchiimage(h5analysis.attrs['dqchiimagestr'], bin=self.bin)
self.bcknd=self.attrdict['bcknd']
if 'lin' in self.bcknd:
self.bckndarr, self.blinwts=readblin(h5mar)
self.bckndarrbin, self.blinwts=readblin(h5mar, bin=self.bin)
else:
bstr=''.join(('b', self.bcknd[:3]))
self.bckndarr=readh5pyarray(h5mar[bstr])
bstr=''.join((bstr, 'bin%d' %self.bin))
self.bckndarrbin=readh5pyarray(h5mar[bstr])
if self.bcknd=='minanom':
if 'bimap' in h5mar:
bimap=readh5pyarray(h5mar['bimap'])
bqgrid=h5mar['bimap'].attrs['bqgrid']
else:
bimap=None
bqgrid=None
self.banomcalc=(self.imapbin, self.qgrid, self.attrdict, bimap, bqgrid)
self.bminanomf=readh5pyarray(h5mar['bminanomf'])
h5file.close()
self.xgrid=self.attrdict['xgrid']
self.zgrid=self.attrdict['zgrid']
self.xcoords=self.attrdict['x']
self.zcoords=self.attrdict['z']
if self.interpstyle:
self.setWindowTitle('Plot interpolation of 1d spectra')
elif self.infostyle:
self.setWindowTitle('Plot sample info')
#PLOT STYLE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plotstylelayout=QGridLayout()
if self.interpstyle:
self.plotpeaksCheckBox=QCheckBox()
self.plotpeaksCheckBox.setText('plot peaks')
self.plotpeaksCheckBox.setChecked(False)
self.peaksstyleLineEdit=QLineEdit()
self.peaksstyleLineEdit.setText('w.6')
self.datamarkerCheckBox=QCheckBox()
self.datamarkerCheckBox.setText('use marker to\nshow spectra posns')
self.datamarkerCheckBox.setChecked(True)
self.datamarkerstyleLineEdit=QLineEdit()
self.datamarkerstyleLineEdit.setText('r>10')
self.xrdtypeLabel=QLabel()
self.xrdtypeLabel.setText('1D-XRD type')
plotstylelayout.addWidget(self.xrdtypeLabel, 0, 0, 1, 1)
plotstylelayout.addWidget(self.xrdtypeComboBox, 1, 0, 1, 1)
plotstylelayout.addWidget(self.plotpeaksCheckBox, 0, 1, 1, 1)
plotstylelayout.addWidget(self.peaksstyleLineEdit, 1, 1, 1, 1)
plotstylelayout.addWidget(self.datamarkerCheckBox, 0, 2, 1, 1)
plotstylelayout.addWidget(self.datamarkerstyleLineEdit, 1, 2, 1, 1)
elif self.infostyle:
self.plotxzCheckBox=QCheckBox()
self.plotxzCheckBox.setText('plot x,z pts')
self.plotxzCheckBox.setChecked(True)
self.xzstyleLineEdit=QLineEdit()
self.xzstyleLineEdit.setText('kx6')
self.datastyleLabel=QLabel()
self.datastyleLabel.setText('data plot style(s)')
self.datastyleLineEdit=QLineEdit()
self.datastyleLineEdit.setText('ro,r-')
#plotstylelayout.addWidget(, 0, 0, 1, 1)
#plotstylelayout.addWidget(, 1, 0, 1, 1)
plotstylelayout.addWidget(self.plotxzCheckBox, 0, 1, 1, 1)
plotstylelayout.addWidget(self.xzstyleLineEdit, 1, 1, 1, 1)
plotstylelayout.addWidget(self.datastyleLabel, 0, 2, 1, 1)
plotstylelayout.addWidget(self.datastyleLineEdit, 1, 2, 1, 1)
cmaplab=QLabel()
cmaplab.setText('colormap\n(cmap or blank)')
self.cmapLineEdit=QLineEdit()
self.cmapLineEdit.setText('jet')
plotstylelayout.addWidget(cmaplab, 0, 3, 1, 1)
plotstylelayout.addWidget(self.cmapLineEdit, 1, 3, 1, 1)
#PLOT RANGE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.interpstyle:
self.pdfsetupButton=QPushButton()
self.pdfsetupButton.setText('plot PDF')
QObject.connect(self.pdfsetupButton,SIGNAL("pressed()"),self.pdfsetup)
self.pdflineLabel1=QLabel()
self.pdflineLabel1.setText('pdf ctrl:')
self.pdflineLabel2=QLabel()
self.pdflineLabel2.setText('ymin,ymax,colstr,linew')
self.pdfplotinfoLineEdit=QLineEdit()
self.pdfplotinfoLineEdit.setText('')
self.interpCheckBox=QCheckBox()
self.interpCheckBox.setText('interp y-axis')
self.interpCheckBox.setChecked(False)
logcbstr='log int. cutoff'
else:
logcbstr='log intensity'
self.logCheckBox=QCheckBox()
self.logCheckBox.setText(logcbstr)
self.logCheckBox.setChecked(False)
logcutlab=QLabel()
logcutlab.setText('log int. cutoff:')
self.logcutSpinBox=QDoubleSpinBox()
self.logcutSpinBox.setValue(10.0)
self.logcutSpinBox.setDecimals(8)
self.logcutSpinBox.setRange(0,1000000 )
self.cmaponethirdSpinBox=QDoubleSpinBox()
self.cmaponethirdSpinBox.setValue(.33)
self.cmaponethirdSpinBox.setRange(.00001, .99999)
self.cmaptwothirdsSpinBox=QDoubleSpinBox()
self.cmaptwothirdsSpinBox.setValue(.67)
self.cmaptwothirdsSpinBox.setRange(.00001, .99999)
xrangelab=QLabel()
if self.interpstyle:
if 'tex' in type:
xrangelab.setText('PHI-range min, max')
else:
xrangelab.setText('Q-range min, max')
elif self.infostyle:
xrangelab.setText('X info min, max')
yrangelab=QLabel()
yrangelab.setText('Y info min, max')
ynumlab=QLabel()
ynumlab.setText('num Y info pts')
perclab=QLabel()
perclab.setText('percentile of data for\n1st, 2nd tertile of cmap')
self.YgetinfominmaxButton=QPushButton()
self.YgetinfominmaxButton.setText('set min/max\nof info{points}')
QObject.connect(self.YgetinfominmaxButton,SIGNAL("pressed()"),self.Ygetinfominmax)
self.XgetinfominmaxButton=QPushButton()
self.XgetinfominmaxButton.setText('set min/max\nof info{points}')
QObject.connect(self.XgetinfominmaxButton,SIGNAL("pressed()"),self.Xgetinfominmax)
self.YinfominSpinBox=QDoubleSpinBox()
self.YinfominSpinBox.setValue(0)
self.YinfominSpinBox.setRange(-999999999, 999999999)
self.YinfominSpinBox.setDecimals(3)
self.YinfomaxSpinBox=QDoubleSpinBox()
self.YinfomaxSpinBox.setValue(1)
self.YinfomaxSpinBox.setRange(-999999999, 999999999)
self.YinfomaxSpinBox.setDecimals(3)
self.YinfonumSpinBox=QSpinBox()
self.YinfonumSpinBox.setValue(100)
self.YinfonumSpinBox.setRange(1, 100000)
self.XinfominSpinBox=QDoubleSpinBox()
self.XinfomaxSpinBox=QDoubleSpinBox()
if self.interpstyle:
self.XinfominSpinBox.setValue(q_qgrid_ind(self.qgrid, 0))
self.XinfominSpinBox.setRange(q_qgrid_ind(self.qgrid, 0), q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
self.XinfomaxSpinBox.setValue(q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
self.XinfomaxSpinBox.setRange(q_qgrid_ind(self.qgrid, 0), q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
elif self.infostyle:
self.XinfominSpinBox.setValue(0)
self.XinfominSpinBox.setRange(-999999999, 999999999)
self.XinfomaxSpinBox.setValue(1)
self.XinfomaxSpinBox.setRange(-999999999, 999999999)
plotrangelayout=QGridLayout()
if self.interpstyle:
plotrangelayout.addWidget(perclab, 0, 0, 2, 2)
plotrangelayout.addWidget(self.cmaponethirdSpinBox, 2, 0, 1, 1)
plotrangelayout.addWidget(self.cmaptwothirdsSpinBox, 2, 1, 1, 1)
plotrangelayout.addWidget(self.pdfsetupButton, 3, 0, 1, 1)
plotrangelayout.addWidget(self.pdflineLabel1, 4, 0, 1, 1)
plotrangelayout.addWidget(self.pdflineLabel2, 3, 1, 1, 1)
plotrangelayout.addWidget(self.pdfplotinfoLineEdit, 4, 1, 1, 1)
plotrangelayout.addWidget(self.logCheckBox, 5, 0, 1, 1)
plotrangelayout.addWidget(self.logcutSpinBox, 5, 1, 1, 1)
plotrangelayout.addWidget(self.interpCheckBox, 6, 0, 1, 1)
# plotrangelayout.addWidget(self.logCheckBox, 3, 0, 1, 2)
# plotrangelayout.addWidget(logcutlab, 4, 0, 1, 1)
# plotrangelayout.addWidget(self.logcutSpinBox, 4, 1, 1, 1)
plotrangelayout.addWidget(xrangelab, 0, 2, 1, 1)
plotrangelayout.addWidget(self.XinfominSpinBox, 1, 2, 1, 1)
plotrangelayout.addWidget(self.XinfomaxSpinBox, 2, 2, 1, 1)
plotrangelayout.addWidget(self.XgetinfominmaxButton, 3, 2, 1, 1)
plotrangelayout.addWidget(yrangelab, 0, 3, 1, 1)
plotrangelayout.addWidget(self.YinfominSpinBox, 1, 3, 1, 1)
plotrangelayout.addWidget(self.YinfomaxSpinBox, 2, 3, 1, 1)
plotrangelayout.addWidget(self.YgetinfominmaxButton, 3, 3, 1, 1)
if self.interpstyle:
plotrangelayout.addWidget(ynumlab, 4, 2, 1, 1)
plotrangelayout.addWidget(self.YinfonumSpinBox, 4, 3, 1, 1)
#PLOT CONTROL+SAVE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.savenavimageButton=QPushButton()
self.savenavimageButton.setText('save .png\nnavigator')
QObject.connect(self.savenavimageButton,SIGNAL("pressed()"),self.savenavimage)
self.drawButton=QPushButton()
self.drawButton.setText('draw image')
if self.interpstyle:
QObject.connect(self.drawButton,SIGNAL("pressed()"),self.interpdraw)
elif self.infostyle:
QObject.connect(self.drawButton,SIGNAL("pressed()"),self.substrateinfoplot)
self.saveButton=QPushButton()
self.saveButton.setText('save .png')
QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
self.clearplotsButton=QPushButton()
self.clearplotsButton.setText('clear plots')
QObject.connect(self.clearplotsButton,SIGNAL("pressed()"),self.clearplots)
imglabelLabel=QLabel()
imglabelLabel.setText('Save Name:')
self.imgLabel=QLineEdit()
plotlabellayout=QVBoxLayout()
plotlabellayout.addWidget(imglabelLabel)
plotlabellayout.addWidget(self.imgLabel)
# plotcontrollayout.addWidget(imglabelLabel, 0, 0, 1, 1)
# plotcontrollayout.addWidget(self.imgLabel, 0, 1, 1, 3)
plotcontrollayout=QGridLayout()
plotcontrollayout.addWidget(self.clearplotsButton, 0, 0, 1, 1)
plotcontrollayout.addLayout(plotlabellayout, 0, 1, 1, 3)
plotcontrollayout.addWidget(self.drawButton, 0, 4, 1, 1)
plotcontrollayout.addWidget(self.saveButton, 0, 5, 1, 1)
plotcontrollayout.addWidget(self.savenavimageButton, 0, 6, 1, 1)
#SAMPLE INFO~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.allinfodict={}
self.InfoTextBrowser=QTextBrowser()
self.InfoTextBrowser.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.InfoTextBrowser.setPlainText('IND: selected spec inds\n')
for i, h in enumerate(self.headings):
if i<26:
k=chr(65+i)
else:
k=chr(97+i%26)+chr(97+i%26+i//26-1)
self.allinfodict[k]=self.sampleinfo[h]
self.InfoTextBrowser.setPlainText('%s%s: %s\n' %(str(self.InfoTextBrowser.toPlainText()), k, h))
self.InfoTextBrowser.setReadOnly(True)
self.YInfoMathTextBrowser=QTextBrowser()
self.YInfoMathTextBrowser.setReadOnly(False)
self.YInfoMathTextBrowser.setText('IND')
self.YappendspdshPushButton=QPushButton()
self.YappendspdshPushButton.setText('Append to Spread Sheet')
QObject.connect(self.YappendspdshPushButton,SIGNAL("pressed()"),self.YappendSpreadSheet)
self.YinfoLabel=QLabel()
self.YinfoLabel.setText('label:')
self.YlabelLineEdit=QLineEdit()
self.YlabelLineEdit.setText('')
self.XInfoMathTextBrowser=QTextBrowser()
self.XInfoMathTextBrowser.setReadOnly(False)
self.XappendspdshPushButton=QPushButton()
self.XappendspdshPushButton.setText('Append to Spread Sheet')
QObject.connect(self.XappendspdshPushButton,SIGNAL("pressed()"),self.XappendSpreadSheet)
self.XinfoLabel=QLabel()
self.XinfoLabel.setText('label:')
self.XlabelLineEdit=QLineEdit()
self.XlabelLineEdit.setText('')
self.XmathLabel=QLabel()
self.YmathLabel=QLabel()
if self.interpstyle:
self.YmathLabel.setText('expression for interp Y-axis')
self.XmathLabel.setText('expression for XRD normalization')
elif self.infostyle:
self.YmathLabel.setText('expression for info Y-axis')
self.XmathLabel.setText('expression for info X-axis')
sampleinfolayout=QGridLayout()
sampleinfolayout.addWidget(self.InfoTextBrowser, 0, 0, 6, 4)
sampleinfolayout.addWidget(self.YmathLabel, 0, 4, 1, 4)
sampleinfolayout.addWidget(self.YInfoMathTextBrowser, 1, 4, 2, 4)
sampleinfolayout.addWidget(self.YappendspdshPushButton, 1, 8, 1, 3)
sampleinfolayout.addWidget(self.YinfoLabel, 0, 8, 1, 1)
sampleinfolayout.addWidget(self.YlabelLineEdit, 0, 9, 1, 2)
sampleinfolayout.addWidget(self.XmathLabel, 3, 4, 1, 4)
sampleinfolayout.addWidget(self.XInfoMathTextBrowser, 4, 4, 2, 4)
sampleinfolayout.addWidget(self.XappendspdshPushButton, 4, 8, 1, 3)
sampleinfolayout.addWidget(self.XinfoLabel, 3, 8, 1, 1)
sampleinfolayout.addWidget(self.XlabelLineEdit, 3, 9, 1, 2)
#SPREADSHEET~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.spdshTextBrowser=QTextBrowser()
self.spdshTextBrowser.setPlainText('')
self.spdshTextBrowser.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.spdshTextBrowser.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.spdshTextBrowser.setLineWrapMode(0)
self.spdshFormatLineEdit=QLineEdit()
self.spdshFormatLineEdit.setText('.3f')
spdshFormatLabel=QLabel()
spdshFormatLabel.setText('format:')
self.spdshsavenameLineEdit=QLineEdit()
self.spdshsavenameLineEdit.setText(self.savename1+'.txt')
self.savespdshPushButton=QPushButton()
self.savespdshPushButton.setText('save spreadsheet')
QObject.connect(self.savespdshPushButton,SIGNAL("pressed()"),self.SaveSpreadSheet)
self.ClearSpreadSheet()
self.clearspdshPushButton=QPushButton()
self.clearspdshPushButton.setText('clear\nsheet')
QObject.connect(self.clearspdshPushButton,SIGNAL("pressed()"),self.ClearSpreadSheet)
sampleinfolayout.addWidget(spdshFormatLabel, 0, 11, 1, 1)
sampleinfolayout.addWidget(self.spdshFormatLineEdit, 1, 11, 1, 1)
sampleinfolayout.addWidget(self.clearspdshPushButton, 0, 12, 2, 1)
sampleinfolayout.addWidget(self.savespdshPushButton, 0, 13, 1, 3)
sampleinfolayout.addWidget(self.spdshsavenameLineEdit, 1, 13, 1, 3)
sampleinfolayout.addWidget(self.spdshTextBrowser, 2, 11, 4, 6)
#SPEC INDEX EDITOR~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.includeallButton=QPushButton()
self.includeallButton.setText('include all points')
QObject.connect(self.includeallButton,SIGNAL("pressed()"),self.includeallimages)
self.parseptsButton=QPushButton()
self.parseptsButton.setText('parse pts, avoid NaN')
QObject.connect(self.parseptsButton,SIGNAL("pressed()"),self.ParseIndAvoidNaN)
self.selectedimagesTextBrowser=QTextBrowser()
self.selectedimagesTextBrowser.setPlainText('')
self.selectedimagesTextBrowser.setReadOnly(False)
specindlayout=QGridLayout()
specindlayout.addWidget(self.includeallButton, 0, 0, 1, 2)
specindlayout.addWidget(self.parseptsButton, 1, 0, 1, 2)
specindlayout.addWidget(self.selectedimagesTextBrowser, 0, 2, 2, 3)
xyplotlayout=QGridLayout()
#CHI TEXTURE CONTROL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.texturestyle:
labtextinstruction=QLabel()
labtextinstruction.setText('Will extract highest-intensity peak in Q-range and plot texture averaged over specified Q-width')
lab6=QLabel()
lab6.setText('q-range\nmin,max')
self.peakextractqminSpinBox=QDoubleSpinBox()
#self.peakextractqminSpinBox.setValue(q_qgrid_ind(self.qgrid, 0))
self.peakextractqminSpinBox.setValue(27)
self.peakextractqminSpinBox.setRange(q_qgrid_ind(self.qgrid, 0), q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
self.peakextractqmaxSpinBox=QDoubleSpinBox()
#self.peakextractqmaxSpinBox.setValue(q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
self.peakextractqmaxSpinBox.setValue(28)
self.peakextractqmaxSpinBox.setRange(q_qgrid_ind(self.qgrid, 0), q_qgrid_ind(self.qgrid, self.qgrid[2]-1))
lab8=QLabel()
lab8.setText('# of HWHM or\nQ-width (1/nm)')
self.chiqwidthCheckBox=QCheckBox()
self.chiqwidthCheckBox.setText('use HWHM')
labundercb=QLabel()
labundercb.setText('(unchecked->fixed width)')
self.chiqwidthCheckBox.setChecked(True)
self.chiqwidthSpinBox=QDoubleSpinBox()
self.chiqwidthSpinBox.setValue(2)
self.chiqwidthSpinBox.setRange(0, 5)
lab7=QLabel()
lab7.setText('PSI plot\nmin,max')
self.chiminSpinBox=QDoubleSpinBox()
self.chiminSpinBox.setRange(q_qgrid_ind(self.chigrid, 0), q_qgrid_ind(self.chigrid, self.chigrid[2]-1))
self.chiminSpinBox.setValue(q_qgrid_ind(self.chigrid, 0))
self.chimaxSpinBox=QDoubleSpinBox()
self.chimaxSpinBox.setRange(q_qgrid_ind(self.chigrid, 0), q_qgrid_ind(self.chigrid, self.chigrid[2]-1))
self.chimaxSpinBox.setValue(q_qgrid_ind(self.chigrid, self.chigrid[2]-1))
self.fulltexplotComboBox=QComboBox()
self.fulltexplotComboBox.clear()
self.fulltexplotComboBox.insertItem(0, 'ave LHS+RHS')
self.fulltexplotComboBox.insertItem(1, 'only LHS')
self.fulltexplotComboBox.insertItem(2, 'only RHS')
self.fulltexplotComboBox.setCurrentIndex(2)
self.peakextractdrawButton=QPushButton()
self.peakextractdrawButton.setText('extract peaks,\nplot chi vals')
QObject.connect(self.peakextractdrawButton,SIGNAL("pressed()"),self.peakextractdraw)
self.peakextractsaveButton=QPushButton()
self.peakextractsaveButton.setText('save .png')
QObject.connect(self.peakextractsaveButton,SIGNAL("pressed()"),self.xyplotsave)
self.interpchiCheckBox=QCheckBox()
self.interpchiCheckBox.setText('interpolate in\nPSI direction')
self.interpchiCheckBox.setChecked(False)
self.normchivalsCheckBox=QCheckBox()
self.normchivalsCheckBox.setText('normalize each\nPSI dist by max')
self.normchivalsCheckBox.setChecked(False)
texturesavelabel=QLabel()
texturesavelabel.setText('h5 save name\n(empty->not saved)')
self.texturesaveLineEdit=QLineEdit()
self.texturesaveLineEdit.setText('rhs111')
xyplotlayout.addWidget(labtextinstruction, 0, 0, 1, 12)
xyplotlayout.addWidget(lab6, 1, 0, 1, 2)
xyplotlayout.addWidget(self.peakextractqminSpinBox, 2, 0, 1, 2)
xyplotlayout.addWidget(self.peakextractqmaxSpinBox, 3, 0, 1, 2)
chiqcblayout=QVBoxLayout()
chiqcblayout.addWidget(self.chiqwidthCheckBox)
chiqcblayout.addWidget(labundercb)
xyplotlayout.addLayout(chiqcblayout, 1, 2, 2, 3)
xyplotlayout.addWidget(lab8, 3, 2, 1, 2)
xyplotlayout.addWidget(self.chiqwidthSpinBox, 3, 4, 1, 1)
xyplotlayout.addWidget(lab7, 1, 5, 1, 2)
xyplotlayout.addWidget(self.chiminSpinBox, 2, 5, 1, 2)
xyplotlayout.addWidget(self.chimaxSpinBox, 3, 5, 1, 2)
xyplotlayout.addWidget(self.fulltexplotComboBox, 1, 7, 1, 3)
xyplotlayout.addWidget(self.interpchiCheckBox, 2, 7, 1, 3)
xyplotlayout.addWidget(self.normchivalsCheckBox, 3, 7, 1, 3)
xyplotlayout.addWidget(self.peakextractdrawButton, 1, 10, 1, 2)
xyplotlayout.addWidget(self.peakextractsaveButton, 2, 10, 1, 2)
chisavelinelayout=QVBoxLayout()
chisavelinelayout.addWidget(texturesavelabel)
chisavelinelayout.addWidget(self.texturesaveLineEdit)
xyplotlayout.addLayout(chisavelinelayout, 3, 10, 1, 2)
#X,Y INFO PLOT CONTROL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.infostyle:
self.xyplotoverlayCheckBox=QCheckBox()
self.xyplotoverlayCheckBox.setText('overlay')
self.xyplotoverlayCheckBox.setChecked(True)
self.xyplotButton=QPushButton()
self.xyplotButton.setText('plot info x-y')
QObject.connect(self.xyplotButton,SIGNAL("pressed()"),self.xyinfoplot)
self.xyplotsaveButton=QPushButton()
self.xyplotsaveButton.setText('save .png')
QObject.connect(self.xyplotsaveButton,SIGNAL("pressed()"),self.xyplotsave)
xyplotlayout.addWidget(self.xyplotoverlayCheckBox, 0, 4, 1, 2)
xyplotlayout.addWidget(self.xyplotButton, 0, 6, 1, 1)
xyplotlayout.addWidget(self.xyplotsaveButton, 0, 9, 1, 1)
#PLOT WIDGETS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.navchoice==0:
self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
else:
elstr=self.attrdict['elements']
if self.navchoice==1:
infotype='DPmolfracALL'
else:
infotype='XRFmolfracALL'
self.elstrlist, self.compsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype=infotype)
if self.compsarr is None:
print 'NO COMPOSITION NAVIGATOR WINDOW BECAUSE PROBLEM CALCULATING COMPOSITIONS'
self.navw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
else:
print 'COMPS:', self.compsarr
self.navw = compnavigatorwidget(self, self.compsarr, self.elstrlist)
QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
if self.interpstyle:
self.plotw=plotwidget(self, width=7, height=5, dpi=100)
toolbar=self.plotw.gettoolbarinstance()
elif self.infostyle:
self.plotw=subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords, width=5, dpi=100)
if self.texturestyle or self.infostyle:
self.chipeakorinfoplotw=plotwidget(self, width=7, height=4, dpi=100)
#MAIN GRID~~~~~~~~~~~~~~~~~~~~~~~
layout=QGridLayout()
layout.addLayout(plotstylelayout, 0, 0, 1, 4)
layout.addLayout(plotrangelayout, 1, 0, 3, 4)
layout.addLayout(specindlayout, 4, 0, 2, 4)
layout.addWidget(self.navw, 6, 0, 2, 4)
layout.addLayout(plotcontrollayout, 0, 4, 1, 5)
if self.texturestyle or self.infostyle:
layout.addWidget(self.plotw, 1, 4, 7, 5)
if self.infostyle:
layout.addLayout(xyplotlayout, 0, 9, 2, 5)
layout.addWidget(self.chipeakorinfoplotw, 2, 9, 6, 5)
elif self.texturestyle:
layout.addLayout(xyplotlayout, 0, 9, 2, 5)
layout.addWidget(self.chipeakorinfoplotw, 2, 9, 6, 5)
self.chipeakorinfoplotw.axes.set_xlabel('sample info')
self.chipeakorinfoplotw.axes.set_ylabel('Q posn of peak used in texture analysis')
else:
self.chipeakorinfoplotw=None
layout.addWidget(self.plotw, 1, 4, 7, 10)
layout.addLayout(sampleinfolayout, 8, 0, 3, 14)
self.setLayout(layout)
#layouts done~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.navw.plotpoints(self.pointlist, [])
QObject.connect(self.plotw, SIGNAL("genericclickonplot"), self.clickhandler)
self.pointind_extractedpeaks=[]
self.q_extractedpeaks=[]
self.hwhm_extractedpeaks=[]
self.chidrawbool=False
self.spdshselectlist=[]
self.includeallimages()
self.tooltips()
self.Ygetinfominmax()
#END OF __init__~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def clearplots(self):
self.plotw.reinit()
if not self.chipeakorinfoplotw is None:
self.chipeakorinfoplotw.reinit()
def clickhandler(self, clickxy):
garb=None
    def chidraw(self):
        """Build chi (PSI) intensity distributions for the extracted peaks.

        For each point in self.pointind_extractedpeaks: background-subtracts
        the 2D detector image, selects the pixels in a Q-annulus around the
        extracted peak position (half-width from the chi Q-width controls),
        and bins intensity by chi.  fulltexplotComboBox selects the LHS
        (chi<0), the RHS (chi>0), or the per-|chi| average of both sides.
        Rows accumulate in self.chicounts; if a texture save name was entered
        the distributions are also written to a 'texture' group in the h5
        file.  Finishes by setting self.chidrawbool=True and calling
        self.interpdraw() to display the result.
        """
        interpchibool=self.interpchiCheckBox.isChecked()
        normchivalsbool=self.normchivalsCheckBox.isChecked()
        bin=False  # hard-coded: always use the full-resolution (unbinned) data
        bckndbool=True  # hard-coded: background subtraction always applied
        texturesavename=str(self.texturesaveLineEdit.text())
        # an empty save-name line edit means "plot only, do not save to h5"
        savetex = len(texturesavename)>0
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if bin:
            # unreachable while bin=False above; kept for the binned variant
            countspoint=h5mar['countsbin%d' %self.bin]
            imap=self.imapbin
            chimap=self.chimapbin
            dqchi=self.dqchiimagebin
            bckndarr=self.bckndarrbin
            imapkillmap=self.imapkillmapbin
        else:
            countspoint=h5file['/'.join((self.h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
            imap=self.imap
            chimap=self.chimap
            dqchi=self.dqchiimage
            bckndarr=self.bckndarr
            imapkillmap=self.imapkillmap
        # chi plot range from the spin boxes, converted to chigrid indeces
        chiminplot=self.chiminSpinBox.value()
        chimaxplot=self.chimaxSpinBox.value()
        chiindexmin=ind_qgrid_q(self.chigrid, chiminplot, fractional=False)
        chiindexmax=ind_qgrid_q(self.chigrid, chimaxplot, fractional=False)
        chimapinds_plot=numpy.uint16(range(chiindexmin, chiindexmax+1))+1 # THE +1 IS BECAUSE INT HIS ROUTINE WE WILL OPERATE IN THE CHIMAP INDECES WHICH ARE ONE HIGHER THAN CHIGRID INDECES AND CAN BE NEGATIVE
        #savechigrid=qgrid_minmaxint(q_qgrid_ind(self.chigrid, chiindexmin), q_qgrid_ind(self.chigrid, chiindexmax), self.chigrid[1])
        chiqwidthSpinBoxval=self.chiqwidthSpinBox.value()
        # Q half-width of the annulus: either a multiple of each peak's HWHM
        # or one fixed width for every peak
        if self.chiqwidthCheckBox.isChecked():
            qwidth=chiqwidthSpinBoxval*self.hwhm_extractedpeaks
        else:
            qwidth=[chiqwidthSpinBoxval]*len(self.hwhm_extractedpeaks)
        if savetex:
            # full-size (all sample points) save arrays; NaN marks "no data"
            npts=numpts_attrdict(self.attrdict)
            saveinds=numpy.uint16(self.pointind_extractedpeaks)
            savearr=numpy.ones((npts, self.chigrid[2]), dtype='float32')*numpy.nan
            q_peaks=numpy.ones(npts, dtype='float32')*numpy.nan
            dq_peaks=numpy.ones(npts, dtype='float32')*numpy.nan
            savenormvals=numpy.ones(npts, dtype='float32')*numpy.nan
            ind2dlist=[]
        self.chicounts=None
        for pointind, centerq, qw in zip(self.pointind_extractedpeaks, self.q_extractedpeaks, qwidth):
            # grow chicounts by one zeroed row per processed point
            if self.chicounts is None:
                self.chicounts=numpy.zeros((1, len(chimapinds_plot)), dtype='float32')
            else:
                self.chicounts=numpy.concatenate((self.chicounts, numpy.zeros((1, len(chimapinds_plot)), dtype='float32')), axis=0)
            plotarr=countspoint[pointind, :, :]
            # annulus bounds; +1 presumably because imap bins are 1-based like
            # the chimap indeces noted above - TODO confirm
            lowqbin=ind_qgrid_q(self.qgrid, centerq-qw, fractional=False)+1
            highqbin=ind_qgrid_q(self.qgrid, centerq+qw, fractional=False)+1
            if self.bcknd=='minanom':
                h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
                # NOTE(review): banom is indexed with self.imnum while this loop
                # iterates pointind - verify this is intended and not a bug
                banom=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis), 'banom'))][self.imnum, :, :]
                plotarr=bckndsubtract(plotarr, bckndarr, imapkillmap, btype=self.bcknd, banom_f_f=(banom, self.bminanomf[pointind, 0], self.bminanomf[pointind, 1]))[0]
            elif 'lin' in self.bcknd:
                plotarr=bckndsubtract(plotarr, constructbckndarr_linbyposn(bckndarr, pointind), imapkillmap, btype=self.bcknd, linweights=self.blinwts[pointind])[0]
            else:
                plotarr=bckndsubtract(plotarr, bckndarr, imapkillmap, btype=self.bcknd)[0]
            # combo box: 0 = ave LHS+RHS, 1 = only LHS (chi<0), 2 = only RHS (chi>0)
            texplotind=self.fulltexplotComboBox.currentIndex()
            if texplotind==1:
                ind2d=numpy.where(((imap>=lowqbin)&(imap<=highqbin))&(chimap<0))
            elif texplotind==2:
                ind2d=numpy.where(((imap>=lowqbin)&(imap<=highqbin))&(chimap>0))
            else:
                ind2d=numpy.where(((imap>=lowqbin)&(imap<=highqbin))&(chimap!=0)) #as long as the bin vals are not zero this checks for killmap because imap contains killmap, per a few lines above. the chimap!=0 is just to be safe
            if savetex:
                ind2dlist+=[ind2d]
            if ind2d[0].size==0:
                print 'ERROR - THE ANNULUS FOR PSI PLOTTING WAS NOT FOUND IN THE BINNED MAR IMAGE'
            chimapinds=chimap[ind2d] #do not substract one, see above note. there should be no zeros in this
            self.countvals=plotarr[ind2d]
            self.dqchivals=dqchi[ind2d]
            sortedchivals=sorted(list(set(chimapinds)))
            # dqchi-weighted mean intensity in every occupied chi bin
            binnedchidata=[[chi, (self.countvals[chimapinds==chi]*self.dqchivals[chimapinds==chi]).sum()/(self.dqchivals[chimapinds==chi].sum())] for chi in sortedchivals if self.dqchivals[chimapinds==chi].sum()>0]
            cinds=numpy.int16(map(operator.itemgetter(0),binnedchidata))
            vals=numpy.float32(map(operator.itemgetter(1),binnedchidata))
            if texplotind==0:
                # fold both sides onto |chi| and average per bin
                poschiind=numpy.where(cinds>0)
                negchiind=numpy.where(cinds<0)
                abschi=numpy.abs(cinds)
                cinds=sorted(list(set(abschi)))
                vals=numpy.float32([vals[abschi==chi].sum()/(abschi==chi).sum() for chi in cinds])
            elif texplotind==1:
                # mirror the LHS so the indeces run positive and increasing
                temp=copy.copy(cinds)
                cinds=numpy.abs(temp[::-1])
                temp=copy.copy(vals)
                vals=temp[::-1]
            cinds=numpy.uint16(cinds)
            if interpchibool:
                # interpolate onto the full plotted chi grid
                usablevals=numpy.float32(scipy.interp(chimapinds_plot, cinds, vals))
                indboolarr=numpy.bool_([True]*len(chimapinds_plot))
            else:
                # keep only chi bins that actually contained data
                indboolarr=numpy.array([cmi in cinds for cmi in chimapinds_plot])
                usablevals=numpy.float32([vals[count] for count, ind in enumerate(cinds) if ind in chimapinds_plot])
            if normchivalsbool:
                print 'before', usablevals.sum()
                normval=numpy.max(usablevals)
                usablevals/=normval
                print 'after', usablevals.sum()
            else:
                normval=1.
            if savetex:
                # cinds-1: convert back from 1-based chimap indeces to chigrid
                savearr[pointind][cinds-1]=vals/normval
                q_peaks[pointind]=centerq
                dq_peaks[pointind]=qw
                savenormvals[pointind]=normval
            self.chicounts[-1, indboolarr]=usablevals[:]
            print '**', self.chicounts.shape, self.chicounts.sum()
        h5file.close()
        if savetex:
            # pad the ragged per-point pixel-index lists to a rectangular
            # uint16 array; 32767 marks unused slots
            maxnuminds=max([len(xind) for xind, yind in ind2dlist])
            ind2dsavearr=numpy.ones((npts, 2, maxnuminds), dtype='uint16')*32767
            for pointind, ind2d in zip(saveinds, ind2dlist):
                xind, yind = ind2d
                ind2dsavearr[pointind, 0, :len(xind)]=xind[:]
                ind2dsavearr[pointind, 1, :len(yind)]=yind[:]
            # reopen read/write and (re)create texture/<texturesavename>
            h5file=h5py.File(self.h5path, mode='r+')
            h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
            h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
            if 'texture' in h5mar:
                h5tex=h5mar['texture']
            else:
                h5tex=h5mar.create_group('texture')
            if texturesavename in h5tex:
                del h5tex[texturesavename]
            h5texgrp=h5tex.create_group(texturesavename)
            pointlist=[]
            for ind, arr in enumerate(savearr):#do this check in case saveinds included a point where everything ended being nan
                if not numpy.all(numpy.isnan(arr)):
                    pointlist+=[ind]
            h5texgrp.attrs['pointlist']=pointlist
            h5texgrp.attrs['chigrid']=self.chigrid
            h5texgrp.attrs['chiminplot']=chiminplot
            h5texgrp.attrs['chimaxplot']=chimaxplot
            h5texgrp.attrs['chiindexmin']=chiindexmin
            h5texgrp.attrs['chiindexmax']=chiindexmax
            h5texgrp.attrs['q_peaks']=q_peaks
            h5texgrp.attrs['qhalfwidth']=dq_peaks
            h5texgrp.attrs['normvals']=savenormvals #will be 1s and 0s if the was no normalization
            if bin:
                b=self.bin
            else:
                b=0
            h5texgrp.attrs['bin']=b
            h5texgrp.attrs['bckndbool']=int(bckndbool)
            h5texgrp.create_dataset('icounts', data=savearr)
            h5texgrp.create_dataset('ind2d', data=ind2dsavearr)
            h5file.close()
        self.chicounts[numpy.isnan(self.chicounts)]=0. #ideally would use nan to make a masked interp plot but not implemented yet
        self.chidrawbool=True
        self.interpdraw()
def peakextractdraw(self):
selectlist=self.getselectlist()
if len(selectlist)==0:
print 'abort plotting. no slected images'
return
print 'below is the info of the brightest peak in the selected range witha line for every point in poinlist. This is for pasting into a spreadhseet. copy until ^^^^^^^^^^^^\n','\t'.join(('index','q','hwhm','height','sigq','sighwhm','sigheight'))
self.pointind_extractedpeaks, peakinfo=getpeaksinrange(self.h5path, self.h5groupstr, indlist=selectlist, qmin=self.peakextractqminSpinBox.value(), qmax=self.peakextractqmaxSpinBox.value(), returnonlyq=False, performprint=True)
print '^^^^^^^^^^^^^^'
newimlist=''
for pointind in self.pointind_extractedpeaks:
newimlist+=',%d' %pointind
self.selectedimagesTextBrowser.setPlainText(newimlist[1:])
self.navw.plotpoints(self.pointlist, [], self.pointind_extractedpeaks)
self.q_extractedpeaks=numpy.float32(peakinfo[:, 0])
self.hwhm_extractedpeaks=numpy.float32(peakinfo[:, 1])
self.chidraw()
self.chipeakorinfoplotw.performplot([self.infovalsarr, self.q_extractedpeaks])
self.plotw.fig.canvas.draw()
self.navw.fig.canvas.draw()
    def interpdraw(self):
        """Draw the 2D image of 1D spectra (or chi distributions) vs sample info.

        The Y-axis is built from the YInfoMathTextBrowser expression evaluated
        for the selected points and mapped onto a regular grid (interpolated
        or nearest-neighbor, per the interp checkbox).  The X-axis is the Q
        index range - or chi indeces when called from chidraw() (i.e. when
        self.chidrawbool is True).  Optionally overlays extracted-peak
        positions, data-position markers and PDF reference lines, then renders
        into self.plotw.
        """
        if self.chidrawbool:
            # called from chidraw(): plot the precomputed chi distributions
            qminplot=self.chiminSpinBox.value()
            qmaxplot=self.chimaxSpinBox.value()
            qgrid=self.chigrid
            selectlist=self.pointind_extractedpeaks
        else:
            qminplot=self.XinfominSpinBox.value()
            qmaxplot=self.XinfomaxSpinBox.value()
            qgrid=self.qgrid
            selectlist=numpy.uint16(self.getselectlist())
        # regular grid of info values for the Y-axis
        infovalsarr_interpto=numpy.linspace(self.YinfominSpinBox.value(), self.YinfomaxSpinBox.value(), num=self.YinfonumSpinBox.value())
        qindexmin=ind_qgrid_q(qgrid, qminplot, fractional=False)
        qindexmax=ind_qgrid_q(qgrid, qmaxplot, fractional=False)
        qindarr=numpy.uint16(range(qindexmin, qindexmax+1))
        # per-point normalization and Y-axis values from the math expressions
        normarray=self.CalculateInfoVals(str(self.XInfoMathTextBrowser.toPlainText()), selectlist)
        self.infovalsarr=self.CalculateInfoVals(str(self.YInfoMathTextBrowser.toPlainText()), selectlist)
        h5file=h5py.File(self.h5path, mode='r')
        h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
        h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
        if self.chidrawbool:
            counts=self.chicounts
        else:
            # load the 1D-XRD dataset chosen in the combo box, restricted to
            # the selected points and plotted Q window
            datatypestr=unicode(self.xrdtypeComboBox.currentText())
            if datatypestr=='ifcounts':
                counts=readh5pyarray(h5file[self.h5datagrpstr]['ifcounts'])[selectlist][:, qindarr]
            elif datatypestr=='idcounts':
                counts=readh5pyarray(h5file[self.h5datagrpstr]['idcounts'])[selectlist][:, qindarr]
            elif datatypestr=='imcounts':
                counts=readh5pyarray(h5file[self.h5datagrpstr]['imcounts'])[selectlist][:, qindarr]
            else:
                counts=readh5pyarray(h5file[self.h5datagrpstr]['icounts'])[selectlist][:, qindarr]
        if self.plotpeaksCheckBox.isChecked() and not self.chidrawbool:
            pkcounts=readh5pyarray(h5file[self.h5datagrpstr]['pkcounts'])
        h5file.close()
        if numpy.any(numpy.isnan(counts)):
            QMessageBox.warning(self,"failed", "In that range, 1d data contained NaN. Aborting")
            return
#        data=None this method interped in both directions whcih causes problems
#        fullinfovalsarr=[]
#        for cnt, imnum in enumerate(selectlist):
#            fullinfovalsarr+=[infovalsarr[cnt]]*len(qindarr)
#            if data is None:
#                data=counts[imnum, qindarr]
#            else:
#                data=numpy.append(data, counts[imnum, qindarr])
#        fullinfovalsarr=numpy.float32(fullinfovalsarr)
#        fullqindarr=numpy.float32(list(qindarr)*len(selectlist))
#xrdUI.py
#        interpolator=scipy.interpolate.interp2d(fullqindarr,fullinfovalsarr,data)#flattened since not regular. x interpolation in indeces and y in actual values
#
#        plotdata=interpolator(qindarr, infovalsarr_interpto)
#        print infovalsarr_interpto
#        print self.infovalsarr
#        print 'raw', counts[:,0]
        #on 17Mar2009 discover problem with Hanjong's BiTiO sample where if interp vs x or z the interp uses the 1st value at a given q for all of info axis. Plotting vs IND is ok and plotting vs -x or -z is ok. But reversing the infovals doesnt change anything. the below sorting solved the problem for some unknown reason
        sortmap=numpy.argsort(self.infovalsarr)
        self.infovalsarr=self.infovalsarr[sortmap]
        normarray=normarray[sortmap]
        counts=counts[sortmap]
        if self.interpCheckBox.isChecked():
            # column-wise 1D interpolation of normalized counts onto the
            # regular info grid
            plotdata=numpy.float32([scipy.interp(infovalsarr_interpto, self.infovalsarr, arr/normarray) for arr in counts.T]).T
        else:
            # nearest-neighbor: pick the closest measured info value per row
            cl=[numpy.argmin((self.infovalsarr-iv)**2) for iv in infovalsarr_interpto]
            plotdata=numpy.float32([counts[c]/normarray[c] for c in cl])
        infoplotindeces=scipy.interp(self.infovalsarr, infovalsarr_interpto, numpy.float32(range(len(infovalsarr_interpto))))#need to plot in indeces since plotting over imshow so use the full interpolated grid with its indeces to figure out where the original data will lie
        cmap=self.getcmap()
        print cmap
        self.plotw.axes.hold(False)
        if self.logCheckBox.isChecked():
            # clip below the cutoff, then log10(x+1) if all values positive
            plotdata[plotdata<self.logcutSpinBox.value()]=self.logcutSpinBox.value()
            if (plotdata<=0).sum()==0:
                plotdata=numpy.log10(plotdata+1)
            else:
                print 'log not taken because there is data <=0'
        self.plotw.performplot(plotdata, upperorigin=False, cmap=cmap, aspect=.75*qindarr.size/infovalsarr_interpto.size)
        self.plotw.axes.hold(True)
        if self.datamarkerCheckBox.isChecked():
            # marker on the y-axis at each measured spectrum's info position;
            # style string is <color><marker><size>, e.g. 'r>10'
            marks=([-0.5]*len(infoplotindeces), infoplotindeces)
            styletext=unicode(self.datamarkerstyleLineEdit.text())
            self.plotw.axes.plot(marks[0],marks[1], styletext[:2], markersize=eval(styletext[2:]))
        if self.plotpeaksCheckBox.isChecked() and not self.chidrawbool:
            peakqplotindlist=[]
            peakinfoplotindlist=[]
            # NOTE(review): bare expression below has no effect - leftover?
            selectlist
            for peakind, infoplotind in zip(selectlist[sortmap], infoplotindeces):
                qvalarray, garb, garb=peakinfo_pksavearr(pkcounts[peakind])
                qplotind=ind_qgrid_q(qgrid, qvalarray, fractional=True)-qindexmin #this is based on qindarr=numpy.uint16(range(qindexmin, qindexmax+1))
                maxallowed=qindarr.size-1
                # keep only peaks inside the plotted Q window
                qplotind=qplotind[(qplotind>=0)&(qplotind<=maxallowed)]
                peakinfoplotindlist+=[infoplotind]*qplotind.size
                peakqplotindlist+=list(qplotind)
            styletext=unicode(self.peaksstyleLineEdit.text())
            self.plotw.axes.plot(peakqplotindlist,peakinfoplotindlist, styletext[:2],markersize=eval(styletext[2:]))
            print '$', qvalarray
            print '@', ind_qgrid_q(qgrid, qvalarray, fractional=True)-qindexmin
            print '^', peakqplotindlist
            print '%', peakinfoplotindlist
            print '*1',self.qgrid
        #plot PDF lines
        pdfinfostr=str(self.pdfplotinfoLineEdit.text())
        if len(pdfinfostr.strip())>0:
            #try:
            # line edit format: 'ymin,ymax,colorstyle,linewidth-expression'
            pdfymin, pdfymax, pdfcolstr, lwstr=[s.strip() for s in pdfinfostr.split(',')]
            pdfrange=numpy.float32([pdfymin, pdfymax])
            pdfrangeind=scipy.interp(pdfrange, infovalsarr_interpto, numpy.float32(range(len(infovalsarr_interpto))))
            h=[]
            pdfqlist=[]
            for d, height in self.pdfentry:
                h+=[height]
                pdfqlist+=[d]
            # h holds relative peak heights (normalized to max) and may be
            # referenced by the eval'd linewidth expression below
            h=numpy.float32(h)
            h/=h.max()
            pdfqlist=numpy.float32(pdfqlist)
            pdfqindlist=ind_qgrid_q(qgrid, pdfqlist, fractional=True)-qindexmin
            pdflwlist=eval(lwstr)#which may contain the variable 'h' which will be the relative peak height
            if not isinstance(pdflwlist, numpy.ndarray):
                pdflwlist=pdflwlist*numpy.ones(pdfqlist.shape)
            for pdfqind, pdflw in zip(pdfqindlist, pdflwlist):
                self.plotw.axes.plot([pdfqind, pdfqind], pdfrangeind, pdfcolstr, linewidth=pdflw)
            #except:
                #print 'ERROR IN PLOTTING PDF LINES!!'
        # 5 evenly spaced ticks per axis, labeled in physical units
        qlabelind=numpy.uint16(range(5))*(len(qindarr)-1)//4.0
        qlabels=['%.2f' %q_qgrid_ind(qgrid, qindarr[i]) for i in qlabelind]
        self.plotw.axes.set_xticks(qlabelind)
        self.plotw.axes.set_xticklabels(qlabels)
        if self.chidrawbool or 'tex' in self.type:
            self.plotw.axes.set_xlabel('fiber texture angle (deg)')
        else:
            self.plotw.axes.set_xlabel('scattering vector (1/nm)')
        ylabelind=numpy.uint16(range(5))*(len(infovalsarr_interpto)-1)//4.0
        ylabels=['%.2f' %infovalsarr_interpto[i] for i in ylabelind]
        self.plotw.axes.set_yticks(ylabelind)
        self.plotw.axes.set_yticklabels(ylabels)
        self.plotw.axes.set_ylabel(str(self.YlabelLineEdit.text()))
        self.plotw.axes.set_xlim([-0.5, plotdata.shape[1]+0.5])
        self.plotw.axes.set_ylim([-0.5, plotdata.shape[0]+0.5])
        # reset so the next direct call plots Q-space data again
        self.chidrawbool=False
        self.plotw.fig.canvas.draw()
        self.plotw.axes.hold(False)
def pdfsetup(self):
if 'h5tex' in self.type:
idialog=pdfDialog(self, filename='TextureDatabase.txt', cvtfcn=lambda x:x)
else:
idialog=pdfDialog(self)
if idialog.exec_():
#label=unicode(idialog.labellineEdit.text())
self.pdfentry=idialog.pdflist[idialog.pdfcomboBox.currentIndex()]
colstr=unicode(idialog.colorlineEdit.text())
if colstr=='':
colstr='k:'
lwstr='4*h'
rangestr=`self.YinfominSpinBox.value()`+','+`self.YinfomaxSpinBox.value()`
self.pdfplotinfoLineEdit.setText(','.join((rangestr, colstr, lwstr)))
def picclickprocess(self, picnum):
picname='%d' %picnum
selectlist=sorted(list(set(self.getselectlist()+[picnum])))
newimlist=''
for pointind in selectlist:
newimlist+=',%d' %pointind
self.selectedimagesTextBrowser.setPlainText(newimlist[1:])
self.navw.plotpoints(self.pointlist, [], selectlist)
self.navw.fig.canvas.draw()
def tooltips(self):
try:
self.xrdtypeComboBox.setToolTip('choose name of dataset to be plotted')
except:
None
try:
self.peaksstyleLineEdit.setToolTip('matplotlib style string without quotation\nmarks, e.g. <color><pointstyle><linestyle>')
except:
None
try:
self.datamarkerCheckBox.setToolTip('matplotlib stytle string for markers\nthat will appear on the y-axis to denote\nthe positions of data')
except:
None
try:
self.xrdtypeLabel.setToolTip('')
except:
None
try:
self.plotxzCheckBox.setToolTip('')
except:
None
try:
self.xzstyleLineEdit.setToolTip('matplotlib style string for\nplots of Xinfo vs Yinfo')
except:
None
try:
self.datastyleLineEdit.setToolTip('matplotlib style string')
except:
None
try:
self.cmapLineEdit.setToolTip('any cmap name from matplotlib.cm\ndefault is jet')
except:
None
try:
self.pdfplotinfoLineEdit.setToolTip('ymin and ymax are numeric values of\nthe y-axis over which the PDF lines will\nbe plotted. colstr is the matplotlib color character,\nlinew is the width of the PDF lines and the character\n"h" can be used to represent the peak height so that the\nline width can be made proportional to the peak height.')
except:
None
try:
self.interpCheckBox.setToolTip('if unchecked, the number of "pixels" in\nthe y-direction will be "numy Y info pts",\nif unchecked there will be one pixel for each datapoint')
except:
None
try:
self.logCheckBox.setToolTip('the false color scale will be\nlogarithmic and the numbers in the\ncolorbar will be the log10 values')
except:
None
try:
self.logcutSpinBox.setToolTip('everything below this value\nwill be set to this value')
except:
None
try:
self.cmaponethirdSpinBox.setToolTip('if this value is smaller (larger) than .33,\nthe bottom third of the cmap color range will\nbe shrunk (expanded). if the colorbar does\nnot change as you expect, try closing and\nreopening this window.')
except:
None
try:
self.YinfominSpinBox.setToolTip('The min and max values will become\nthe limits of the plot axis. In some cases,\nthe range cannot extend beyond the available data\nbut sometimes that is fine.')
except:
None
try:
self.XinfominSpinBox.setToolTip('The min and max values will become\nthe limits of the plot axis. In some cases,\nthe range cannot extend beyond the available data\nbut sometimes that is fine.')
except:
None
try:
self.clearplotsButton.setToolTip('The main image will be plotted over\nolder images, but any symbols\nwill cummulate with repeated plotting commands.\nPress this to clear everything')
except:
None
try:
self.imgLabel.setToolTip('will appear in filename of saved figure\n(AVOID ILLEGAL CHARACTERS)')
except:
None
try:
self.InfoTextBrowser.setToolTip('')
except:
None
try:
self.YInfoMathTextBrowser.setToolTip('This string can contain math commands and the\nkeys indicates in the list to the left\n(capital letters for the corresponding sample\ninfo and IND for the spec index. For example,\n"IND*numpy.sqrt(B**2+A**2)"')
except:
None
try:
self.XInfoMathTextBrowser.setToolTip('')
except:
None
try:
self.spdshFormatLineEdit.setToolTip('The expression will be evaluated and the numeric\nresults will be pu into the spreadhseet string\nusing this Python number->string conversion code')
except:
None
try:
self.selectedimagesTextBrowser.setToolTip('When the expressions in the below fields are evaluated,\nonly the spec indeces included in this comma-delimited\nlist will be used. You can delete indeces by removing\nthe text or "parse pts, avoid NaN" which will evaluate\nthe expressions and remove the indeces that yielded a NaN\nresult. You can add indeces by typing the numbers,\nclicking the navigator, or "include all points"')
except:
None
try:
self.peakextractqminSpinBox.setToolTip('The center of the Q-window used for gathering fiber\ntexture data will be the position of the largest\n(biggest "height" value) peak in the range of Q values\nspecified here.')
except:
None
try:
self.chiqwidthCheckBox.setToolTip('Select whether to use the specified Q-width\nfor every spec index or to use the specifiec\nnumber of HWHM from the curve fitting')
except:
None
try:
self.chiqwidthSpinBox.setToolTip('This is a half-interval of Q for the texture or\na number of half-widths of the identified Bragg peak -\nthis determines the Q-window used in the\ntexture calculation')
except:
None
try:
self.chiminSpinBox.setToolTip('The PSI range over which the texture will\nbe analyzed is specified here. This will\ndefault to the range indicate by measurement grid,\nbut for a given Q-range the PSI-range may be smaller.\nIf the specified range reaches beyond the data, the texture\nresults in the non-existent range will be NaN')
except:
None
try:
self.fulltexplotComboBox.setToolTip('The sides of the detector can be analyzed\nseparately or avergaed together (only average if\nyou know the symmetry is near perfect).')
except:
None
try:
self.interpchiCheckBox.setToolTip('If unchecked the pixel width in the PSI-axis will\nbe the spacing in the PSI measurement grid, if check\ninterpolation will be used to make a smoother image.')
except:
None
try:
self.normchivalsCheckBox.setToolTip('')
except:
None
try:
self.texturesaveLineEdit.setToolTip('This will be the name of the\ntexture group in the .h5 file.')
except:
None
def includeallimages(self):
newimlist=''
for pointind in self.pointlist:
newimlist+=',%d' %pointind
self.selectedimagesTextBrowser.setPlainText(newimlist[1:])
self.navw.plotpoints(self.pointlist, [])
self.navw.fig.canvas.draw()
def save(self):
self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, unicode(self.imgLabel.text())))).replace('\\','/').encode())
def savenavimage(self):
self.navw.save(os.path.join(self.runpath, ''.join((self.savename1, '_2DIntPlotPoints', unicode(self.imgLabel.text())))).replace('\\','/').encode())
def getcmap(self):
try:
cmap=eval('matplotlib.cm.'+str(self.cmapLineEdit.text()))
initvals=numpy.arange(256)
rgblist=cmap(initvals)[:, :3]
except:
initvals=numpy.array([0, 0.333, .666, 1.0])
rgblist=numpy.array([[0,.1,0],[.3,.4,.33],[.6,.7,.66],[.9,1.0,1.0]])
inds=numpy.arange(initvals.size)/(initvals.size-1.0)
interppoints=numpy.arange(4)/(3.0)
interpvals=numpy.array([0.0, self.cmaponethirdSpinBox.value(), self.cmaptwothirdsSpinBox.value(), 1.0])
stretchedvals=numpy.interp(inds, interppoints, interpvals)
cdict=dict(red=[], green=[], blue=[])
for v,col in zip(stretchedvals,rgblist):
r,g,b=col
cdict['red'].append((v, r, r))
cdict['green'].append((v, g, g))
cdict['blue'].append((v, b, b))
return matplotlib.colors.LinearSegmentedColormap('mycolors', cdict)
def substrateinfoplot(self):
if not len(self.attrdict['acquisition_shape'])!=2:
print 'ABORTING PLOT: ONLY SUPPORT FOR MESH' # if support for linear scans is added, the 'USER-COMPILED' cases need special treatment as xgrid and zgrid are meaningless
return
selectlist=numpy.uint16(self.getselectlist())
valarr=self.CalculateInfoVals(str(self.YInfoMathTextBrowser.toPlainText()), selectlist)
xarr=self.sampleinfo['x(mm)'][selectlist]
zarr=self.sampleinfo['z(mm)'][selectlist]
#ylim=self.plotw.axes.get_ylim()
#xlim=self.plotw.axes.get_xlim()
self.plotw.axes.hold(False)
x_interpto=numpy.linspace(xarr.min(), xarr.max(), self.xgrid[2])
z_interpto=numpy.linspace(zarr.min(), zarr.max(), self.zgrid[2])
interpolator=scipy.interpolate.interp2d(xarr, zarr, valarr)#flattened since not regular. x interpolation in indeces and y in actual values
plotdata=interpolator(x_interpto, z_interpto)
self.plotw.performplot(plotdata, upperorigin=True, cmap=str(self.cmapLineEdit), extent=(xarr.min(), xarr.max(), zarr.min(), zarr.max()))
#self.plotw.axes.set_ylim(ylim)
#self.plotw.axes.set_xlim(xlim)
self.plotw.axes.hold(True)
if self.plotxzCheckBox.isChecked():
self.plotw.inmark=str(self.xzstyleLineEdit)
plotpts=selectlist
else:
plotpts=[]
self.plotw.plotpoints(self.pointlist, [], plotpts) #this include plotting circle and formatting axis
self.plotw.fig.canvas.draw()
def SaveSpreadSheet(self):
f=open(os.path.join(self.runpath, str(self.spdshsavenameLineEdit.text())).replace('\\','/').encode(), 'w')
f.write(str(self.spdshTextBrowser.toPlainText()))
f.close()
def xyplotsave(self):
if self.interpstyle:
temp='_extractedpeaks'
elif self.infostyle:
temp='_'+str(self.YlabelLineEdit.text())+'vs'+str(self.XlabelLineEdit.text())
self.chipeakorinfoplotw.save(os.path.join(self.runpath, ''.join((self.savename1, unicode(self.imgLabel.text()), temp))).replace('\\','/').encode())
def xyinfoplot(self):
selectlist=numpy.uint16(self.getselectlist())
yarr=self.CalculateInfoVals(str(self.YInfoMathTextBrowser.toPlainText()), selectlist)
xarr=self.CalculateInfoVals(str(self.XInfoMathTextBrowser.toPlainText()), selectlist)
datastylestr=str(self.datastyleLineEdit.text())
stylelist=[]
while ',' in datastylestr:
temp, garbage, datastylestr=datastylestr.partition(',')
temp.replace(' ', '')
datastylestr.replace(' ', '')
stylelist+=[temp]
datastylestr.replace(' ', '')
stylelist+=[datastylestr]
self.chipeakorinfoplotw.axes.hold(self.xyplotoverlayCheckBox.isChecked())
for style in stylelist:
#self.chipeakorinfoplotw.performplot([xarr, yarr], overlay=True, axesformat='', formstr=style)
self.chipeakorinfoplotw.axes.plot(xarr, yarr, style)
self.chipeakorinfoplotw.axes.hold(True)
self.chipeakorinfoplotw.axes.set_ylabel(str(self.YlabelLineEdit.text()))
self.chipeakorinfoplotw.axes.set_xlabel(str(self.XlabelLineEdit.text()))
self.chipeakorinfoplotw.axes.set_ylim([self.YinfominSpinBox.value(), self.YinfomaxSpinBox.value()])
self.chipeakorinfoplotw.axes.set_xlim([self.XinfominSpinBox.value(), self.XinfomaxSpinBox.value()])
self.chipeakorinfoplotw.fig.canvas.draw()
def Ygetinfominmax(self):
selectlist=numpy.uint16(self.getselectlist())
if len(selectlist)==0:
return
infovalsarr=self.CalculateInfoVals(str(self.YInfoMathTextBrowser.toPlainText()), selectlist)
self.YinfominSpinBox.setValue(numpy.min(infovalsarr))
self.YinfomaxSpinBox.setValue(numpy.max(infovalsarr))
def Xgetinfominmax(self):
selectlist=numpy.uint16(self.getselectlist())
if self.interpstyle:
h5file=h5py.File(self.h5path, mode='r')
datatypestr=unicode(self.xrdtypeComboBox.currentText())
if datatypestr=='ifcounts':
counts=readh5pyarray(h5file[self.h5datagrpstr]['ifcounts'])[selectlist]
elif datatypestr=='idcounts':
counts=readh5pyarray(h5file[self.h5datagrpstr]['idcounts'])[selectlist]
elif datatypestr=='imcounts':
counts=readh5pyarray(h5file[self.h5datagrpstr]['imcounts'])[selectlist]
else:
counts=readh5pyarray(h5file[self.h5datagrpstr]['icounts'])[selectlist]
notnanbool=numpy.bool_([numpy.logical_not(numpy.any(numpy.bool_(numpy.isnan(arr)))) for arr in counts.T])
self.XinfominSpinBox.setValue(numpy.min(self.qvals[notnanbool]))
self.XinfomaxSpinBox.setValue(numpy.max(self.qvals[notnanbool]))
h5file.close()
elif self.infostyle:
if len(selectlist)==0:
return
infovalsarr=self.CalculateInfoVals(str(self.XInfoMathTextBrowser.toPlainText()), selectlist)
self.XinfominSpinBox.setValue(numpy.min(infovalsarr))
self.XinfomaxSpinBox.setValue(numpy.max(infovalsarr))
def CalculateInfoVals(self, mathstr, pts):
if mathstr=='':
return numpy.ones(len(pts), dtype='float32')
pts=numpy.uint16(pts)
mathstr=mathstr.replace('IND', 'numpy.float32(pts)')
d=self.allinfodict
for vc in d.keys():
mathstr=mathstr.replace(vc,"d['%s'][pts]" %vc)
print 'Calculating: ', mathstr
try:
arr=eval(mathstr)
return arr
except:
print 'ERROR IN INFO CALCULATION - using ones'
return numpy.ones(len(pts), dtype='float32')
def getselectlist(self):
imlist=unicode(self.selectedimagesTextBrowser.toPlainText())
selectlist=[]
while len(imlist.partition(',')[0])>0:
numstr, garb, imlist=imlist.partition(',')
selectlist+=[eval(numstr)]
if len(selectlist)==0:
print 'WARNING. no slected images'
return []
return sorted(list(set(selectlist)))
def AppendToSpreadSheet(self, mathstr, label=''):
selectlist=self.getselectlist()
if len(selectlist)==0:
print 'ABORTING: no indeces selected'
return
if len(self.spdshselectlist)>0 and selectlist!=self.spdshselectlist:
print 'ABORTING: cannot append to spreadsheet because the select index set is different'
return
if len(self.spdshselectlist)==0:
temp=['SpecInd']+[`int(round(i))` for i in selectlist]
self.spdshTextBrowser.setPlainText('\n'.join(temp))
self.spdshselectlist=selectlist
arr=self.CalculateInfoVals(mathstr, selectlist)
fs='%'+str(self.spdshFormatLineEdit.text())
temp=[label]+[fs %val for val in arr]
lines=str(self.spdshTextBrowser.toPlainText()).splitlines()
for i, st in enumerate(temp):
lines[i]+='\t%s' %st
self.spdshTextBrowser.setPlainText('\n'.join(lines))
def ClearSpreadSheet(self):
self.spdshselectlist=[]
self.spdshTextBrowser.setPlainText('')
def YappendSpreadSheet(self):
self.AppendToSpreadSheet(str(self.YInfoMathTextBrowser.toPlainText()), str(self.YlabelLineEdit.text()))
def XappendSpreadSheet(self):
self.AppendToSpreadSheet(str(self.XInfoMathTextBrowser.toPlainText()), str(self.XlabelLineEdit.text()))
def ParseIndAvoidNaN(self):
selectlist=numpy.uint16(self.getselectlist())
yarr=self.CalculateInfoVals(str(self.YInfoMathTextBrowser.toPlainText()), selectlist)
xarr=self.CalculateInfoVals(str(self.XInfoMathTextBrowser.toPlainText()), selectlist)
selectlist=selectlist[numpy.logical_not(numpy.isnan(yarr)) & numpy.logical_not(numpy.isnan(xarr))]
newimlist=''
for pointind in selectlist:
newimlist+=',%d' %pointind
self.selectedimagesTextBrowser.setPlainText(newimlist[1:])
self.navw.plotpoints(self.pointlist, [], selectlist)
self.navw.fig.canvas.draw()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class neighborwindow(QDialog):
def __init__(self, parent, h5path, h5groupstr, runpath):
super(neighborwindow, self).__init__(parent)
self.h5path=h5path
self.h5groupstr=h5groupstr
self.runpath=runpath
self.savename1='_'.join((os.path.split(self.h5path)[1][0:-3], self.h5groupstr, ''))
self.imnamelist=[]
h5file=h5py.File(self.h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
self.attrdict=getattr(self.h5path, self.h5groupstr)
self.bin=getbin(h5analysis)
self.pointlist=self.attrdict['pointlist']
elstr=self.attrdict['elements']
self.DPelstrlist, self.DPcompsarr=(None, None)
#using only tenrary compositions!
if 'depprof' in h5analysis:
self.DPelstrlist, self.DPcompsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype='DPmolfracALL')
self.XRFelstrlist, self.XRFcompsarr=(None, None)
if 'xrf' in h5analysis:
self.XRFelstrlist, self.XRFcompsarr=getternarycomps(self.h5path, self.h5groupstr, elstr=elstr, infotype='XRFmolfracALL')
h5file.close()
self.xgrid=self.attrdict['xgrid']
self.zgrid=self.attrdict['zgrid']
self.xcoords=self.attrdict['x']
self.zcoords=self.attrdict['z']
self.typeComboBox=QComboBox()
self.compnavw=None
if not self.DPcompsarr is None:
self.compnavw = compnavigatorwidget(self, self.DPcompsarr, self.DPelstrlist)
self.typeComboBox.insertItem(999, 'COMP:DepProf with '+','.join(self.DPelstrlist))
elif not self.XRFcompsarr is None:
self.compnavw = compnavigatorwidget(self, self.XRFcompsarr, self.XRFelstrlist)
if not self.XRFcompsarr is None:
self.typeComboBox.insertItem(999, 'COMP:XRF with '+','.join(self.XRFelstrlist))
self.posnnavw = subnavigatorwidget(self, self.xgrid, self.zgrid, self.xcoords, self.zcoords)
self.typeComboBox.insertItem(999, 'POSITION')
#QObject.connect(self.navw, SIGNAL("picclicked"), self.picclickprocess)
#QObject.connect(self.typeComboBox,SIGNAL("currentIndexChanged()"),self.typechanged)
QObject.connect(self.typeComboBox,SIGNAL("activated(QString)"),self.typechanged)
self.typeLabel=QLabel()
self.typeLabel.setText('type of data for\nneighbor calc')
self.dlnyCheckBox=QCheckBox()
self.dlnyCheckBox.setText('Use Delaunay Triangulation')
self.dlnyCheckBox.setChecked(True)
self.setWindowTitle('Calculate and plot map of data point neighbor')
self.calcButton=QPushButton()
self.calcButton.setText('calculate neighbors')
QObject.connect(self.calcButton,SIGNAL("pressed()"),self.neighborcalc)
self.saveButton=QPushButton()
self.saveButton.setText('save neighbors\nfor analysis')
QObject.connect(self.saveButton,SIGNAL("pressed()"),self.saveneigh)
self.saveposnnavimageButton=QPushButton()
self.saveposnnavimageButton.setText('save .png\npositions')
QObject.connect(self.saveposnnavimageButton,SIGNAL("pressed()"),self.saveposnnavimage)
self.savecompnavimageButton=QPushButton()
self.savecompnavimageButton.setText('save .png\ncompositions')
QObject.connect(self.savecompnavimageButton,SIGNAL("pressed()"),self.savecompnavimage)
self.radiusLabel=QLabel()
self.radiusLabel.setText('radius for neighbor\nassociation, at.frac or mm')
self.radiusSpinBox=QDoubleSpinBox()
self.radiusSpinBox.setRange(0, 999.)
self.radiusSpinBox.setValue(.15)
layout=QGridLayout()
layout.addWidget(self.radiusLabel, 1, 0, 1, 1)
layout.addWidget(self.radiusSpinBox, 2, 0, 1, 1)
layout.addWidget(self.typeLabel, 3, 0, 1, 1)
layout.addWidget(self.typeComboBox, 4, 0, 1, 1)
layout.addWidget(self.dlnyCheckBox, 5, 0, 1, 1)
layout.addWidget(self.calcButton, 6, 0, 1, 1)
layout.addWidget(self.saveButton, 7, 0, 1, 1)
layout.addWidget(self.saveposnnavimageButton, 0, 4, 1, 1)
layout.addWidget(self.posnnavw, 1, 1, 8, 4)
self.posnnavw.plotpoints(self.pointlist, [])
if not self.compnavw is None:
layout.addWidget(self.savecompnavimageButton, 0, 8, 1, 1)
layout.addWidget(self.compnavw, 1, 5, 8, 4)
self.compnavw.plotpoints(self.pointlist, [])
self.setLayout(layout)
self.neighbors=None
self.typeComboBox.setCurrentIndex(0)
def typechanged(self, garbage):
typestr=unicode(self.typeComboBox.currentText())
if 'COMP' in typestr:
if 'DepProf' in typestr:
self.compnavw.reinit(comp=self.DPcompsarr, elstrlist=self.DPelstrlist)
elif 'XRF' in typestr:
self.compnavw.reinit(comp=self.XRFcompsarr, elstrlist=self.XRFelstrlist)
self.compnavw.fig.canvas.draw()
def neighborcalc(self):
self.usedlny=self.dlnyCheckBox.isChecked()
self.critdist=self.radiusSpinBox.value()
self.typestr=unicode(self.typeComboBox.currentText())
if 'COMP' in self.typestr:
if 'DepProf' in self.typestr:
if self.usedlny:
self.neighbors=findcompnieghbors(self.DPcompsarr, pointlist=self.pointlist)
else:
self.neighbors=findneighborswithinradius(compdistarr_comp(self.DPcompsarr), self.critdist, pointlist=self.pointlist)
elif 'XRF' in self.typestr:
if self.usedlny:
self.neighbors=findcompnieghbors(self.XRFcompsarr, pointlist=self.pointlist)
else:
self.neighbors=findneighborswithinradius(compdistarr_comp(self.XRFcompsarr), self.critdist, pointlist=self.pointlist)
elif 'POSITION' in self.typestr:
if self.usedlny:
self.neighbors=findposnnieghbors(self.xcoords, self.zcoords, pointlist=self.pointlist, critdist=self.critdist)
else:
dist=numpy.sqrt(numpy.add.outer(self.xcoords, -1.0*self.xcoords)**2+numpy.add.outer(self.zcoords, -1.0*self.zcoords)**2)
self.neighbors=findneighborswithinradius(dist, self.critdist, pointlist=self.pointlist)
print 'Neighbors'
print self.neighbors
if not self.neighbors is None:
self.posnnavw.reinit()
self.compnavw.reinit()
self.posnnavw.plotneighbors(self.neighbors)
self.compnavw.plotneighbors(self.neighbors)
self.posnnavw.fig.canvas.draw()
self.compnavw.fig.canvas.draw()
def saveneigh(self):
if self.neighbors is None:
QMessageBox.warning(self,"failed", 'Neighbors have not been successfully calculated')
else:
pardict={}
pardict['calctype']=str(self.typestr)
pardict['critdist']=self.critdist
if self.usedlny:
pardict['dlny']=1
else:
pardict['dlny']=0
saveneighbors(self.h5path, self.h5groupstr, self.neighbors, pardict)
def savecompnavimage(self):
self.compnavw.save(os.path.join(self.runpath, ''.join((self.savename1, '_NeighborComposition')).replace('\\','/').encode()))
def saveposnnavimage(self):
self.posnnavw.save(os.path.join(self.runpath, ''.join((self.savename1, '_NeighborPosition')).replace('\\','/').encode()))
class plot2dchessrunwindow(QDialog):
    """Dialog that browses the datasets inside a chessrun .h5 file via a tree
    view and plots a selected 2-D array as a false-color image.

    Cleanups vs. the original: the local variable shadowing the builtin
    'range' in draw() was renamed; py2 backtick-repr replaced by repr();
    removed an unused local binding of the toolbar instance.
    """
    def __init__(self, parent, path, runpath):
        super(plot2dchessrunwindow, self).__init__(parent)
        self.path=path
        self.runpath=runpath
        self.savename1=os.path.split(self.path)[1][0:-2]
        h5chessrun=h5py.File(self.path, mode='r')
        self.treeWidget=QTreeWidget()
        self.rootitem=QTreeWidgetItem([os.path.split(self.path)[1]], 0)
        self.treeWidget.addTopLevelItem(self.rootitem)
        self.createTree(h5chessrun, self.rootitem)
        h5chessrun.close()
        self.logCheckBox=QCheckBox()
        self.logCheckBox.setText('logarithmic\nintensity')
        self.logCheckBox.setChecked(False)
        self.drawButton=QPushButton()
        self.drawButton.setText('draw image')
        QObject.connect(self.drawButton,SIGNAL("pressed()"),self.draw)
        self.saveButton=QPushButton()
        self.saveButton.setText('save .png')
        QObject.connect(self.saveButton,SIGNAL("pressed()"),self.save)
        rangelayout=QVBoxLayout()
        rangelabel=QLabel()
        rangelabel.setText('Range for cbar:')
        self.rangeLineEdit=QLineEdit()
        rangelayout.addWidget(rangelabel)
        rangelayout.addWidget(self.rangeLineEdit)
        cmaplayout=QVBoxLayout()
        cmaplabel=QLabel()
        cmaplabel.setText('cmap:')
        self.cmapLineEdit=QLineEdit()
        self.cmapLineEdit.setText('jet')
        cmaplayout.addWidget(cmaplabel)
        cmaplayout.addWidget(self.cmapLineEdit)
        toplayout=QHBoxLayout()
        toplayout.addWidget(self.drawButton)
        toplayout.addLayout(cmaplayout)
        toplayout.addLayout(rangelayout)
        toplayout.addWidget(self.logCheckBox)
        toplayout.addWidget(self.saveButton)
        self.plotw = plotwidget(self, width=5, height=5, dpi=100)
        layout=QGridLayout()
        layout.addLayout(toplayout, 1, 1, 1, 10)
        layout.addWidget(self.treeWidget, 2, 1, 10, 4)
        layout.addWidget(self.plotw, 2, 5, 10, 6)
        # instantiating the toolbar hooks it to the canvas; the instance
        # itself is not needed here
        self.plotw.gettoolbarinstance()
        self.setLayout(layout)
    def createTree(self, startnode, parentitem):
        """Recursively mirror the h5 hierarchy under *startnode* into the
        tree widget; 2-D datasets are labelled 'name(shape)'."""
        for node in startnode.iterobjects():
            if isinstance(node, h5py.Dataset) and len(node.shape)==2:
                item=QTreeWidgetItem([node.name.rpartition('/')[2]+repr(node.shape)], 0)
                parentitem.addChild(item)
            elif isinstance(node, h5py.Group):
                item=QTreeWidgetItem([node.name.rpartition('/')[2]], 0)
                parentitem.addChild(item)
                self.createTree(node, item)
    def draw(self):
        """Plot the currently selected 2-D dataset as a false-color image.

        Only dataset leaves carry a '(shape)' suffix, so other selections
        are ignored; the h5 group path is rebuilt by walking up the tree.
        """
        items=self.treeWidget.selectedItems()
        if len(items)==0:
            return
        item=items[0]
        if not '(' in str(item.text(0)):
            return
        h5grpstr=''
        childname=''
        while item!=self.rootitem:
            name=str(item.text(0))
            if '(' in name:
                name=name.partition('(')[0]
            if childname=='':
                childname=name
            h5grpstr='/'.join((name, h5grpstr))
            item=item.parent()
        h5grpstr=h5grpstr[:-1]
        self.arrname='_'.join((name, childname)) #name will be the chessrun name
        h5chessrun=h5py.File(self.path, mode='r')
        plotarr=readh5pyarray(h5chessrun[h5grpstr])
        h5chessrun.close()
        rangestr=unicode(self.rangeLineEdit.text())
        # renamed from 'range' to avoid shadowing the builtin; broad except
        # is deliberate (rangestr is free user text)
        try:
            colrange=eval(rangestr)
            if isinstance(colrange,(int,float)):
                colrange=(0., 1.*colrange)
            if len(colrange)==1:
                colrange=(0., colrange[0])
        except:
            colrange=None
        self.plotw.performplot(plotarr, log=self.logCheckBox.isChecked(), colrange=colrange)
    def save(self):
        """Save the current plot as .png, named after the dataset drawn."""
        self.plotw.save(os.path.join(self.runpath, ''.join((self.savename1, self.arrname))).replace('\\','/').encode())
class buildnewscanDialog(QDialog,
ui_buildnewscan.Ui_buildnewscanDialog):
#in order to get here, h5path and groupstr exist and analysis has been started. can replace images and a new scan will be created with the images replaced in the XRD and XRF data as well as in the analysis attrdict, but the x,z coordinates of the original scan are maintained. Can also append data from otehr scans. in this case, even if the set of x,z would coincide with a spec command, the command becomes 'USER-COMPILED' and is treated as a a sort of a2scan with arbitrary 1-D path.
def __init__(self, parent, h5path, h5groupstr):
super(buildnewscanDialog, self).__init__(parent)
self.setupUi(self)
self.h5path=h5path
self.copygroupindex=0
self.validgrp_name=[]
self.validgrp_attr=[]
self.copyable_validgrpind=[]# a list of the indeces of validgrp_ that can be copied from
h5file=h5py.File(self.h5path, mode='r')
detectors=[]
for group in h5file.iterobjects():
if isinstance(group,h5py.Group) and 'analysis' in group:
detectors=[getxrdname(group['analysis'])]
if len(detectors)==0:
h5chess=CHESSRUNFILE()
detectors=h5chess.attrs['DetectorNames']
h5chess.close()
count=0
for group in h5file.iterobjects():
if isinstance(group,h5py.Group) and ('measurement' in group) and (True in [dn in group['measurement'] for dn in detectors]):
grpname=group.name.rpartition('/')[2]
self.validgrp_name+=[grpname]
if ('analysis' in group and getxrdname(group['analysis']) in group['analysis']):
self.copyable_validgrpind+=[count]
self.validgrp_attr+=[copy.deepcopy(getattr(self.h5path, grpname))]
else:
temp_acsh=group.attrs['acquisition_shape']
if isinstance(temp_acsh, str):
temp_acsh=eval(temp_acsh)
npts=numpy.prod(numpy.int16(temp_acsh))
samx=None
samz=None
if 'samx' in group['measurement/scalar_data']:
samx=group['measurement/scalar_data/samx'][:]
if 'samz' in group['measurement/scalar_data']:
samz=group['measurement/scalar_data/samz'][:]
if samx is None:
samx=numpy.ones(npts, dtype='float32')*group['measurement/positioners/samx'].value
if samz is None:
samz=numpy.ones(npts, dtype='float32')*group['measurement/positioners/samz'].value
tempd={}
tempd['x']=samx
tempd['z']=samz
tempd['command']=group.attrs['acquisition_command']
self.validgrp_attr+=[copy.deepcopy(tempd)]
if grpname==h5groupstr:
self.copygroupindex=count
count+=1
h5file.close()
QObject.connect(self.copynameComboBox,SIGNAL("activated(QString)"),self.fillreplaceimageComboBox)
QObject.connect(self.replaceimageComboBox,SIGNAL("activated(QString)"),self.fillnewimageComboBox)
QObject.connect(self.radiusSpinBox,SIGNAL("valueChange(int)"),self.fillnewimageComboBox)
self.initcomboboxes()
def initcomboboxes(self):
self.copynameComboBox.clear()
self.appendnameComboBox.clear()
for count, ind in enumerate(self.copyable_validgrpind):
self.copynameComboBox.insertItem(count, ':'.join((self.validgrp_name[ind], self.validgrp_attr[ind]['command'])))
self.copynameComboBox.setCurrentIndex(self.copyable_validgrpind.index(self.copygroupindex))
for count, (nam, d) in enumerate(zip(self.validgrp_name, self.validgrp_attr)):
self.appendnameComboBox.insertItem(count, ':'.join((nam, d['command'])))
self.appendnameComboBox.setCurrentIndex(0)
self.fillreplaceimageComboBox()
def fillreplaceimageComboBox(self):
self.replaceimageComboBox.clear()
self.copygroupindex=self.copyable_validgrpind[self.copynameComboBox.currentIndex()]
attrdict=self.validgrp_attr[self.copygroupindex]
for count in range(numpts_attrdict(attrdict)):
self.replaceimageComboBox.insertItem(count, '%d' %count)
self.replaceimageComboBox.setCurrentIndex(0)
self.fillnewimageComboBox()
self.replacelistLineEdit.setText('')
self.newlistLineEdit.setText('')
def fillnewimageComboBox(self):
radius=self.radiusSpinBox.value()
imind=self.replaceimageComboBox.currentIndex()
x0=self.validgrp_attr[self.copygroupindex]['x'][imind]
z0=self.validgrp_attr[self.copygroupindex]['z'][imind]
possbielreplacements=[]
for count, (nam, attr) in enumerate(zip(self.validgrp_name, self.validgrp_attr)):
if count==self.copygroupindex:#do not allow replacements from within own scan - this could be achieved by user through a copy and then a copy+replace
continue
distsq=(numpy.float32(attr['x'])-x0)**2+(numpy.float32(attr['z'])-z0)**2
possbielreplacements+=['%s:%d' %(nam, i) for i in numpy.where(distsq<radius**2)[0]]
self.newimageComboBox.clear()
for count, s in enumerate(possbielreplacements):
self.newimageComboBox.insertItem(count, s)
self.newimageComboBox.setCurrentIndex(0)
@pyqtSignature("")
def on_replacePushButton_clicked(self):
self.appendcomboboxtolineedit(self.replacelistLineEdit, self.replaceimageComboBox)
self.appendcomboboxtolineedit(self.newlistLineEdit, self.newimageComboBox)
@pyqtSignature("")
def on_appendPushButton_clicked(self):
self.appendcomboboxtolineedit(self.appendlistLineEdit, self.appendnameComboBox)
def lineedittolist(self, le):
lestr=str(unicode(le.text()))
strlist=[]
lestr.strip()
while len(lestr)>0:
temp, garbage, lestr=lestr.partition(',')
temp=temp.strip()
if len(temp)>0:
strlist+=[temp]
return strlist
def appendcomboboxtolineedit(self, le, cb):
temp=str(unicode(le.text()))
if temp!='':
temp+=', '
#temp+=str(unicode(cb.currentText())).partition(':')[0]
temp+=str(unicode(cb.currentText()))
le.setText(temp)
def createnewscandict(self):
newscandict={}
sourcegrpname=str(unicode(self.copynameComboBox.currentText())).partition(':')[0]
newscandict['sourcename']=sourcegrpname
try:
xrdname=self.validgrp_attr[self.validgrp_name.index(sourcegrpname)]['xrdname']
except:
print 'FAILED TO GET THE XRD DETECTOR NAME. EITHER THIS IS AN .h5 FROM BEFORE NOV 2010 OR THERE IS A PROBLEM FINDING IT. THE SOURCE GROUP NAME IS ', sourcegrpname, ' WHICH WAS BEING LOCATED IN THE VALID GROUP LIST: ', self.validgrp_name
xrdname='mar345'
newscandict['xrdname']=xrdname
repimagelist=self.lineedittolist(self.replacelistLineEdit)
newimagelist=self.lineedittolist(self.newlistLineEdit)
replist=[]
namlist=[]
indlist=[]
for repim, newim in zip(repimagelist, newimagelist):
try:
indlist+=[eval(newim.partition(':')[2])]
namlist+=[newim.partition(':')[0]]
replist+=[eval(repim)]
except:
QMessageBox.warning(self,"failed", "Aborting because there is a formatting error in the replacement of %s by %s." %(repim, newim))
return None
newscandict['ind_tobereplaced']=replist
newscandict['newimage_scanname']=namlist
newscandict['newimage_ind']=indlist
appnamelist=self.lineedittolist(self.appendlistLineEdit)
appattrlist=[]
for appname in appnamelist:
if not appname in self.validgrp_name:
# print '*', appname, '*', len(appname)
# print self.validgrp_name, appname in self.validgrp_name
QMessageBox.warning(self,"failed", "Aborting because the append scan %s was not found." %appname)
return None
appattrlist+=[self.validgrp_attr[self.validgrp_name.index(appname)]]
newscandict['appendscan_name']=appnamelist
newscandict['appendscan_attr']=copy.deepcopy(appattrlist)
return newscandict
class xrfanalysisDialog(QDialog,
ui_xrf_analysis.Ui_XRFAnalysisDialog):
def __init__(self, parent, h5path, h5groupstr): #if pass pointlist then assume the DepProf data is there to perform the cal
super(xrfanalysisDialog, self).__init__(parent)
self.setupUi(self)
# self.FluxMethodComboBox.clear()
# self.FluxMethodComboBox.insertItem(0, 'Use Default Value')
# self.FluxMethodComboBox.insertItem(1, 'Enter Flux Value')
# QObject.connect(self.FluxMethodComboBox, SIGNAL("currentIndexChanged()"), self.fluxmethodchanged)
self.attrdict=getattr(h5path, h5groupstr)
self.h5path=h5path
self.h5groupstr=h5groupstr
self.databasedictlist=readxrfinfodatabase()
for count, d in enumerate(self.databasedictlist):
self.chessrunComboBox.insertItem(count, d['name'])
QObject.connect(self.chessrunComboBox,SIGNAL("activated(QString)"),self.chessrunchanged)
self.gunpropdict, self.dpcomp, self.dpnm=getinfoforxrf(h5path, h5groupstr)
self.ElLines=[self.ElLineEdit0, self.ElLineEdit1, self.ElLineEdit2, self.ElLineEdit3, self.ElLineEdit4]
for el, lineedit in zip(self.gunpropdict['symbol'], self.ElLines):
lineedit.setText(el)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.beamenergy=eV_nm(self.attrdict['wavelength'])/1000.0
self.dpissufficient=not (self.dpcomp is None)
if self.dpissufficient:
# self.radioButtonInd.setVisible(True)
# self.FluxIndComboBox.setVisible(True)
pointlist=self.attrdict['pointlist']
self.FluxIndComboBox.clear()
for ind in pointlist:
self.FluxIndComboBox.insertItem(999, '%d' %ind)
dist=(numpy.float32(pointlist)-numpts_attrdict(self.attrdict)//2)**2 #this is to try to default to the substrate center index
self.FluxIndComboBox.setCurrentIndex(numpy.where(dist==dist.min())[0][0])
else:
self.FluxIndComboBox.setDisabled(True)
self.radioButtonInd.setDisabled(True)
self.DepProfEstCheckBox.setChecked(False)
self.DepProfEstCheckBox.setDisabled(True)
self.UnderLineEdit.setText('Ti')
self.UnderSpinBox.setValue(12)
self.SicmSpinBox.setValue(0.45)
p=PYMCACFGpath()
self.cfgpathstart,garbage=os.path.split(p)
crs=self.attrdict['chessrunstr']
self.cfgfilenames=[[f, crs in f] for f in os.listdir(self.cfgpathstart) if os.path.splitext(f)[1]==os.path.splitext(p)[1]]
self.cfgfilenames.sort(key=operator.itemgetter(1), reverse=True)
self.cfgfilenames=[f[0] for f in self.cfgfilenames]
for count, fname in enumerate(self.cfgfilenames):
self.cfgComboBox.insertItem(count, fname)
self.bckndeltr_rate=[]
self.cfgpath=None
def chessrunchanged(self):
name=str(self.chessrunComboBox.currentText())
d=self.databasedictlist[[i for i, d in enumerate(self.databasedictlist) if d['name']==name][0]]
if 'Sicm' in d:
self.SicmSpinBox.setValue(d['Sicm'])
if 'enmin' in d:
self.enminSpinBox.setValue(d['enmin'])
if 'enmax' in d:
self.enmaxSpinBox.setValue(d['enmax'])
if 'underlayer' in d:
self.UnderLineEdit.setText(d['underlayer'][0])
self.UnderSpinBox.setValue(d['underlayer'][1])
if 'time' in d:
self.timeLineEdit.setText(d['time'])
if 'cfgfile' in d and d['cfgfile'] in self.cfgfilenames:
self.cfgComboBox.setCurrentIndex(self.cfgfilenames.index(d['cfgfile']))
if 'BckndCounts' in d:
self.bckndeltr_rate=[[' '.join((el, tr)), ct] for (el,tr,ct) in d['BckndCounts']]
def fluxmethodchanged(self):
a='Select image\nfor flux cal'
print'Enter flux\nvalue', self.FluxMethodComboBox.currentIndex()
def eltr_cfg(self, el, tr):
if isinstance(tr, list):
return [' '.join((el, t)) for t in tr]
else:
return [' '.join((el, tr))]
@pyqtSignature("")
def on_transitionsPushButton_clicked(self):
try:
h5file=h5py.File(self.h5path, mode='r')
self.time=readh5pyarray(h5file['/'.join((self.h5groupstr, 'measurement/scalar_data', str(self.timeLineEdit.text())))])
h5file.close()
except:
QMessageBox.warning(self,"aborting", "aborting calculation because could not find that scalar_data")
print '/'.join((self.h5groupstr, 'measurement/scalar_data', str(self.timeLineEdit.text())))
return
self.el=[]
for le in self.ElLines:
s=str(le.text())
if s!='':
self.el+=[s]
self.cfgpath=os.path.join(self.cfgpathstart, str(self.cfgComboBox.currentText()))
self.pymca_config = getcfgdict_txt(self.cfgpath)
dfltfitlist=flatten([self.eltr_cfg(el, tr) for el, tr in self.pymca_config['peaks'].iteritems()])
allpeaksdictlist, quantlist, foundpeaks=FindXrays(self.el, energy=self.beamenergy)
self.el=numpy.array(self.el)
self.el=self.el[numpy.bool_(foundpeaks)]
repen=[d['repen'] for d in allpeaksdictlist if d['eltr'] in quantlist]
filmfitlist=[d['eltr'] for d in allpeaksdictlist]
if self.UnderSpinBox.value()>0:
underlayerdictlist, garbage, und_foundpk=FindXrays([str(self.UnderLineEdit.text())], energy=self.beamenergy)
filmfitlist+=[d['eltr'] for d in underlayerdictlist]
alreadyinlist=list(set(dfltfitlist)&set(quantlist))
fitlist=list(set(dfltfitlist)|set(filmfitlist))
bcknd=numpy.zeros(len(self.el), dtype='float32')
bckndind_rate=[[quantlist.index(eltr), rate] for eltr, rate in self.bckndeltr_rate if eltr in quantlist]
if len(bckndind_rate)>0:
bckndind, rate=zip(*bckndind_rate)
bcknd[bckndind]=numpy.float32(rate)*numpy.max(self.time)
dens=numpy.ones(len(self.el), dtype='float32')
mass=numpy.ones(len(self.el), dtype='float32')
comp=numpy.ones(len(self.el), dtype='float32')/len(self.el) #this way if the composition is not available then it will guess something reasonable
elmap=[el in self.gunpropdict['symbol'] for el in self.el]
elmap=numpy.bool_(elmap)
gpdmap=[el in self.el for el in self.gunpropdict['symbol']]
gpdmap=numpy.bool_(gpdmap)
dens[elmap]=numpy.float32(self.gunpropdict['d'])[gpdmap]
mass[elmap]=numpy.float32(self.gunpropdict['M'])[gpdmap]
cmr=numpy.float32(self.gunpropdict['CenterMolRates'])[gpdmap]
cmr/=cmr.sum()
comp[elmap]=cmr
comp/=comp.sum()
if not numpy.all(elmap): #not all of the quant elements were in funpropdict. even if they were, availability of dep prof is not guaranteed
elsym, elM, eld = zip(*get_elMd_el(self.el[numpy.logical_not(elmap)]))# assume that if xray were found for an element then the mass and density can be found. If this fails, the next line will fail
dens[numpy.logical_not(elmap)]=numpy.float32(eld)
mass[numpy.logical_not(elmap)]=numpy.float32(elM)
self.dpissufficient=False
else:
self.dpissufficient= not (self.dpcomp is None)
if not self.dpissufficient:
if self.DepProfEstCheckBox.isChecked() or self.radioButtonInd.isChecked():
QMessageBox.warning(self,"problem", "calibration and dep prof estimates not possible with that set of elements")
self.DepProfEstCheckBox.setChecked(False)
self.radioButtonDef.setChecked(False)
self.alreadyinlistLineEdit.setText(', '.join(alreadyinlist))
self.fitLineEdit.setText(','.join(fitlist))
self.quantLineEdit.setText(','.join(quantlist))
self.bckndLineEdit.setText(self.strformat(bcknd, ['%.3e'])[1:-1])
self.densityLineEdit.setText(self.strformat(dens, ['%.2f'])[1:-1])
self.massLineEdit.setText(self.strformat(mass, ['%.2f'])[1:-1])
self.compLineEdit.setText(self.strformat(comp, ['%.2f'])[1:-1])
self.repenLineEdit.setText(self.strformat(repen, ['%.2f'])[1:-1])
self.dfltfitlist=dfltfitlist
def readlineedit(self, le, numcvt=True):
c=str(le.text())
c=c.strip()
ans=[]
while len(c)>0:
a, b, c=c.partition(',')
a=a.strip()
c=c.strip()
try:
if not numcvt:
raise
b=eval(a)
except:
b=a
ans+=[b]
return ans
def strformat(self, val, frmt):
s=''
v=val
f=frmt
if f is None:
s+=`v`
elif isinstance(f, list):
s+='['
for count, subv in enumerate(v):
if count>0:
s+=','
s+=f[0] %subv
s+=']'
else:
s+=f %v
return s
def buildparstr(self, el, quant, dens, mass, comp, bcknd, repen, cfgpath, addlist, fluxcalstr, dpbool, under, sicm, time, dlambdastr, mflambdastr):
vl=[el, quant, dens, mass, comp, bcknd, repen, cfgpath, addlist, fluxcalstr, dpbool, under, sicm, time, dlambdastr, mflambdastr]
nl=["elements", "quantElTr", 'eld', 'elM', 'approxstoich', "BckndCounts", 'RepEn','cfgpath', 'otherElTr', 'FluxCal', 'DepProfEst', 'Underlayer', 'Sicm', 'time', 'dlambda', 'mflambda']
#fl=[None, None, ['%.2f'], ['%.2f'], ['%.2f'], ['%.3e'], ['%.2f'], None, None, None, '%.3f', None, None]
fl=[None, None, ['%s'], ['%s'], ['%.2f'], ['%s'], ['%s'], None, None, '%s', None, None, '%.3f', None, None, None]
al=["SecondaryAction='Notify'"]
s=''
for count, (n, v, f) in enumerate(zip(nl, vl, fl)):
if count>0:
s+=", "
s+=n+"="
s+=self.strformat(v, f)
for a in al:
s+=", "+a
return s
def ExitRoutine(self):
# try:
# if self.cfgpath is None:
# raise
if self.radioButtonEnt.isChecked():
self.fluxcalstr='%.10e' %self.FluxSpinBox.value()
elif self.radioButtonInd.isChecked():
self.fluxcalstr="'CalUsing%s'" %str(self.FluxIndComboBox.currentText())
else:
self.fluxcalstr="'Default'"
fitlist=self.readlineedit(self.fitLineEdit)
quantlist=self.readlineedit(self.quantLineEdit)
BckndCounts=self.readlineedit(self.bckndLineEdit, numcvt=False)
dens=self.readlineedit(self.densityLineEdit, numcvt=False)
mass=self.readlineedit(self.massLineEdit, numcvt=False)
comp=numpy.float32(self.readlineedit(self.compLineEdit), numcvt=True)
comp/=comp.sum()
repen=self.readlineedit(self.repenLineEdit, numcvt=False)
dlambdastr=str(self.dlambdaLineEdit.text())
mflambdastr=str(self.mflambdaLineEdit.text())
addlist=list((set(fitlist)-set(self.dfltfitlist))-set(quantlist))
unel=str(self.UnderLineEdit.text())
uneldict=GunPropertyDict([unel])
if uneldict is None:
print 'WARNING: UNDERLAYER ELEMENT NOT FOUND - effectively removing underlayer'
self.Underlayer=('Ti', 0.1, 0)
else:
self.Underlayer=(unel, uneldict['d'][0], self.UnderSpinBox.value())
self.Sicm=self.SicmSpinBox.value()
self.DepProfEst=self.DepProfEstCheckBox.isChecked()
self.parstr=self.buildparstr(list(self.el), quantlist, dens, mass, comp, BckndCounts, repen, self.cfgpath, addlist, self.fluxcalstr, self.DepProfEst, self.Underlayer, self.Sicm, str(self.timeLineEdit.text()), dlambdastr, mflambdastr)
# except:
# self.parstr = None
class pdfsearchDialog(QDialog,
            ui_pdfsearch.Ui_pdfsearchDialog):
    """Dialog for browsing PDF (powder diffraction file) entries, searching
    them by substrings, and overplotting selected entries as vertical peak
    sticks (with optional labels) on an existing matplotlib axes.

    Bookkeeping: self.lineind_textind_plotlist holds, per plotted entry, the
    list of axes-line indices and the axes-text index (or None) so entries can
    be removed individually later."""
    def __init__(self, parent, plotw, offset=0., filename='PDFentries.txt', cvtfcn=lambda x:d_q(x/10.0)):
        super(pdfsearchDialog, self).__init__(parent)
        self.setupUi(self)
        self.plotw=plotw
        self.ax=self.plotw.axes
        # remember how many lines/texts the axes had before we add any
        self.startinglineindex=len(self.ax.lines)
        self.startingtextindex=len(self.ax.texts)
        self.afterpdflistlinesindex=self.startinglineindex
        self.offset=offset
        self.dfltheight=(self.ax.get_ylim()[1]-self.offset)*0.8
        self.heightSpinBox.setValue(self.dfltheight)
        self.lineind_textind_plotlist=[]
        self.numpdflabels=0
        self.pdfname, self.pdflist=readpdffile(os.path.join(defaultdir('pdfentries'), filename))
        # convert each entry's peak positions with cvtfcn (default: d in A -> q)
        self.pdflist=[[[cvtfcn(d), h] for d, h in pdf] for pdf in self.pdflist]
        QObject.connect(self.pdfListWidget,SIGNAL("itemSelectionChanged()"),self.plotsinglepdfentry)
        for l in self.pdfname:
            self.pdfListWidget.addItem(l)
    @pyqtSignature("")
    def on_findPushButton_clicked(self):
        # hide entries that fail any search substring or are already plotted
        lelist=[self.searchLineEdit0, self.searchLineEdit1, self.searchLineEdit2, self.searchLineEdit3]
        slist=[str(le.text()) for le in lelist]
        for count, pdfname in enumerate(self.pdfname):
            searchbool=True
            for s in slist:
                searchbool*=s in pdfname
            plotbool=True
            for ind in range(self.plotListWidget.count()):
                plotbool*=not str(self.plotListWidget.item(ind).text()) in self.pdfname
            self.pdfListWidget.item(count).setHidden(not (searchbool and plotbool))
    def plotsinglepdfentry(self):
        # preview the currently selected entry (replacing any prior preview)
        if self.plotsingleCheckBox.isChecked():
            row=self.pdfListWidget.currentRow()
            self.clearpdfplots(self.afterpdflistlinesindex)
            self.drawpdfpeaks(row)
    def plotpdflist(self):
        # redraw every non-hidden entry in the plot list from scratch
        self.clearpdfplots(self.startinglineindex)
        self.numpdflabels=0
        for i in range(self.plotListWidget.count()):
            if not self.plotListWidget.item(i).isHidden():
                self.drawpdfpeaks(i, fromplotlist=True)
        self.afterpdflistlinesindex=len(self.ax.lines)
    def clearpdfplots(self, startind, stopind=None):
        """Delete plotted pdf lines in [startind, stopind) from the axes,
        removing whole entries (and their labels) and renumbering the saved
        indices in lineind_textind_plotlist to stay valid."""
        if stopind is None:
            stopind=len(self.ax.lines)
        # reduceind decrements an index if redbool, passing None through
        reduceind=lambda ind, redbool: (((ind is None) and (None, )) or (redbool and (ind-1, )) or (ind,))[0]
        for i in range(startind, stopind)[::-1]:# go through the delete indeces but if one peak is in the dleete indeces then delete the entire pdf entry and the label
            ind=[cnt for cnt, lineinds in enumerate(map(operator.itemgetter(0),self.lineind_textind_plotlist)) if i in lineinds]
            if len(ind)>0:
                ind=ind[0]
                lineinds, textind=self.lineind_textind_plotlist.pop(ind)
                if textind is None:
                    textind=99999
                else:
                    del self.ax.texts[textind]
                for li in sorted(lineinds)[::-1]:
                    del self.ax.lines[li]
                self.lineind_textind_plotlist=[[list(reduceind(numpy.int16(li), li[0]>lineinds[0])), reduceind(ti, ti>textind)] for li, ti in self.lineind_textind_plotlist]
        if len([ti for li, ti in self.lineind_textind_plotlist if not ti is None])==0:#if all the label indeces are None there are no indeces so start the counter over
            self.numpdflabels=0
    @pyqtSignature("")
    def on_addPushButton_clicked(self):
        # move the selected pdf entry into the plot list with label/color/height
        self.plotListWidget.addItem(self.pdfname[self.pdfListWidget.currentRow()])
        self.pdfListWidget.currentItem().setHidden(True)
        self.labelListWidget.addItem(self.labelLineEdit.text())
        self.colListWidget.addItem(self.colLineEdit.text())
        self.heightListWidget.addItem('%.2f' %self.heightSpinBox.value())
        self.plotpdflist()
    @pyqtSignature("")
    def on_removePushButton_clicked(self):
        # hide the entry in all four parallel list widgets and redraw
        item=self.plotListWidget.currentItem()
        if not item is None:
            txt=str(item.text())
            if txt in self.pdfname:
                self.pdfListWidget.item(self.pdfname.index(txt)).setHidden(False)
            row=self.plotListWidget.currentRow()
            for ListWidget in [self.plotListWidget, self.labelListWidget, self.colListWidget, self.heightListWidget]:
                ListWidget.item(row).setHidden(True)
                #ListWidget.removeItemWidget(ListWidget.item(row))
            self.plotpdflist()
    def drawpdfpeaks(self, pdfindex, fromplotlist=False):
        """Plot one pdf entry as vertical sticks scaled by 'height'; when
        fromplotlist, pdfindex refers to the plot list (which stores the
        label/color/height), otherwise the search list and line edits."""
        if fromplotlist:
            label=str(self.labelListWidget.item(pdfindex).text())
            colstr=str(self.colListWidget.item(pdfindex).text())
            try:
                height=eval(str(self.heightListWidget.item(pdfindex).text()))*1.0
            except:
                print 'height interpretation error'
                height=self.dfltheight
            pdfindex=self.pdfname.index(str(self.plotListWidget.item(pdfindex).text()))
        else:
            label=str(self.labelLineEdit.text())
            colstr=str(self.colLineEdit.text())
            height=self.heightSpinBox.value()
        if colstr=='':
            colstr='r'
        pdf=self.pdflist[pdfindex]
        self.ax.hold(True)
        lineindstart=len(self.ax.lines)
        for q, h in pdf:
            h*=height
            self.ax.plot([q, q], [self.offset, self.offset+h], colstr)
        lineindstop=len(self.ax.lines)
        if label=='':
            textind=None
        else:
            textind=len(self.ax.texts)
            # indent each successive label with leading spaces so they stack
            for garbage in range(self.numpdflabels):
                label=''.join((' ', label))
            self.numpdflabels+=1
            ylim=self.ax.get_ylim()
            xlim=self.ax.get_xlim()
            fs=14
            sp=(fs*1.4/72.)/self.ax.figure.get_figheight()
            self.ax.text(xlim[1]-.03*(xlim[1]-xlim[0]), ylim[1]-(.03+self.numpdflabels*sp)*(ylim[1]-ylim[0]), label, color=colstr[0], fontsize=fs, horizontalalignment='right')
        self.lineind_textind_plotlist+=[[range(lineindstart, lineindstop), textind]]
        self.plotw.fig.canvas.draw()
class editrawxrdwindow(QDialog,
ui_editrawxrdDialog.Ui_editrawxrdDialog):
#***
def __init__(self, parent, h5path, h5groupstr=None, h5grppath=None):#either pass h5grppath which is the entire path to the XRD group that contains counts or the normal h5groupstr
super(editrawxrdwindow, self).__init__(parent)
self.setupUi(self)
self.h5path=h5path
self.h5groupstr=h5groupstr
self.h5grppath=h5grppath
h5file=h5py.File(self.h5path, mode='r')
if self.h5grppath is None:
h5analysis=h5file['/'.join((self.h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((self.h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=h5file['/'.join((self.h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
h5sd=h5file['/'.join((self.h5groupstr,'measurement', 'scalar_data'))]
else:
h5marcounts=h5file[h5grppath]['counts']
if 'scalar_data' in h5file[h5grppath].parent:
h5sd=(h5file[h5grppath].parent)['scalar_data']
else:
h5sd=None
s=''
for k, v in h5marcounts.attrs.iteritems():
if k.startswith('mod_'):
s+=': '.join((k.partition('mod_')[2], `v`))+'\n'
if len(s)>0:
s="This raw data has already been modified with the following settings:\n"+s
QMessageBox.warning(self, "REPEAT EDIT", s)
if h5sd is None:
self.normCheckBox.setChecked(False)
self.normCheckBox.setEnabled(False)
prefind=None
else:
count=0
prefind=None
for dset in h5sd.iterobjects():
if isinstance(dset, h5py.Dataset) and dset.shape==h5marcounts.shape[0:1]:
nam=dset.name.rpartition('/')[2]
self.normComboBox.insertItem(count, nam)
if nam=='IC3':
prefind=count
count+=1
if not prefind is None:
self.normComboBox.setCurrentIndex(prefind)
h5file.close()
self.dezingCheckBox.setChecked(True)
self.normCheckBox.setChecked(count>0)
self.multCheckBox.setChecked(count>0)
self.dezingSpinBox.setValue(1.1)
self.dezingComboBox.insertItem(0, 'outlier method')
self.dezingComboBox.insertItem(1, 'by image max val')
self.dezingComboBox.setCurrentIndex(0)
QObject.connect(self.dezingComboBox,SIGNAL("activated(QString)"),self.dezingchanged)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def dezingchanged(self, garbage):
show=self.dezingComboBox.currentIndex()==0
self.dezingLabel.setVisible(show)
self.dezingSpinBox.setVisible(show)
def ExitRoutine(self):
dezingbool=self.dezingCheckBox.isChecked()
normbool=self.normCheckBox.isChecked()
multbool=self.multCheckBox.isChecked()
if dezingbool or normbool or multbool:
a=dezingbool and self.dezingComboBox.currentIndex()==1
b=normbool and str(self.normComboBox.currentText()) or None
c=multbool and self.multSpinBox.value() or None
d=(dezingbool and self.dezingComboBox.currentIndex()==0) and self.dezingSpinBox.value() or None
if self.h5grppath is None:
xrdraw_dezing_rescale(self.h5path, h5groupstr=self.h5groupstr, dezingbool=a, normdsetname=b, multval=c, outlier_nieghbratio=d)
else:
xrdraw_dezing_rescale(self.h5path, h5grppath=self.h5grppath, dezingbool=a, normdsetname=b, multval=c, outlier_nieghbratio=d)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/io/parser/test_c_parser_only.py | 1 | 21552 | """
Tests that apply specifically to the CParser. Unless specifically stated
as a CParser-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the Python parser can accept
further arguments when parsing.
"""
from io import BytesIO, StringIO, TextIOWrapper
import mmap
import os
import tarfile
import numpy as np
import pytest
from pandas.compat import IS64
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, concat
import pandas._testing as tm
@pytest.mark.parametrize(
    "malformed",
    ["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],
    ids=["words pointer", "stream pointer", "lines pointer"],
)
def test_buffer_overflow(c_parser_only, malformed):
    """Malformed CR-separated input must raise ParserError instead of
    overflowing the tokenizer's internal buffers."""
    # see gh-9205: test certain malformed input files that cause
    # buffer overflows in tokenizer.c
    msg = "Buffer overflow caught - possible malformed input file."
    parser = c_parser_only
    with pytest.raises(ParserError, match=msg):
        parser.read_csv(StringIO(malformed))
def test_buffer_rd_bytes(c_parser_only):
    """Repeatedly parsing a corrupt gzip stream must not double-free the
    parser's source buffer (would segfault)."""
    # see gh-12098: src->buffer in the C parser can be freed twice leading
    # to a segfault if a corrupt gzip file is read with 'read_csv', and the
    # buffer is filled more than once before gzip raises an Exception.
    data = (
        "\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09"
        "\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0"
        "\xA6\x4D" + "\x55" * 267 + "\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00"
        "\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO"
    )
    parser = c_parser_only
    with tm.assert_produces_warning(RuntimeWarning):
        # compression has no effect when passing a non-binary object as input
        for _ in range(100):
            try:
                parser.read_csv(
                    StringIO(data), compression="gzip", delim_whitespace=True
                )
            except Exception:
                pass
def test_delim_whitespace_custom_terminator(c_parser_only):
# See gh-12912
data = "a b c~1 2 3~4 5 6~7 8 9"
parser = c_parser_only
df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_dtype_and_names_error(c_parser_only):
    """Passing both dtype and names must parse correctly, and an impossible
    fallback cast (NaN -> int32) must raise a clear ValueError."""
    # see gh-8833: passing both dtype and names
    # resulting in an error reporting issue
    parser = c_parser_only
    data = """
1.0 1
2.0 2
3.0 3
"""
    # base cases
    result = parser.read_csv(StringIO(data), sep=r"\s+", header=None)
    expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
    tm.assert_frame_equal(result, expected)
    result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"])
    expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"])
    tm.assert_frame_equal(result, expected)
    # fallback casting
    result = parser.read_csv(
        StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32}
    )
    expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"])
    expected["a"] = expected["a"].astype(np.int32)
    tm.assert_frame_equal(result, expected)
    data = """
1.0 1
nan 2
3.0 3
"""
    # fallback casting, but not castable
    with pytest.raises(ValueError, match="cannot safely convert"):
        parser.read_csv(
            StringIO(data),
            sep=r"\s+",
            header=None,
            names=["a", "b"],
            dtype={"a": np.int32},
        )
@pytest.mark.parametrize(
    "match,kwargs",
    [
        # For each of these cases, all of the dtypes are valid, just unsupported.
        (
            (
                "the dtype datetime64 is not supported for parsing, "
                "pass this column using parse_dates instead"
            ),
            {"dtype": {"A": "datetime64", "B": "float64"}},
        ),
        (
            (
                "the dtype datetime64 is not supported for parsing, "
                "pass this column using parse_dates instead"
            ),
            {"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]},
        ),
        (
            "the dtype timedelta64 is not supported for parsing",
            {"dtype": {"A": "timedelta64", "B": "float64"}},
        ),
        ("the dtype <U8 is not supported for parsing", {"dtype": {"A": "U8"}}),
    ],
    ids=["dt64-0", "dt64-1", "td64", "<U8"],
)
def test_unsupported_dtype(c_parser_only, match, kwargs):
    """Valid-but-unparseable dtypes must raise TypeError with a helpful message."""
    parser = c_parser_only
    df = DataFrame(
        np.random.rand(5, 2), columns=list("AB"), index=["1A", "1B", "1C", "1D", "1E"]
    )
    with tm.ensure_clean("__unsupported_dtype__.csv") as path:
        df.to_csv(path)
        with pytest.raises(TypeError, match=match):
            parser.read_csv(path, index_col=0, **kwargs)
@td.skip_if_32bit
def test_precise_conversion(c_parser_only):
    """float_precision='high' must never be less accurate than 'legacy', and
    'round_trip' must reproduce Python's float() exactly."""
    from decimal import Decimal
    parser = c_parser_only
    normal_errors = []
    precise_errors = []
    # test numbers between 1 and 2
    for num in np.linspace(1.0, 2.0, num=500):
        # 25 decimal digits of precision
        text = f"a\n{num:.25}"
        normal_val = float(
            parser.read_csv(StringIO(text), float_precision="legacy")["a"][0]
        )
        precise_val = float(
            parser.read_csv(StringIO(text), float_precision="high")["a"][0]
        )
        roundtrip_val = float(
            parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0]
        )
        actual_val = Decimal(text[2:])
        def error(val):
            # absolute error against the exact decimal value
            return abs(Decimal(f"{val:.100}") - actual_val)
        normal_errors.append(error(normal_val))
        precise_errors.append(error(precise_val))
        # round-trip should match float()
        assert roundtrip_val == float(text[2:])
    assert sum(precise_errors) <= sum(normal_errors)
    assert max(precise_errors) <= max(normal_errors)
def test_usecols_dtypes(c_parser_only):
    """dtype/converters mappings must be applied to the correct columns even
    when usecols selects a subset."""
    parser = c_parser_only
    data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
    result = parser.read_csv(
        StringIO(data),
        usecols=(0, 1, 2),
        names=("a", "b", "c"),
        header=None,
        converters={"a": str},
        dtype={"b": int, "c": float},
    )
    result2 = parser.read_csv(
        StringIO(data),
        usecols=(0, 2),
        names=("a", "b", "c"),
        header=None,
        converters={"a": str},
        dtype={"b": int, "c": float},
    )
    assert (result.dtypes == [object, int, float]).all()
    assert (result2.dtypes == [object, float]).all()
def test_disable_bool_parsing(c_parser_only):
    """dtype=object must disable boolean inference; with na_filter=False an
    empty field stays an empty string."""
    # see gh-2090
    parser = c_parser_only
    data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
    result = parser.read_csv(StringIO(data), dtype=object)
    assert (result.dtypes == object).all()
    result = parser.read_csv(StringIO(data), dtype=object, na_filter=False)
    assert result["B"][2] == ""
def test_custom_lineterminator(c_parser_only):
parser = c_parser_only
data = "a,b,c~1,2,3~4,5,6"
result = parser.read_csv(StringIO(data), lineterminator="~")
expected = parser.read_csv(StringIO(data.replace("~", "\n")))
tm.assert_frame_equal(result, expected)
def test_parse_ragged_csv(c_parser_only):
    """Rows with varying field counts must pad with NaN when names are given,
    and over-wide names must not segfault."""
    parser = c_parser_only
    data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
    nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
    result = parser.read_csv(
        StringIO(data), header=None, names=["a", "b", "c", "d", "e"]
    )
    expected = parser.read_csv(
        StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"]
    )
    tm.assert_frame_equal(result, expected)
    # too many columns, cause segfault if not careful
    data = "1,2\n3,4,5"
    result = parser.read_csv(StringIO(data), header=None, names=range(50))
    expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex(
        columns=range(50)
    )
    tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(c_parser_only):
    """Bare carriage returns as line endings must tokenize the same as
    newlines, including inside quoted fields."""
    # see gh-3453
    parser = c_parser_only
    data = ' a,b,c\r"a,b","e,d","f,f"'
    result = parser.read_csv(StringIO(data), header=None)
    expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None)
    tm.assert_frame_equal(result, expected)
    result = parser.read_csv(StringIO(data))
    expected = parser.read_csv(StringIO(data.replace("\r", "\n")))
    tm.assert_frame_equal(result, expected)
def test_grow_boundary_at_cap(c_parser_only):
    """Headers that exactly fill the token buffer must still parse (buffer
    must grow before hitting capacity)."""
    # See gh-12494
    #
    # Cause of error was that the C parser
    # was not increasing the buffer size when
    # the desired space would fill the buffer
    # to capacity, which would later cause a
    # buffer overflow error when checking the
    # EOF terminator of the CSV stream.
    parser = c_parser_only
    def test_empty_header_read(count):
        # a header of `count` commas yields count+1 unnamed columns
        s = StringIO("," * count)
        expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])
        df = parser.read_csv(s)
        tm.assert_frame_equal(df, expected)
    for cnt in range(1, 101):
        test_empty_header_read(cnt)
def test_parse_trim_buffers(c_parser_only):
    """Chunked parsing with a small residual chunk must not corrupt memory
    when the stream buffer is trimmed/reallocated."""
    # This test is part of a bugfix for gh-13703. It attempts to
    # to stress the system memory allocator, to cause it to move the
    # stream buffer and either let the OS reclaim the region, or let
    # other memory requests of parser otherwise modify the contents
    # of memory space, where it was formally located.
    # This test is designed to cause a `segfault` with unpatched
    # `tokenizer.c`. Sometimes the test fails on `segfault`, other
    # times it fails due to memory corruption, which causes the
    # loaded DataFrame to differ from the expected one.
    parser = c_parser_only
    # Generate a large mixed-type CSV file on-the-fly (one record is
    # approx 1.5KiB).
    record_ = (
        """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z"""
        """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,"""
        """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9"""
        """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,"""
        """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9."""
        """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999."""
        """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ"""
        """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ"""
        """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z"""
        """ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,"""
        """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,"""
        """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,"""
        """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999"""
        """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9."""
        """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,"""
        """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z"""
        """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ"""
        """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99"""
        """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-"""
        """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9"""
        """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,"""
        """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9."""
        """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ"""
        """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ"""
        """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ"""
        """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ"""
        """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99"""
        """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9"""
        """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
    )
    # Set the number of lines so that a call to `parser_trim_buffers`
    # is triggered: after a couple of full chunks are consumed a
    # relatively small 'residual' chunk would cause reallocation
    # within the parser.
    chunksize, n_lines = 128, 2 * 128 + 15
    csv_data = "\n".join([record_] * n_lines) + "\n"
    # We will use StringIO to load the CSV from this text buffer.
    # pd.read_csv() will iterate over the file in chunks and will
    # finally read a residual chunk of really small size.
    # Generate the expected output: manually create the dataframe
    # by splitting by comma and repeating the `n_lines` times.
    row = tuple(val_ if val_ else np.nan for val_ in record_.split(","))
    expected = DataFrame(
        [row for _ in range(n_lines)], dtype=object, columns=None, index=None
    )
    # Iterate over the CSV file in chunks of `chunksize` lines
    with parser.read_csv(
        StringIO(csv_data), header=None, dtype=object, chunksize=chunksize
    ) as chunks_:
        result = concat(chunks_, axis=0, ignore_index=True)
    # Check for data corruption if there was no segfault
    tm.assert_frame_equal(result, expected)
    # This extra test was added to replicate the fault in gh-5291.
    # Force 'utf-8' encoding, so that `_string_convert` would take
    # a different execution branch.
    with parser.read_csv(
        StringIO(csv_data),
        header=None,
        dtype=object,
        chunksize=chunksize,
        encoding="utf_8",
    ) as chunks_:
        result = concat(chunks_, axis=0, ignore_index=True)
    tm.assert_frame_equal(result, expected)
def test_internal_null_byte(c_parser_only):
    """An embedded NUL byte in a field must be treated as data, not as a
    terminator/escape/comment placeholder."""
    # see gh-14012
    #
    # The null byte ('\x00') should not be used as a
    # true line terminator, escape character, or comment
    # character, only as a placeholder to indicate that
    # none was specified.
    #
    # This test should be moved to test_common.py ONLY when
    # Python's csv class supports parsing '\x00'.
    parser = c_parser_only
    names = ["a", "b", "c"]
    data = "1,2,3\n4,\x00,6\n7,8,9"
    expected = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=names)
    result = parser.read_csv(StringIO(data), names=names)
    tm.assert_frame_equal(result, expected)
def test_read_nrows_large(c_parser_only):
    """nrows must stop reading after the requested rows even when input
    exceeds the internal buffer size (>262144 bytes)."""
    # gh-7626 - Read only nrows of data in for large inputs (>262144b)
    parser = c_parser_only
    header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"
    data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"
    header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"
    data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"
    test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2
    df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)
    assert df.size == 1010 * 10
def test_float_precision_round_trip_with_text(c_parser_only):
# see gh-15140
parser = c_parser_only
df = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip")
tm.assert_frame_equal(df, DataFrame({0: ["a"]}))
def test_large_difference_in_columns(c_parser_only):
    """A first row with vastly more columns than the rest must not break
    usecols-based parsing."""
    # see gh-14125
    parser = c_parser_only
    count = 10000
    large_row = ("X," * count)[:-1] + "\n"
    normal_row = "XXXXXX XXXXXX,111111111111111\n"
    test_input = (large_row + normal_row * 6)[:-1]
    result = parser.read_csv(StringIO(test_input), header=None, usecols=[0])
    rows = test_input.split("\n")
    expected = DataFrame([row.split(",")[0] for row in rows])
    tm.assert_frame_equal(result, expected)
def test_data_after_quote(c_parser_only):
# see gh-15910
parser = c_parser_only
data = 'a\n1\n"b"a'
result = parser.read_csv(StringIO(data))
expected = DataFrame({"a": ["1", "ba"]})
tm.assert_frame_equal(result, expected)
def test_comment_whitespace_delimited(c_parser_only, capsys):
    """With comments and whitespace delimiting, bad lines are skipped with a
    message on stderr and the remaining rows parse with NaN padding."""
    parser = c_parser_only
    test_input = """\
1 2
2 2 3
3 2 3 # 3 fields
4 2 3# 3 fields
5 2 # 2 fields
6 2# 2 fields
7 # 1 field, NaN
8# 1 field, NaN
9 2 3 # skipped line
# comment"""
    df = parser.read_csv(
        StringIO(test_input),
        comment="#",
        header=None,
        delimiter="\\s+",
        skiprows=0,
        error_bad_lines=False,
    )
    captured = capsys.readouterr()
    # skipped lines 2, 3, 4, 9
    for line_num in (2, 3, 4, 9):
        assert f"Skipping line {line_num}" in captured.err
    expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
    tm.assert_frame_equal(df, expected)
def test_file_like_no_next(c_parser_only):
# gh-16530: the file-like need not have a "next" or "__next__"
# attribute despite having an "__iter__" attribute.
#
# NOTE: This is only true for the C engine, not Python engine.
class NoNextBuffer(StringIO):
def __next__(self):
raise AttributeError("No next method")
next = __next__
parser = c_parser_only
data = "a\n1"
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoNextBuffer(data))
tm.assert_frame_equal(result, expected)
def test_buffer_rd_bytes_bad_unicode(c_parser_only):
# see gh-22748
t = BytesIO(b"\xB0")
t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape")
msg = "'utf-8' codec can't encode character"
with pytest.raises(UnicodeError, match=msg):
c_parser_only.read_csv(t, encoding="UTF-8")
@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])
def test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix):
# see gh-16530
#
# Unfortunately, Python's CSV library can't handle
# tarfile objects (expects string, not bytes when
# iterating through a file-like).
parser = c_parser_only
tar_path = os.path.join(csv_dir_path, "tar_csv" + tar_suffix)
with tarfile.open(tar_path, "r") as tar:
data_file = tar.extractfile("tar_data.csv")
out = parser.read_csv(data_file)
expected = DataFrame({"a": [1]})
tm.assert_frame_equal(out, expected)
@pytest.mark.high_memory
def test_bytes_exceed_2gb(c_parser_only):
    # see gh-16798
    #
    # Read from a "CSV" that has a column larger than 2GB.
    parser = c_parser_only
    if parser.low_memory:
        pytest.skip("not a high_memory test")

    # 2100 rows of 1MiB strings add up to a column larger than 2GB.
    huge = StringIO("strings\n" + "\n".join(["x" * (1 << 20)] * 2100))
    assert not parser.read_csv(huge).empty
def test_chunk_whitespace_on_boundary(c_parser_only):
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
#
# This test case has a field too large for the Python parser / CSV library.
parser = c_parser_only
chunk1 = "a" * (1024 * 256 - 2) + "\na"
chunk2 = "\n a"
result = parser.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"])
tm.assert_frame_equal(result, expected)
def test_file_handles_mmap(c_parser_only, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = c_parser_only
with open(csv1) as f:
m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
parser.read_csv(m)
assert not m.closed
m.close()
def test_file_binary_mode(c_parser_only):
# see gh-23779
parser = c_parser_only
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("1,2,3\n4,5,6")
with open(path, "rb") as f:
result = parser.read_csv(f, header=None)
tm.assert_frame_equal(result, expected)
def test_unix_style_breaks(c_parser_only):
# GH 11020
parser = c_parser_only
with tm.ensure_clean() as path:
with open(path, "w", newline="\n") as f:
f.write("blah\n\ncol_1,col_2,col_3\n\n")
result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c")
expected = DataFrame(columns=["col_1", "col_2", "col_3"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
@pytest.mark.parametrize(
"data,thousands,decimal",
[
(
"""A|B|C
1|2,334.01|5
10|13|10.
""",
",",
".",
),
(
"""A|B|C
1|2.334,01|5
10|13|10,
""",
".",
",",
),
],
)
def test_1000_sep_with_decimal(
c_parser_only, data, thousands, decimal, float_precision
):
parser = c_parser_only
expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
result = parser.read_csv(
StringIO(data),
sep="|",
thousands=thousands,
decimal=decimal,
float_precision=float_precision,
)
tm.assert_frame_equal(result, expected)
def test_float_precision_options(c_parser_only):
# GH 17154, 36228
parser = c_parser_only
s = "foo\n243.164\n"
df = parser.read_csv(StringIO(s))
df2 = parser.read_csv(StringIO(s), float_precision="high")
tm.assert_frame_equal(df, df2)
df3 = parser.read_csv(StringIO(s), float_precision="legacy")
if IS64:
assert not df.iloc[0, 0] == df3.iloc[0, 0]
else:
assert df.iloc[0, 0] == df3.iloc[0, 0]
msg = "Unrecognized float_precision option: junk"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(s), float_precision="junk")
| bsd-3-clause |
bloyl/mne-python | mne/datasets/sleep_physionet/tests/test_physionet.py | 12 | 8388 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_array_equal
import mne
from mne.utils import requires_good_network
from mne.utils import requires_pandas, requires_version
from mne.datasets.sleep_physionet import age, temazepam
from mne.datasets.sleep_physionet._utils import _update_sleep_temazepam_records
from mne.datasets.sleep_physionet._utils import _update_sleep_age_records
from mne.datasets.sleep_physionet._utils import AGE_SLEEP_RECORDS
from mne.datasets.sleep_physionet._utils import TEMAZEPAM_SLEEP_RECORDS
@pytest.fixture(scope='session')
def physionet_tmpdir(tmpdir_factory):
    """Fixture exposing a temporary directory for testing."""
    tmp_path = tmpdir_factory.mktemp('physionet_files')
    return str(tmp_path)
class _FakeFetch:
    """Callable that records every invocation instead of downloading."""

    def __init__(self):
        self.call_args_list = []

    def __call__(self, *args, **kwargs):
        # Store positional and keyword arguments, mock-style.
        self.call_args_list.append((args, kwargs))

    @property
    def call_count(self):
        """Number of times the fake has been invoked."""
        return len(self.call_args_list)
def _keep_basename_only(path_structure):
    """Map os.path.basename over an arbitrarily nested array of paths."""
    basename_ufunc = np.vectorize(op.basename)
    return basename_ufunc(np.array(path_structure))
def _get_expected_url(name):
    """Return the physionet URL at which ``name`` should be fetched."""
    base = 'https://physionet.org/physiobank/database/sleep-edfx/'
    # SC* records live in the cassette study, ST* in the telemetry study.
    if name.startswith('SC'):
        midle = 'sleep-cassette/'
    else:
        midle = 'sleep-telemetry/'
    return base + midle + '/' + name
def _get_expected_path(base, name):
    """Return the on-disk destination for a fetched file ``name``."""
    return op.join(base, name)
def _check_mocked_function_calls(mocked_func, call_fname_hash_pairs,
                                 base_path):
    """Assert the mocked fetcher was called exactly as prescribed."""
    # Check mocked_func has been called the right amount of times.
    assert mocked_func.call_count == len(call_fname_hash_pairs)

    # Check it has been called with the right parameters in the right
    # order.
    for recorded, expected in zip(mocked_func.call_args_list,
                                  call_fname_hash_pairs):
        call_args, call_kwargs = recorded
        assert call_args[0] == _get_expected_url(expected['name'])
        assert call_args[1] == _get_expected_path(base_path, expected['name'])
        assert call_kwargs['hash_'] == expected['hash']
        assert call_kwargs['hash_type'] == 'sha1'
        assert call_kwargs['print_destination'] is False
@pytest.mark.timeout(60)
@pytest.mark.xfail(strict=False)
@requires_good_network
@requires_pandas
@requires_version('xlrd', '0.9')
def test_run_update_age_records(tmpdir):
    """Test Sleep Physionet URL handling."""
    import pandas as pd

    # Regenerating the records file from the remote source must
    # reproduce the checked-in AGE_SLEEP_RECORDS exactly.
    out_fname = op.join(str(tmpdir), "records.csv")
    _update_sleep_age_records(out_fname)
    regenerated = pd.read_csv(out_fname)
    pd.testing.assert_frame_equal(regenerated, pd.read_csv(AGE_SLEEP_RECORDS))
@pytest.mark.parametrize('subject', [39, 68, 69, 78, 79, 83])
def test_sleep_physionet_age_missing_subjects(physionet_tmpdir, subject,
                                              download_is_error):
    """Test handling of missing subjects in Sleep Physionet age fetcher."""
    params = dict(path=physionet_tmpdir, update_path=False)

    # Unknown subjects must raise, warn, or be skipped depending on
    # the ``on_missing`` policy.
    with pytest.raises(
            ValueError, match='This dataset contains subjects 0 to 82'):
        age.fetch_data(
            subjects=[subject], recording=[1], on_missing='raise', **params)
    with pytest.warns(RuntimeWarning,
                      match='This dataset contains subjects 0 to 82'):
        age.fetch_data(
            subjects=[subject], recording=[1], on_missing='warn', **params)
    fetched = age.fetch_data(
        subjects=[subject], recording=[1], on_missing='ignore', **params)
    assert fetched == []
@pytest.mark.parametrize('subject,recording', [(13, 2), (36, 1), (52, 1)])
def test_sleep_physionet_age_missing_recordings(physionet_tmpdir, subject,
                                                recording, download_is_error):
    """Test handling of missing recordings in Sleep Physionet age fetcher."""
    params = dict(path=physionet_tmpdir, update_path=False)

    # Missing (subject, recording) pairs follow the ``on_missing`` policy.
    with pytest.raises(
            ValueError, match=f'Requested recording {recording} for subject'):
        age.fetch_data(subjects=[subject], recording=[recording],
                       on_missing='raise', **params)
    with pytest.warns(RuntimeWarning,
                      match=f'Requested recording {recording} for subject'):
        age.fetch_data(subjects=[subject], recording=[recording],
                       on_missing='warn', **params)
    fetched = age.fetch_data(subjects=[subject], recording=[recording],
                             on_missing='ignore', **params)
    assert fetched == []
def test_sleep_physionet_age(physionet_tmpdir, monkeypatch, download_is_error):
    """Test Sleep Physionet URL handling."""
    # check download_is_error patching: any real download attempt fails
    params = dict(path=physionet_tmpdir, update_path=False)
    with pytest.raises(AssertionError, match='Test should not download'):
        age.fetch_data(subjects=[0], recording=[1], **params)

    # then patch the downloader with a call recorder
    my_func = _FakeFetch()
    monkeypatch.setattr(
        mne.datasets.sleep_physionet._utils, '_fetch_file', my_func)

    fetched = age.fetch_data(subjects=[0], recording=[1], **params)
    assert_array_equal(_keep_basename_only(fetched),
                       [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf']])

    fetched = age.fetch_data(subjects=[0, 1], recording=[1], **params)
    assert_array_equal(_keep_basename_only(fetched),
                       [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'],
                        ['SC4011E0-PSG.edf', 'SC4011EH-Hypnogram.edf']])

    fetched = age.fetch_data(subjects=[0], recording=[1, 2], **params)
    assert_array_equal(_keep_basename_only(fetched),
                       [['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'],
                        ['SC4002E0-PSG.edf', 'SC4002EC-Hypnogram.edf']])

    # Every fetch above should have hit the recorder with these
    # (filename, sha1) pairs, in this order.
    EXPECTED_CALLS = (
        {'name': 'SC4001E0-PSG.edf',
         'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'},
        {'name': 'SC4001EC-Hypnogram.edf',
         'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'},
        {'name': 'SC4001E0-PSG.edf',
         'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'},
        {'name': 'SC4001EC-Hypnogram.edf',
         'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'},
        {'name': 'SC4011E0-PSG.edf',
         'hash': '4d17451f7847355bcab17584de05e7e1df58c660'},
        {'name': 'SC4011EH-Hypnogram.edf',
         'hash': 'd582a3cbe2db481a362af890bc5a2f5ca7c878dc'},
        {'name': 'SC4001E0-PSG.edf',
         'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'},
        {'name': 'SC4001EC-Hypnogram.edf',
         'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'},
        {'name': 'SC4002E0-PSG.edf',
         'hash': 'c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d'},
        {'name': 'SC4002EC-Hypnogram.edf',
         'hash': '386230188a3552b1fc90bba0fb7476ceaca174b6'})
    base_path = age.data_path(path=physionet_tmpdir)
    _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path)
@pytest.mark.xfail(strict=False)
@requires_good_network
@requires_pandas
@requires_version('xlrd', '0.9')
def test_run_update_temazepam_records(tmpdir):
    """Test Sleep Physionet URL handling."""
    import pandas as pd

    # Regenerating the records file must reproduce the checked-in
    # TEMAZEPAM_SLEEP_RECORDS exactly.
    out_fname = op.join(str(tmpdir), "records.csv")
    _update_sleep_temazepam_records(out_fname)
    regenerated = pd.read_csv(out_fname)
    pd.testing.assert_frame_equal(
        regenerated, pd.read_csv(TEMAZEPAM_SLEEP_RECORDS))
def test_sleep_physionet_temazepam(physionet_tmpdir, monkeypatch):
    """Test Sleep Physionet URL handling."""
    # Patch the downloader with a call recorder.
    my_func = _FakeFetch()
    monkeypatch.setattr(
        mne.datasets.sleep_physionet._utils, '_fetch_file', my_func)

    params = dict(path=physionet_tmpdir, update_path=False)
    fetched = temazepam.fetch_data(subjects=[0], **params)
    assert_array_equal(_keep_basename_only(fetched),
                       [['ST7011J0-PSG.edf', 'ST7011JP-Hypnogram.edf']])

    EXPECTED_CALLS = (
        {'name': 'ST7011J0-PSG.edf',
         'hash': 'b9d11484126ebff1884034396d6a20c62c0ef48d'},
        {'name': 'ST7011JP-Hypnogram.edf',
         'hash': 'ff28e5e01296cefed49ae0c27cfb3ebc42e710bf'})
    base_path = temazepam.data_path(path=physionet_tmpdir)
    _check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path)

    # Out-of-range subjects must raise.
    with pytest.raises(
            ValueError, match='This dataset contains subjects 0 to 21'):
        temazepam.fetch_data(subjects=[22], **params)
| bsd-3-clause |
pypot/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
# One 8x8-inch figure holding a 3x2 grid of example datasets.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
# Binary classification: a single informative feature.
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
# Binary classification: both features informative.
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
# Default of two clusters per class yields overlapping groups.
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
# Three-class problem from the same generator.
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
# Isotropic Gaussian blobs.
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
# A single Gaussian partitioned into concentric quantile classes.
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
jjx02230808/project0223 | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as the weights of the `KNeighborsClassifier`
classifier when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
# Soft voting averages predicted probabilities with the given weights.
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
# Evaluate each classifier on a dense grid covering the data (0.1 step).
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # Undo the whitening so the loadings carry their variance again.
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        # BUG FIX: the abstract placeholder was declared ``def fit(X, y=None)``
        # without ``self``, inconsistent with every concrete override.
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------

        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            # Re-scale by sqrt(explained variance) to undo the whitening
            # before projecting back into the original feature space.
            return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
raincoatrun/basemap | lib/mpl_toolkits/basemap/proj.py | 1 | 17835 | import numpy as np
from mpl_toolkits.basemap import pyproj
import math
from matplotlib.cbook import dedent
__version__ = '1.2.2'
_dg2rad = math.radians(1.)
_rad2dg = math.degrees(1.)
_cylproj = ['cyl','merc','mill','gall']
_pseudocyl = ['moll','kav7','eck4','robin','sinu','mbtfpq','vandg','hammer']
_upper_right_out_of_bounds = (
'the upper right corner of the plot is not in the map projection region')
_lower_left_out_of_bounds = (
'the lower left corner of the plot is not in the map projection region')
class Proj(object):
"""
performs cartographic transformations (converts from longitude,latitude
to native map projection x,y coordinates and vice versa) using proj
(http://proj.maptools.org/)
Uses a pyrex generated C-interface to libproj.
__init__ method sets up projection information.
__call__ method computes transformations.
See docstrings for __init__ and __call__ for details.
Contact: Jeff Whitaker <jeffrey.s.whitaker@noaa.gov>
"""
def __init__(self,projparams,llcrnrlon,llcrnrlat,
urcrnrlon,urcrnrlat,urcrnrislatlon=True):
"""
initialize a Proj class instance.
Input 'projparams' is a dictionary containing proj map
projection control parameter key/value pairs.
See the proj documentation (http://www.remotesensing.org/proj/)
for details.
llcrnrlon,llcrnrlat are lon and lat (in degrees) of lower
left hand corner of projection region.
urcrnrlon,urcrnrlat are lon and lat (in degrees) of upper
right hand corner of projection region if urcrnrislatlon=True
(default). Otherwise, urcrnrlon,urcrnrlat are x,y in projection
coordinates (units meters), assuming the lower left corner is x=0,y=0.
"""
self.projparams = projparams
self.projection = projparams['proj']
# rmajor is the semi-major axis.
# rminor is the semi-minor axis.
# esq is eccentricity squared.
# NOTE(review): bare except clauses here fall back through the
# alternate ellipsoid keys ('a'/'b' -> 'R' -> 'bR_a').
try:
self.rmajor = projparams['a']
self.rminor = projparams['b']
except:
try:
self.rmajor = projparams['R']
except:
self.rmajor = projparams['bR_a']
self.rminor = self.rmajor
if self.rmajor == self.rminor:
self.ellipsoid = False
else:
self.ellipsoid = True
self.flattening = (self.rmajor-self.rminor)/self.rmajor
self.esq = (self.rmajor**2 - self.rminor**2)/self.rmajor**2
self.llcrnrlon = llcrnrlon
self.llcrnrlat = llcrnrlat
# Per-projection setup of the lower-left corner in projection coords.
if self.projection == 'cyl':
llcrnrx = llcrnrlon
llcrnry = llcrnrlat
elif self.projection == 'ob_tran':
self._proj4 = pyproj.Proj(projparams)
llcrnrx,llcrnry = self(llcrnrlon,llcrnrlat)
llcrnrx = _rad2dg*llcrnrx; llcrnry = _rad2dg*llcrnry
if llcrnrx < 0: llcrnrx = llcrnrx + 360
# NOTE(review): 'in' does substring matching on 'ortho' here; it works
# for the exact string but is fragile compared to an '==' test.
elif self.projection in 'ortho':
if (llcrnrlon == -180 and llcrnrlat == -90 and
urcrnrlon == 180 and urcrnrlat == 90):
self._fulldisk = True
self._proj4 = pyproj.Proj(projparams)
llcrnrx = -self.rmajor
llcrnry = -self.rmajor
self._width = 0.5*(self.rmajor+self.rminor)
self._height = 0.5*(self.rmajor+self.rminor)
urcrnrx = -llcrnrx
urcrnry = -llcrnry
else:
self._fulldisk = False
self._proj4 = pyproj.Proj(projparams)
llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
if llcrnrx > 1.e20 or llcrnry > 1.e20:
raise ValueError(_lower_left_out_of_bounds)
elif self.projection == 'aeqd' and\
(llcrnrlon == -180 and llcrnrlat == -90 and urcrnrlon == 180 and\
urcrnrlat == 90):
self._fulldisk = True
self._proj4 = pyproj.Proj(projparams)
# raise an exception for ellipsoids - there appears to be a bug
# in proj4 that causes the inverse transform to fail for points
# more than 90 degrees of arc away from center point for ellipsoids
# (works fine for spheres) - below is an example
#from pyproj import Proj
#p1 = Proj(proj='aeqd',a=6378137.00,b=6356752.3142,lat_0=0,lon_0=0)
#x,y= p1(91,0)
#lon,lat = p1(x,y,inverse=True) # lon is 89 instead of 91
if self.ellipsoid:
msg = dedent("""
full disk (whole world) Azimuthal Equidistant projection can
only be drawn for a perfect sphere""")
raise ValueError(msg)
llcrnrx = -np.pi*self.rmajor
llcrnry = -np.pi*self.rmajor
self._width = -llcrnrx
self._height = -llcrnry
urcrnrx = -llcrnrx
urcrnry = -llcrnry
elif self.projection == 'geos':
self._proj4 = pyproj.Proj(projparams)
# find major and minor axes of ellipse defining map proj region.
# h is measured from surface of earth at equator.
h = projparams['h'] + self.rmajor
# latitude of horizon on central meridian
lonmax = 90.-(180./np.pi)*np.arcsin(self.rmajor/h)
# longitude of horizon on equator
latmax = 90.-(180./np.pi)*np.arcsin(self.rminor/h)
# truncate to nearest hundredth of a degree (to make sure
# they aren't slightly over the horizon)
latmax = int(100*latmax)/100.
lonmax = int(100*lonmax)/100.
# width and height of visible projection
P = pyproj.Proj(proj='geos',a=self.rmajor,\
b=self.rminor,lat_0=0,lon_0=0,h=projparams['h'])
x1,y1 = P(0.,latmax); x2,y2 = P(lonmax,0.)
width = x2; height = y1
self._height = height
self._width = width
if (llcrnrlon == -180 and llcrnrlat == -90 and
urcrnrlon == 180 and urcrnrlat == 90):
self._fulldisk = True
llcrnrx = -width
llcrnry = -height
urcrnrx = -llcrnrx
urcrnry = -llcrnry
else:
self._fulldisk = False
llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
if llcrnrx > 1.e20 or llcrnry > 1.e20:
raise ValueError(_lower_left_out_of_bounds)
elif self.projection == 'nsper':
self._proj4 = pyproj.Proj(projparams)
# find major and minor axes of ellipse defining map proj region.
# h is measured from surface of earth at equator.
h = projparams['h'] + self.rmajor
# latitude of horizon on central meridian
lonmax = 90.-(180./np.pi)*np.arcsin(self.rmajor/h)
# longitude of horizon on equator
# NOTE(review): unlike the 'geos' branch above, this uses rmajor
# (not rminor) for latmax - confirm this asymmetry is intended.
latmax = 90.-(180./np.pi)*np.arcsin(self.rmajor/h)
# truncate to nearest hundredth of a degree (to make sure
# they aren't slightly over the horizon)
latmax = int(100*latmax)/100.
lonmax = int(100*lonmax)/100.
# width and height of visible projection
P = pyproj.Proj(proj='nsper',a=self.rmajor,\
b=self.rminor,lat_0=0,lon_0=0,h=projparams['h'])
x1,y1 = P(0.,latmax); x2,y2 = P(lonmax,0.)
width = x2; height = y1
self._height = height
self._width = width
if (llcrnrlon == -180 and llcrnrlat == -90 and
urcrnrlon == 180 and urcrnrlat == 90):
self._fulldisk = True
llcrnrx = -width
llcrnry = -height
urcrnrx = -llcrnrx
urcrnry = -llcrnry
else:
self._fulldisk = False
llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
if llcrnrx > 1.e20 or llcrnry > 1.e20:
raise ValueError(_lower_left_out_of_bounds)
elif self.projection in _pseudocyl:
self._proj4 = pyproj.Proj(projparams)
xtmp,urcrnry = self(projparams['lon_0'],90.)
urcrnrx,xtmp = self(projparams['lon_0']+180.,0)
llcrnrx = -urcrnrx
llcrnry = -urcrnry
if self.ellipsoid and self.projection in ['kav7','eck4','mbtfpq']:
msg = "this projection can only be drawn for a perfect sphere"
raise ValueError(msg)
else:
self._proj4 = pyproj.Proj(projparams)
llcrnrx, llcrnry = self(llcrnrlon,llcrnrlat)
if self.projection == 'aeqd': self._fulldisk=False
# compute x_0, y_0 so ll corner of domain is x=0,y=0.
# note that for 'cyl' x,y == lon,lat
if self.projection != 'ob_tran':
self.projparams['x_0']=-llcrnrx
self.projparams['y_0']=-llcrnry
# reset with x_0, y_0.
if self.projection not in ['cyl','ob_tran']:
self._proj4 = pyproj.Proj(projparams)
llcrnry = 0.
llcrnrx = 0.
elif self.projection != 'ob_tran':
llcrnrx = llcrnrlon
llcrnry = llcrnrlat
# Upper-right corner: either given as lon/lat or as projection x/y.
if urcrnrislatlon:
self.urcrnrlon = urcrnrlon
self.urcrnrlat = urcrnrlat
if self.projection not in ['ortho','geos','nsper','aeqd'] + _pseudocyl:
urcrnrx,urcrnry = self(urcrnrlon,urcrnrlat)
if self.projection == 'ob_tran':
urcrnrx = _rad2dg*urcrnrx; urcrnry = _rad2dg*urcrnry
if urcrnrx < 0: urcrnrx = urcrnrx + 360
elif self.projection in ['ortho','geos','nsper','aeqd']:
if self._fulldisk:
urcrnrx = 2.*self._width
urcrnry = 2.*self._height
else:
urcrnrx,urcrnry = self(urcrnrlon,urcrnrlat)
if urcrnrx > 1.e20 or urcrnry > 1.e20:
raise ValueError(_upper_right_out_of_bounds)
elif self.projection in _pseudocyl:
xtmp,urcrnry = self(projparams['lon_0'],90.)
urcrnrx,xtmp = self(projparams['lon_0']+180.,0)
else:
urcrnrx = urcrnrlon
urcrnry = urcrnrlat
urcrnrlon, urcrnrlat = self(urcrnrx, urcrnry, inverse=True)
self.urcrnrlon = urcrnrlon
self.urcrnrlat = urcrnrlat
# corners of domain.
self.llcrnrx = llcrnrx
self.llcrnry = llcrnry
self.urcrnrx = urcrnrx
self.urcrnry = urcrnry
# Normalize min/max regardless of corner ordering.
if urcrnrx > llcrnrx:
self.xmin = llcrnrx
self.xmax = urcrnrx
else:
self.xmax = llcrnrx
self.xmin = urcrnrx
if urcrnry > llcrnry:
self.ymin = llcrnry
self.ymax = urcrnry
else:
self.ymax = llcrnry
self.ymin = urcrnry
def __call__(self, *args, **kw):
# x,y,inverse=False):
"""
Calling a Proj class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y native map projection
coordinates (in meters). If optional keyword 'inverse' is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection ('cyl'), this
does nothing (i.e. x,y == lon,lat).
lon,lat can be either scalar floats or N arrays.
"""
# Accept either a single (N,2) array or separate x,y arguments.
if len(args) == 1:
xy = args[0]
onearray = True
else:
x,y = args
onearray = False
if self.projection == 'cyl': # for cyl x,y == lon,lat
if onearray:
return xy
else:
return x,y
inverse = kw.get('inverse', False)
if onearray:
outxy = self._proj4(xy, inverse=inverse)
else:
outx,outy = self._proj4(x, y, inverse=inverse)
if inverse:
if self.projection in ['merc','mill','gall']:
if self.projection == 'merc':
coslat = math.cos(math.radians(self.projparams['lat_ts']))
sinlat = math.sin(math.radians(self.projparams['lat_ts']))
else:
coslat = 1.
sinlat = 0.
# radius of curvature of the ellipse perpendicular to
# the plane of the meridian.
rcurv = self.rmajor*coslat/math.sqrt(1.-self.esq*sinlat**2)
if onearray:
outxy[:,0] = _rad2dg*(xy[:,0]/rcurv) + self.llcrnrlon
else:
try: # x a scalar or an array
outx = _rad2dg*(x/rcurv) + self.llcrnrlon
except: # x a sequence
outx = [_rad2dg*(xi/rcurv) + self.llcrnrlon for xi in x]
else:
if self.projection in ['merc','mill','gall']:
if self.projection == 'merc':
coslat = math.cos(math.radians(self.projparams['lat_ts']))
sinlat = math.sin(math.radians(self.projparams['lat_ts']))
else:
coslat = 1.
sinlat = 0.
# radius of curvature of the ellipse perpendicular to
# the plane of the meridian.
rcurv = self.rmajor*coslat/math.sqrt(1.-self.esq*sinlat**2)
if onearray:
outxy[:,0] = rcurv*_dg2rad*(xy[:,0]-self.llcrnrlon)
else:
try: # x is a scalar or an array
outx = rcurv*_dg2rad*(x-self.llcrnrlon)
except: # x is a sequence.
outx = [rcurv*_dg2rad*(xi-self.llcrnrlon) for xi in x]
if onearray:
return outxy
else:
return outx, outy
def makegrid(self,nx,ny,returnxy=False):
"""
return arrays of shape (ny,nx) containing lon,lat coordinates of
an equally spaced native projection grid.
if returnxy=True, the x,y values of the grid are returned also.
"""
dx = (self.urcrnrx-self.llcrnrx)/(nx-1)
dy = (self.urcrnry-self.llcrnry)/(ny-1)
x = self.llcrnrx+dx*np.indices((ny,nx),np.float32)[1,:,:]
y = self.llcrnry+dy*np.indices((ny,nx),np.float32)[0,:,:]
lons, lats = self(x, y, inverse=True)
if returnxy:
return lons, lats, x, y
else:
return lons, lats
def makegrid3d(self,nx,ny,returnxy=False):
"""
return array of shape (ny,nx, 2) containing lon,lat coordinates of
an equally spaced native projection grid.
if returnxy=True, the x,y values of the grid are returned also.
"""
dx = (self.urcrnrx-self.llcrnrx)/(nx-1)
dy = (self.urcrnry-self.llcrnry)/(ny-1)
xy = np.empty((ny,nx,2), np.float64)
xy[...,0] = self.llcrnrx+dx*np.indices((ny,nx),np.float32)[1,:,:]
xy[...,1] = self.llcrnry+dy*np.indices((ny,nx),np.float32)[0,:,:]
lonlat = self(xy, inverse=True)
if returnxy:
return lonlat, xy
else:
return lonlat
if __name__ == "__main__":
    # Smoke test: project the corners of AWIPS grid 221 and compare against
    # the published GRIB documentation values.
    params = {}
    params['proj'] = 'lcc'
    params['R'] = 6371200
    params['lat_1'] = 50
    params['lat_2'] = 50
    params['lon_0'] = -107
    nx = 349; ny = 277; dx = 32463.41; dy = dx
    awips221 = Proj(params,-145.5,1.0,(nx-1)*dx,(ny-1)*dy,urcrnrislatlon=False)
    # AWIPS grid 221 parameters
    # (from http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html)
    llcornerx, llcornery = awips221(-145.5,1.)
    # find 4 lon/lat corners of AWIPS grid 221.
    llcornerx = 0.; llcornery = 0.
    lrcornerx = dx*(nx-1); lrcornery = 0.
    ulcornerx = 0.; ulcornery = dy*(ny-1)
    urcornerx = dx*(nx-1); urcornery = dy*(ny-1)
    llcornerlon, llcornerlat = awips221(llcornerx, llcornery, inverse=True)
    lrcornerlon, lrcornerlat = awips221(lrcornerx, lrcornery, inverse=True)
    urcornerlon, urcornerlat = awips221(urcornerx, urcornery, inverse=True)
    ulcornerlon, ulcornerlat = awips221(ulcornerx, ulcornery, inverse=True)
    import sys
    import time
    # BUGFIX: time.clock() was removed in Python 3.8; fall back to it only
    # on old interpreters that lack perf_counter.
    clock = getattr(time, 'perf_counter', None) or time.clock
    sys.stdout.write('4 corners of AWIPS grid 221:\n')
    # BUGFIX: '%' binds tighter than ','; the originals passed TWO arguments
    # to write() (a TypeError at runtime). The operands must be a tuple.
    sys.stdout.write('%s %s\n' % (llcornerlon, llcornerlat))
    sys.stdout.write('%s %s\n' % (lrcornerlon, lrcornerlat))
    sys.stdout.write('%s %s\n' % (urcornerlon, urcornerlat))
    sys.stdout.write('%s %s\n' % (ulcornerlon, ulcornerlat))
    sys.stdout.write('from GRIB docs\n')
    sys.stdout.write('(http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html)\n')
    sys.stdout.write('    -145.5 1.0\n')
    sys.stdout.write('    -68.318 0.897\n')
    sys.stdout.write('    -2.566 46.352\n')
    sys.stdout.write('    148.639 46.635\n')
    # compute lons and lats for the whole AWIPS grid 221 (377x249).
    t1 = clock()
    lons, lats = awips221.makegrid(nx,ny)
    t2 = clock()
    sys.stdout.write('compute lats/lons for all points on AWIPS 221 grid (%sx%s)\n' %(nx,ny))
    sys.stdout.write('max/min lons\n')
    sys.stdout.write('%s %s\n' % (min(np.ravel(lons)), max(np.ravel(lons))))
    sys.stdout.write('max/min lats\n')
    sys.stdout.write('%s %s\n' % (min(np.ravel(lats)), max(np.ravel(lats))))
    # BUGFIX: '%' binds tighter than '-'; 'took %s secs\n' % t2-t1 tried to
    # subtract a float from a string.
    sys.stdout.write('took %s secs\n' % (t2 - t1))
    sys.stdout.write('Same thing but with a single 3-D array\n')
    t1 = clock()
    lonlat, xy = awips221.makegrid3d(nx,ny, returnxy=True)
    t2 = clock()
    sys.stdout.write('took %s secs\n' % (t2 - t1))
    assert (lons==lonlat[...,0]).all(), "The longitudes are different"
    assert (lats==lonlat[...,1]).all(), "The latitudes are different"
| gpl-2.0 |
florian-f/sklearn | sklearn/hmm.py | 3 | 45124 | # Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""
import string
import numpy as np
from .utils import check_random_state
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
    """ Normalize the input array so that it sums to 1.
    Parameters
    ----------
    A: array, shape (n_samples, n_features)
        Non-normalized input data
    axis: int
        dimension along which normalization is performed
    Returns
    -------
    normalized_A: array, shape (n_samples, n_features)
        A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies inplace the array
    """
    # Nudge every entry away from exact zero so log() downstream is finite.
    A += np.finfo(float).eps
    Asum = A.sum(axis)
    # BUGFIX: the original tested ``if axis`` which is falsy for axis=0,
    # silently skipping the zero-sum guard and reshape for that axis.
    # Test against None instead so every explicit axis is handled.
    if axis is not None and A.ndim > 1:
        # Make sure we don't divide by zero.
        Asum[Asum == 0] = 1
        # Keep the reduced axis as a length-1 dimension so the division
        # below broadcasts against A's original shape.
        shape = list(A.shape)
        shape[axis] = 1
        Asum.shape = shape
    return A / Asum
class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.
    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.
    See the instance documentation for details specific to a
    particular object.
    Attributes
    ----------
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.
    startprob_prior : array, shape ('n_components`,)
        Initial state occupation prior distribution.
    algorithm : string, one of the decoder_algorithms
        decoder algorithm
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars, etc.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars, etc. Defaults to all parameters.
    See Also
    --------
    GMM : Gaussian mixture model
    """
    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms. Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publically.
    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.params = params
        self.init_params = init_params
        # The property setters below validate and store log-probabilities.
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self._algorithm = algorithm
        self.random_state = random_state
    def eval(self, obs):
        """Compute the log probability under the model and compute posteriors
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.
        Returns
        -------
        logprob : float
            Log likelihood of the sequence `obs`
        posteriors: array_like, shape (n, n_components)
            Posterior probabilities of each state for each
            observation
        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        # log gamma[t, i] = log P(state_t = i, obs) (up to normalization).
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        posteriors += np.finfo(np.float32).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors
    def score(self, obs):
        """Compute the log probability under the model.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : float
            Log likelihood of the `obs`
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob
    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to `obs`.
        Uses the Viterbi algorithm.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence
    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.
        Uses the maximum a posteriori estimation.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        _, posteriors = self.eval(obs)
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence
    def decode(self, obs, algorithm=None):
        """Find most likely state sequence corresponding to `obs`.
        Uses the selected algorithm for decoding.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        algorithm : string, one of the `decoder_algorithms`, optional
            Decoder algorithm to be used. If None (default), the
            algorithm configured on the estimator is used.
        Returns
        -------
        logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        Raises
        ------
        ValueError
            If `algorithm` is neither None nor one of `decoder_algorithms`.
        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        # BUGFIX: the original resolution order always preferred
        # self._algorithm, silently ignoring an explicitly passed
        # `algorithm` argument. Honor the caller's choice first and fall
        # back to the configured algorithm only when none is given.
        if algorithm is None:
            algorithm = self._algorithm
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        decoder = {"viterbi": self._decode_viterbi,
                   "map": self._decode_map}
        logprob, state_sequence = decoder[algorithm](obs)
        return logprob, state_sequence
    def predict(self, obs, algorithm=None):
        """Find most likely state sequence corresponding to `obs`.
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        algorithm : string, one of the `decoder_algorithms`, optional
            Decoder algorithm; None (default) uses the configured one.
        Returns
        -------
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        """
        _, state_sequence = self.decode(obs, algorithm)
        return state_sequence
    def predict_proba(self, obs):
        """Compute the posterior probability for each state in the model
        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        T : array-like, shape (n, n_components)
            Returns the probability of the sample for each state in the model.
        """
        _, posteriors = self.eval(obs)
        return posteriors
    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.
        Parameters
        ----------
        n : int
            Number of samples to generate.
        random_state: RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used
        Returns
        -------
        (obs, hidden_states)
        obs : array_like, length `n` List of samples
        hidden_states : array_like, length `n` List of hidden states
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)
        # Initial state: inverse-CDF sampling on the start distribution.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]
        for _ in range(n - 1):
            rand = random_state.rand()
            # Sample the next state from the current state's transition row.
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))
        return np.array(obs), np.array(hidden_states, dtype=int)
    def fit(self, obs):
        """Estimate model parameters.
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.
        Parameters
        ----------
        obs : list
            List of array-like observation sequences (shape (n_i, n_features)).
        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. a covariance parameter getting too
        small). You can fix this by getting more training data, or
        decreasing `covars_prior`.
        """
        if self.algorithm not in decoder_algorithms:
            self._algorithm = "viterbi"
        self._init(obs, self.init_params)
        logprob = []
        for i in range(self.n_iter):
            # Expectation step: accumulate sufficient statistics over all
            # training sequences using forward-backward.
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)
            # Check for convergence.
            if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
                break
            # Maximization step
            self._do_mstep(stats, self.params)
        return self
    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm
    def _set_algorithm(self, algorithm):
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm
    algorithm = property(_get_algorithm, _set_algorithm)
    def _get_startprob(self):
        """Mixing startprob for each state."""
        return np.exp(self._log_startprob)
    def _set_startprob(self, startprob):
        if startprob is None:
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            # NOTE: np.float was removed from NumPy; use the explicit dtype.
            startprob = np.asarray(startprob, dtype=np.float64)
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.all(startprob):
            # BUGFIX: the original discarded normalize()'s return value,
            # so the distribution was perturbed but never re-normalized.
            startprob = normalize(startprob)
        if len(startprob) != self.n_components:
            raise ValueError('startprob must have length n_components')
        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')
        self._log_startprob = np.log(np.asarray(startprob).copy())
    startprob_ = property(_get_startprob, _set_startprob)
    def _get_transmat(self):
        """Matrix of transition probabilities."""
        return np.exp(self._log_transmat)
    def _set_transmat(self, transmat):
        if transmat is None:
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.all(transmat):
            # BUGFIX: assign the re-normalized matrix (was discarded).
            transmat = normalize(transmat, axis=1)
        if (np.asarray(transmat).shape
                != (self.n_components, self.n_components)):
            raise ValueError('transmat must have shape '
                             '(n_components, n_components)')
        if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
            raise ValueError('Rows of transmat must sum to 1.0')
        self._log_transmat = np.log(np.asarray(transmat).copy())
        # log(0) yields nan; clamp those entries to -inf explicitly.
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF
    transmat_ = property(_get_transmat, _set_transmat)
    def _do_viterbi_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence
    def _do_forward_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
        return logsumexp(fwdlattice[-1]), fwdlattice
    def _do_backward_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
        return bwdlattice
    def _compute_log_likelihood(self, obs):
        # Emission log-likelihoods; implemented by subclasses.
        pass
    def _generate_sample_from_state(self, state, random_state=None):
        # Draw one observation from `state`'s emission; subclass hook.
        pass
    def _init(self, obs, params):
        if 's' in params:
            self.startprob_.fill(1.0 / self.n_components)
        if 't' in params:
            self.transmat_.fill(1.0 / self.n_components)
    # Methods used by self.fit()
    def _initialize_sufficient_statistics(self):
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_components),
                 'trans': np.zeros((self.n_components, self.n_components))}
        return stats
    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            # lneta[t, i, j] = log P(state_t = i, state_{t+1} = j | obs).
            lneta = np.zeros((n_observations - 1, n_components, n_components))
            lnP = logsumexp(fwdlattice[-1])
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lnP, lneta)
            stats["trans"] += np.exp(logsumexp(lneta, 0))
    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0
        if 's' in params:
            # The 1e-20 floor keeps log() finite for states never visited.
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
                axis=1)
            self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian emissions
    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.
    Parameters
    ----------
    n_components : int
        Number of states.
    ``_covariance_type`` : string
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    Attributes
    ----------
    ``_covariance_type`` : string
        String describing the type of covariance parameters used by
        the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
    n_features : int
        Dimensionality of the Gaussian emissions.
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    means : array, shape (`n_components`, `n_features`)
        Mean parameters for each state.
    covars : array
        Covariance parameters for each state. The shape depends on
        ``_covariance_type``::
            (`n_components`,) if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_components`, `n_features`) if 'diag',
            (`n_components`, `n_features`, `n_features`) if 'full'
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars, etc.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars, etc. Defaults to all parameters.
    Examples
    --------
    >>> from sklearn.hmm import GaussianHMM
    >>> GaussianHMM(n_components=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    GaussianHMM(algorithm='viterbi',...
    See Also
    --------
    GMM : Gaussian mixture model
    """
    def __init__(self, n_components=1, covariance_type='diag', startprob=None,
                 transmat=None, startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", means_prior=None, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          thresh=thresh, params=params,
                          init_params=init_params)
        self._covariance_type = covariance_type
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('bad covariance_type')
        # MAP priors/weights for means and covariances, used in _do_mstep.
        self.means_prior = means_prior
        self.means_weight = means_weight
        self.covars_prior = covars_prior
        self.covars_weight = covars_weight
    @property
    def covariance_type(self):
        """Covariance type of the model.
        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type
    def _get_means(self):
        """Mean parameters for each state."""
        return self._means_
    def _set_means(self, means):
        means = np.asarray(means)
        if (hasattr(self, 'n_features')
                and means.shape != (self.n_components, self.n_features)):
            raise ValueError('means must have shape '
                             '(n_components, n_features)')
        self._means_ = means.copy()
        # Setting the means also fixes the emission dimensionality.
        self.n_features = self._means_.shape[1]
    means_ = property(_get_means, _set_means)
    def _get_covars(self):
        """Return covars as a full matrix."""
        # Expand the compact per-type storage into one full (or list of
        # full) covariance matrices, whatever the covariance_type.
        if self._covariance_type == 'full':
            return self._covars_
        elif self._covariance_type == 'diag':
            return [np.diag(cov) for cov in self._covars_]
        elif self._covariance_type == 'tied':
            return [self._covars_] * self.n_components
        elif self._covariance_type == 'spherical':
            return [np.eye(self.n_features) * f for f in self._covars_]
    def _set_covars(self, covars):
        covars = np.asarray(covars)
        _validate_covars(covars, self._covariance_type, self.n_components)
        self._covars_ = covars.copy()
    covars_ = property(_get_covars, _set_covars)
    def _compute_log_likelihood(self, obs):
        # Per-frame, per-state Gaussian emission log-densities.
        return log_multivariate_normal_density(
            obs, self._means_, self._covars_, self._covariance_type)
    def _generate_sample_from_state(self, state, random_state=None):
        if self._covariance_type == 'tied':
            # One shared covariance matrix for all states.
            cv = self._covars_
        else:
            cv = self._covars_[state]
        return sample_gaussian(self._means_[state], cv, self._covariance_type,
                               random_state=random_state)
    def _init(self, obs, params='stmc'):
        super(GaussianHMM, self)._init(obs, params=params)
        if (hasattr(self, 'n_features')
                and self.n_features != obs[0].shape[1]):
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (obs[0].shape[1],
                                              self.n_features))
        self.n_features = obs[0].shape[1]
        if 'm' in params:
            # NOTE: means (and covars below) are seeded from the FIRST
            # training sequence only.
            self._means_ = cluster.KMeans(
                n_clusters=self.n_components).fit(obs[0]).cluster_centers_
        if 'c' in params:
            cv = np.cov(obs[0].T)
            if not cv.shape:
                # np.cov of 1-D data is a 0-d array; promote to (1, 1).
                cv.shape = (1, 1)
            self._covars_ = distribute_covar_matrix_to_match_covariance_type(
                cv, self._covariance_type, self.n_components)
    def _initialize_sufficient_statistics(self):
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        # 'post': total state responsibilities; 'obs': responsibility-
        # weighted observation sums; the squared/outer-product variants
        # feed the covariance M-step for the respective covariance types.
        stats['post'] = np.zeros(self.n_components)
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        stats['obs**2'] = np.zeros((self.n_components, self.n_features))
        stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
                                       self.n_features))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'm' in params or 'c' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)
        if 'c' in params:
            if self._covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self._covariance_type in ('tied', 'full'):
                for t, o in enumerate(obs):
                    obsobsT = np.outer(o, o)
                    for c in range(self.n_components):
                        stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
    def _do_mstep(self, stats, params):
        super(GaussianHMM, self)._do_mstep(stats, params)
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in params:
            # MAP update of the means; with no prior this reduces to the
            # usual responsibility-weighted average.
            prior = self.means_prior
            weight = self.means_weight
            if prior is None:
                weight = 0
                prior = 0
            self._means_ = (weight * prior + stats['obs']) / (weight + denom)
        if 'c' in params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            if covars_prior is None:
                covars_weight = 0
                covars_prior = 0
            means_prior = self.means_prior
            means_weight = self.means_weight
            if means_prior is None:
                means_weight = 0
                means_prior = 0
            meandiff = self._means_ - means_prior
            if self._covariance_type in ('spherical', 'diag'):
                cv_num = (means_weight * (meandiff) ** 2
                          + stats['obs**2']
                          - 2 * self._means_ * stats['obs']
                          + self._means_ ** 2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                self._covars_ = (covars_prior + cv_num) / cv_den
                if self._covariance_type == 'spherical':
                    # Spherical = diag variances averaged over features,
                    # replicated back to the per-feature layout.
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self._covariance_type in ('tied', 'full'):
                cvnum = np.empty((self.n_components, self.n_features,
                                  self.n_features))
                for c in range(self.n_components):
                    obsmean = np.outer(stats['obs'][c], self._means_[c])
                    cvnum[c] = (means_weight * np.outer(meandiff[c],
                                                        meandiff[c])
                                + stats['obs*obs.T'][c]
                                - obsmean - obsmean.T
                                + np.outer(self._means_[c], self._means_[c])
                                * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self._covariance_type == 'tied':
                    self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self._covariance_type == 'full':
                    self._covars_ = ((covars_prior + cvnum) /
                                     (cvweight + stats['post'][:, None, None]))
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
emissionprob : array, shape ('n_components`, 'n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asanyarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input containes negative intiger
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
# input is discontinous
return False
return True
def fit(self, obs, **kwargs):
    """Fit the model; rejects inputs that are not contiguous
    non-negative integer symbol sequences."""
    if not self._check_input_symbols(obs):
        raise ValueError(
            "Input must be both positive integer array and "
            "every element must be continuous, but %s was given." % obs)
    return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions

    Attributes
    ----------
    init_params : string, optional
        Controls which parameters are initialized prior to training. Can \
        contain any combination of 's' for startprob, 't' for transmat, 'm' \
        for means, and 'c' for covars, etc. Defaults to all parameters.

    params : string, optional
        Controls which parameters are updated in the training process. Can
        contain any combination of 's' for startprob, 't' for transmat,'m' for
        means, and 'c' for covars, etc. Defaults to all parameters.

    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.

    gmms : array of GMM objects, length `n_components`
        GMM emission distributions for each state.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    Examples
    --------
    >>> from sklearn.hmm import GMMHMM
    >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    GMMHMM(algorithm='viterbi', covariance_type='diag',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """

    def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", gmms=None, covariance_type='diag',
                 covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with GMM emissions.

        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)

        # XXX: Hotfix for n_mix that is incompatible with the scikit's
        # BaseEstimator API
        self.n_mix = n_mix
        self._covariance_type = covariance_type
        self.covars_prior = covars_prior
        self.gmms = gmms
        if gmms is None:
            # Default: one GMM per hidden state, with `n_mix` mixture
            # components each.
            gmms = []
            for x in range(self.n_components):
                if covariance_type is None:
                    g = GMM(n_mix)
                else:
                    g = GMM(n_mix, covariance_type=covariance_type)
                gmms.append(g)
        self.gmms_ = gmms

    # Read-only properties.
    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type

    def _compute_log_likelihood(self, obs):
        # Per-state log-likelihood of each observation:
        # shape (n_obs, n_components) after the transpose.
        return np.array([g.score(obs) for g in self.gmms_]).T

    def _generate_sample_from_state(self, state, random_state=None):
        # Draw a single sample from the given state's mixture model.
        return self.gmms_[state].sample(1, random_state=random_state).flatten()

    def _init(self, obs, params='stwmc'):
        super(GMMHMM, self)._init(obs, params=params)

        # Initialise every state's GMM on the pooled observations
        # (no EM iterations here: n_iter=0).
        allobs = np.concatenate(obs, 0)

        for g in self.gmms_:
            g.set_params(init_params=params, n_iter=0)
            g.fit(allobs)

    def _initialize_sufficient_statistics(self):
        stats = super(GMMHMM, self)._initialize_sufficient_statistics()
        # Per-state accumulators for the weighted GMM sufficient statistics.
        stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
        stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
        stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)

        for state, g in enumerate(self.gmms_):
            # Mixture-component posteriors, weighted (in log space) by the
            # HMM state posterior; eps guards against log(0).
            # NOTE(review): `np.float` is removed in NumPy >= 1.24 -- this
            # needs `float`/`np.float64` on modern NumPy; confirm version.
            _, lgmm_posteriors = g.eval(obs)
            lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
                                      + np.finfo(np.float).eps)
            gmm_posteriors = np.exp(lgmm_posteriors)

            # Run one weighted M-step on a scratch GMM to turn the
            # posteriors into mean/covariance statistics.
            tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
            n_features = g.means_.shape[1]
            tmp_gmm._set_covars(
                distribute_covar_matrix_to_match_covariance_type(
                    np.eye(n_features), g.covariance_type,
                    g.n_components))
            norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)

            if np.any(np.isnan(tmp_gmm.covars_)):
                raise ValueError

            stats['norm'][state] += norm
            if 'm' in params:
                # Weight the component means by their responsibility mass.
                stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
            if 'c' in params:
                if tmp_gmm.covariance_type == 'tied':
                    stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
                else:
                    # Reshape the weights so they broadcast against the
                    # (possibly higher-rank) covariance array.
                    cvnorm = np.copy(norm)
                    shape = np.ones(tmp_gmm.covars_.ndim)
                    shape[0] = np.shape(tmp_gmm.covars_)[0]
                    cvnorm.shape = shape
                    stats['covars'][state] += tmp_gmm.covars_ * cvnorm

    def _do_mstep(self, stats, params):
        super(GMMHMM, self)._do_mstep(stats, params)
        # All that is left to do is to apply covars_prior to the
        # parameters updated in _accumulate_sufficient_statistics.
        for state, g in enumerate(self.gmms_):
            n_features = g.means_.shape[1]
            norm = stats['norm'][state]
            if 'w' in params:
                g.weights_ = normalize(norm)
            if 'm' in params:
                g.means_ = stats['means'][state] / norm[:, np.newaxis]
            if 'c' in params:
                if g.covariance_type == 'tied':
                    g.covars_ = ((stats['covars'][state]
                                  + self.covars_prior * np.eye(n_features))
                                 / norm.sum())
                else:
                    # Same broadcasting trick as in the accumulation step.
                    cvnorm = np.copy(norm)
                    shape = np.ones(g.covars_.ndim)
                    shape[0] = np.shape(g.covars_)[0]
                    cvnorm.shape = shape
                    if (g.covariance_type in ['spherical', 'diag']):
                        g.covars_ = (stats['covars'][state] +
                                     self.covars_prior) / cvnorm
                    elif g.covariance_type == 'full':
                        eye = np.eye(n_features)
                        g.covars_ = ((stats['covars'][state]
                                      + self.covars_prior * eye[np.newaxis])
                                     / cvnorm)
| bsd-3-clause |
ZiqianXY/MLEN | src/p0_titanic_survival_exploration/titanic_visualizations.py | 24 | 5425 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
    """
    Remove rows that do not match the condition provided.

    Parameters
    ----------
    data : pandas.DataFrame
        The data to filter.
    condition : str
        A single condition of the format '<field> <op> <value>' where the
        following operations are valid: >, <, >=, <=, ==, !=
        Examples: "Sex == 'male'", 'Age < 18'

    Returns
    -------
    pandas.DataFrame
        The matching rows, with the index reset.

    Raises
    ------
    Exception
        If the operator in `condition` is not one of the six supported.
    """
    field, op, value = condition.split(" ")

    # Convert value into a number, or strip excess quotes if it is a string.
    try:
        value = float(value)
    except ValueError:
        # Was a bare `except:`; only the float() cast can fail here, and it
        # raises ValueError for non-numeric strings.
        value = value.strip("\'\"")

    # Get booleans for filtering.
    if op == ">":
        matches = data[field] > value
    elif op == "<":
        matches = data[field] < value
    elif op == ">=":
        matches = data[field] >= value
    elif op == "<=":
        matches = data[field] <= value
    elif op == "==":
        matches = data[field] == value
    elif op == "!=":
        matches = data[field] != value
    else:  # catch invalid operation codes
        raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")

    # Filter data and reset the row index.
    data = data[matches].reset_index(drop=True)
    return data
def survival_stats(data, outcomes, key, filters=None):
    """
    Print out selected statistics regarding survival, given a feature of
    interest and any number of filters (including no filters).

    Parameters
    ----------
    data : pandas.DataFrame
        Passenger features (one row per passenger).
    outcomes : pandas.Series
        Survival outcome per passenger (1 = survived, 0 = did not survive).
    key : str
        Name of the feature column to plot against survival.
    filters : list of str, optional
        Conditions of the form '<field> <op> <value>' applied via
        ``filter_data`` before plotting.

    Returns
    -------
    False when `key` cannot be visualized; None otherwise (a plot is shown).
    """
    # Avoid the mutable-default-argument pitfall: use a fresh list per call.
    if filters is None:
        filters = []

    # Check that the key exists
    if key not in data.columns.values:
        print("'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key))
        return False

    # Return the function before visualizing if 'Cabin' or 'Ticket'
    # is selected: too many unique categories to display
    if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
        print("'{}' has too many unique categories to display! Try a different feature.".format(key))
        return False

    # Merge data and outcomes into single dataframe
    all_data = pd.concat([data, outcomes], axis=1)

    # Apply filters to data
    for condition in filters:
        all_data = filter_data(all_data, condition)

    # Create outcomes DataFrame
    all_data = all_data[[key, 'Survived']]

    # Create plotting figure
    plt.figure(figsize=(8, 6))

    # 'Numerical' features
    if(key == 'Age' or key == 'Fare'):

        # Remove NaN values from Age data
        all_data = all_data[~np.isnan(all_data[key])]

        # Divide the range of data into bins and count survival rates.
        # 'Fare' has a larger range of values than 'Age' so create wider bins.
        if(key == 'Fare'):
            bins = np.arange(0, all_data['Fare'].max() + 20, 20)
        if(key == 'Age'):
            bins = np.arange(0, all_data['Age'].max() + 10, 10)

        # Overlay each bin's survival rates
        nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop=True)
        surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop=True)
        plt.hist(nonsurv_vals, bins=bins, alpha=0.6,
                 color='red', label='Did not survive')
        plt.hist(surv_vals, bins=bins, alpha=0.6,
                 color='green', label='Survived')

        # Add legend to plot
        plt.xlim(0, bins.max())
        plt.legend(framealpha=0.8)

    # 'Categorical' features
    else:

        # Set the various categories
        if(key == 'Pclass'):
            values = np.arange(1, 4)
        if(key == 'Parch' or key == 'SibSp'):
            values = np.arange(0, np.max(data[key]) + 1)
        if(key == 'Embarked'):
            values = ['C', 'Q', 'S']
        if(key == 'Sex'):
            values = ['male', 'female']

        # Create DataFrame containing categories and count of each
        frame = pd.DataFrame(index=np.arange(len(values)), columns=(key, 'Survived', 'NSurvived'))
        for i, value in enumerate(values):
            frame.loc[i] = [value, \
                   len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
                   len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]

        # Set the width of each bar
        bar_width = 0.4

        # Display each category's survival rates
        for i in np.arange(len(frame)):
            nonsurv_bar = plt.bar(i - bar_width, frame.loc[i]['NSurvived'], width=bar_width, color='r')
            surv_bar = plt.bar(i, frame.loc[i]['Survived'], width=bar_width, color='g')

        plt.xticks(np.arange(len(frame)), values)
        plt.legend((nonsurv_bar[0], surv_bar[0]), ('Did not survive', 'Survived'), framealpha=0.8)

    # Common attributes for plot formatting
    plt.xlabel(key)
    plt.ylabel('Number of Passengers')
    plt.title('Passenger Survival Statistics With \'%s\' Feature' % (key))
    plt.show()

    # Report number of passengers with missing values
    if sum(pd.isnull(all_data[key])):
        nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
        print("Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format(
            key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0)))
| mit |
BubuLK/sfepy | probe.py | 5 | 9831 | #!/usr/bin/env python
# 12.01.2007, c
"""
Probe finite element solutions in points defined by various geometrical probes.
Generation mode
---------------
python probe.py [generation options] <input file> <results file>
Probe the data in the results file corresponding to the problem defined in the
input file. The input file options must contain 'gen_probes' and 'probe_hook'
keys, pointing to proper functions accessible from the input file scope.
For each probe returned by `gen_probes()` a data plot figure and a text
file with the data plotted are saved, see the options below.
Generation options
------------------
-o, --auto-dir, --same-dir, -f, --only-names, -s
Postprocessing mode
-------------------
python probe.py [postprocessing options] <probe file> <figure file>
Read a previously probed data from the probe text file, re-plot them,
and integrate them along the probe.
Postprocessing options
----------------------
--postprocess, --radial, --only-names
Notes
-----
For extremely thin hexahedral elements the Newton's iteration for finding the
reference element coordinates might converge to a spurious solution outside
of the element. To obtain some values even in this case, try increasing the
--close-limit option value.
"""
from __future__ import absolute_import
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import sfepy
from sfepy.base.base import output, assert_
from sfepy.base.ioutils import edit_filename
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.discrete.fem import MeshIO
from sfepy.discrete.probes import write_results, read_results
import six
# Help strings for the command line options defined in main().
helps = {
    'debug':
    'automatically start debugger when an exception is raised',
    'filename' :
    'basename of output file(s) [default: <basename of input file>]',
    'output_format' :
    'output figure file format (supported by the matplotlib backend used) '\
    '[default: %(default)s]',
    'auto_dir' :
    'the directory of the results file is determined automatically using the '\
    '"output_dir" option in input file options',
    'same_dir' :
    'store the probe figures/data in the directory of the results file',
    'only_names' :
    'probe only named data',
    'step' :
    'probe the given time step',
    'close_limit' :
    'maximum limit distance of a point from the closest element allowed'
    ' for extrapolation. [default: %(default)s]',
    'postprocess' :
    'postprocessing mode',
    'radial' :
    'assume radial integration',
}
def generate_probes(filename_input, filename_results, options,
                    conf=None, problem=None, probes=None, labels=None,
                    probe_hooks=None):
    """
    Generate probe figures and data files.

    Parameters
    ----------
    filename_input : str
        Name of the problem description file.
    filename_results : str
        Name of the results file to probe.
    options : argparse.Namespace
        Parsed command line options, see ``main()``.
    conf : ProblemConf, optional
        Pre-loaded problem configuration; loaded from `filename_input`
        when not given.
    problem : Problem, optional
        Pre-created problem instance; created from `conf` when not given.
    probes, labels : list, optional
        Probes and their labels; generated by the input file's
        ``gen_probes`` function when not given.
    probe_hooks : dict, optional
        Mapping of keys to probe hook functions; taken from the input
        file's ``probe_hook`` option when not given.
    """
    if conf is None:
        required, other = get_standard_keywords()
        conf = ProblemConf.from_file(filename_input, required, other)

    opts = conf.options

    if options.auto_dir:
        output_dir = opts.get_('output_dir', '.')
        filename_results = os.path.join(output_dir, filename_results)

    output('results in: %s' % filename_results)

    io = MeshIO.any_from_filename(filename_results)
    step = options.step if options.step >= 0 else io.read_last_step()
    all_data = io.read_data(step)
    output('loaded:', list(all_data.keys()))
    output('from step:', step)

    if options.only_names is None:
        data = all_data
    else:
        # Keep only the requested data items.
        data = {}
        for key, val in six.iteritems(all_data):
            if key in options.only_names:
                data[key] = val

    if problem is None:
        problem = Problem.from_conf(conf,
                                    init_equations=False, init_solvers=False)

    if probes is None:
        gen_probes = conf.get_function(conf.options.gen_probes)
        probes, labels = gen_probes(problem)

    if probe_hooks is None:
        probe_hooks = {None : conf.get_function(conf.options.probe_hook)}

    if options.output_filename_trunk is None:
        options.output_filename_trunk = problem.ofn_trunk

    filename_template = options.output_filename_trunk \
                        + ('_%%d.%s' % options.output_format)
    if options.same_dir:
        filename_template = os.path.join(os.path.dirname(filename_results),
                                         filename_template)

    output_dir = os.path.dirname(filename_results)

    for ip, probe in enumerate(probes):
        output(ip, probe.name)

        probe.set_options(close_limit=options.close_limit)

        for key, probe_hook in six.iteritems(probe_hooks):

            out = probe_hook(data, probe, labels[ip], problem)
            if out is None: continue

            if isinstance(out, tuple):
                fig, results = out

            else:
                # BUG FIX: `results` was previously left unbound (or stale
                # from a preceding iteration) when a hook returned only a
                # figure, causing a NameError or re-saving old data.
                fig = out
                results = None

            if key is not None:
                filename = filename_template % (key, ip)

            else:
                filename = filename_template % ip

            if fig is not None:
                if isinstance(fig, dict):
                    # A hook may return several named figures.
                    for fig_name, fig_fig in six.iteritems(fig):
                        fig_filename = edit_filename(filename,
                                                     suffix='_' + fig_name)
                        fig_fig.savefig(fig_filename)
                        output('figure ->', os.path.normpath(fig_filename))

                else:
                    fig.savefig(filename)
                    output('figure ->', os.path.normpath(filename))

            if results is not None:
                txt_filename = edit_filename(filename, new_ext='.txt')

                write_results(txt_filename, probe, results)

                output('data ->', os.path.normpath(txt_filename))
def integrate_along_line(x, y, is_radial=False):
    r"""
    Integrate numerically (trapezoidal rule) a function :math:`y=y(x)`.

    If `is_radial` is True, integrate :math:`4 \pi x^2 y(x)` instead,
    i.e. assume a spherically symmetric radial integrand (interval
    midpoints of `x` are used for the :math:`x^2` factor).

    The docstring is a raw string so that ``\pi`` is not treated as an
    (invalid) escape sequence.

    Parameters
    ----------
    x, y : array
        Abscissae and the corresponding function values.
    is_radial : bool
        If True, assume radial integration.

    Returns
    -------
    val : float
        The approximate value of the integral.
    """
    dx = nm.diff(x)
    # Trapezoidal (interval-average) values of y on each interval.
    ay = 0.5 * (y[:-1] + y[1:])

    if is_radial:
        ax = 0.5 * (x[:-1] + x[1:])
        val = 4.0 * nm.pi * nm.sum(ay * dx * (ax**2))

    else:
        val = nm.sum(ay * dx)

    return val
def postprocess(filename_input, filename_results, options):
    """
    Postprocess probe data files - replot, integrate data.

    Parameters
    ----------
    filename_input : str
        Name of the probe text file produced by :func:`generate_probes()`.
    filename_results : str
        Name of the figure file the re-plotted data are saved into.
    options : argparse.Namespace
        Parsed command line options; `only_names` and `radial` are used.
    """
    from matplotlib import pyplot as plt

    header, results = read_results(filename_input,
                                   only_names=options.only_names)
    output(header)

    fig = plt.figure()
    for name, result in six.iteritems(results):
        # First column: probe parameter, second column: probed values.
        pars, vals = result[:, 0], result[:, 1]

        # Keep only finite values for the integration.
        ii = nm.where(nm.isfinite(vals))[0]

        # Nans only at the edges.
        assert_(nm.diff(ii).sum() == (len(ii)-1))

        val = integrate_along_line(pars[ii], vals[ii], options.radial)

        label = r'%s: $\int\ %s' % (name, name)
        if options.radial:
            label += ' (r)'
        label += '$ = %.5e'% val

        plt.plot(pars, vals, label=label, lw=0.2, marker='+', ms=1)
        plt.ylabel('probed data')
        plt.xlabel('probe coordinate')

        output(label)

    plt.legend()

    fig.savefig(filename_results)
def main():
    """Parse the command line arguments and run either the generation or
    the postprocessing mode (see the module docstring)."""
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + sfepy.__version__)
    parser.add_argument('--debug',
                        action='store_true', dest='debug',
                        default=False, help=helps['debug'])
    parser.add_argument('-o', metavar='filename',
                        action='store', dest='output_filename_trunk',
                        default=None, help=helps['filename'])
    parser.add_argument('--auto-dir',
                        action='store_true', dest='auto_dir',
                        default=False, help=helps['auto_dir'])
    parser.add_argument('--same-dir',
                        action='store_true', dest='same_dir',
                        default=False, help=helps['same_dir'])
    parser.add_argument('-f', '--format', metavar='format',
                        action='store', dest='output_format',
                        default='png', help=helps['output_format'])
    parser.add_argument('--only-names', metavar='list of names',
                        action='store', dest='only_names',
                        default=None, help=helps['only_names'])
    parser.add_argument('-s', '--step', type=int, metavar='step',
                        action='store', dest='step',
                        default=0, help=helps['step'])
    parser.add_argument('-c', '--close-limit', type=float, metavar='distance',
                        action='store', dest='close_limit',
                        default=0.1, help=helps['close_limit'])
    parser.add_argument('-p', '--postprocess',
                        action='store_true', dest='postprocess',
                        default=False, help=helps['postprocess'])
    parser.add_argument('--radial',
                        action='store_true', dest='radial',
                        default=False, help=helps['radial'])
    parser.add_argument('filename_in')
    parser.add_argument('filename_out')
    options = parser.parse_args()

    if options.debug:
        from sfepy.base.base import debug_on_error; debug_on_error()

    filename_input = options.filename_in
    filename_results = options.filename_out

    if options.only_names is not None:
        # Comma-separated list on the command line -> Python list.
        options.only_names = options.only_names.split(',')

    output.prefix = 'probe:'

    if options.postprocess:
        postprocess(filename_input, filename_results, options)
    else:
        generate_probes(filename_input, filename_results, options)
main()
| bsd-3-clause |
IQSS/geoconnect | gc_apps/gis_tabular/tab_file_stats.py | 1 | 6000 | """
Gather tabular file information: number of rows, column names, etc
"""
from csv import QUOTE_NONNUMERIC
import pandas as pd
from django.core.files.base import ContentFile
from gc_apps.gis_tabular.models import TabularFileInfo
from gc_apps.geo_utils.file_field_helper import get_file_path_or_url
from gc_apps.geo_utils.tabular_util import normalize_colname
from gc_apps.geo_utils.msg_util import msg
import logging
LOGGER = logging.getLogger(__name__)  # module-level logger

NUM_PREVIEW_ROWS = 5  # number of data rows included in the tabular preview
class TabFileStats(object):
    """Gather tabular file information: number of rows, column names, etc

    The whole pipeline (column renaming, stats collection, model update)
    runs on construction; check :meth:`has_error` afterwards.

    NOTE(review): this class uses Python 2 / legacy pandas APIs
    (``pd.parser.CParserError``, ``.message``, ``str.decode``) -- confirm
    the runtime Python and pandas versions before modernising.
    """

    def __init__(self, file_object, delim=',', tabular_info=None):
        # file_object: readable file-like object with delimited data.
        # delim: single-character column delimiter.
        # tabular_info: optional TabularFileInfo model to update with stats.
        assert hasattr(file_object, 'read'),\
            "TabFileStats. file_object does not have .read() function: %s" % file_object

        self.file_object = file_object
        self.delimiter = str(delim)
        #print 'init delim:', self.delimiter, len(self.delimiter)
        #'\t' #str(delim) #b',' #delim
        self.tabular_info = tabular_info

        # Collected stats, filled in by collect_stats().
        self.column_names = []
        self.num_rows = 0
        self.num_cols = 0
        self.preview_rows = []

        # Error state, set via add_error().
        self.error_found = False
        self.error_message = None

        self.stats_collected = False

        # Run the full pipeline on construction.
        self.rename_columns()
        self.collect_stats()
        self.update_tabular_info_object()

    def has_error(self):
        """Was there an error?"""
        return self.error_found

    def add_error(self, message):
        """
        Save error message encountered in the process of
        collecting stats or updating the tabularFileInfo object
        """
        self.error_found = True
        self.error_message = message

    @staticmethod
    def create_from_tabular_info(tabular_info):
        """Build a TabFileStats from a TabularFileInfo model instance."""
        assert isinstance(tabular_info, TabularFileInfo)\
            , 'tabular_info must be a TabularFileInfo object'
        assert tabular_info.dv_file is not None, "tabular_info.file cannot be None"

        # tabular_info.dv_file.file.name\
        return TabFileStats(file_object=tabular_info.dv_file,
                            delim=tabular_info.delimiter,
                            tabular_info=tabular_info)

    def rename_columns(self):
        """Normalize the column names and, when any changed, rewrite the
        stored file with the renamed header."""
        if self.has_error():
            return

        try:
            df = pd.read_csv(get_file_path_or_url(self.file_object),
                             sep=self.delimiter)
        except pd.parser.CParserError as ex_obj:
            # NOTE(review): pd.parser.CParserError / .message are pandas
            # < 0.20 + Python 2 APIs.
            err_msg = ('Could not process the file. '
                       'At least one row had too many values. '
                       '(error: %s)') % ex_obj.message
            self.add_error(err_msg)
            return

        count = 0
        columns_renamed = {}
        for column in df.columns.values.tolist():
            normalized = normalize_colname(colname=column,
                                           position=count + 1)
            # Note, normalize_colname returns unicode
            # For comparison, get a unicode version of the
            # pandas column.
            # We don't care that column_uni is imperfect/may
            # remove characters.  Only used for the comparison
            # (this is not pretty)
            # NOTE(review): str.decode only exists on Python 2 byte strings.
            column_uni = column.decode('utf8', 'ignore')
            if column_uni != normalized:
                columns_renamed[column] = normalized
            count += 1

        if len(columns_renamed) > 0:
            df.rename(columns=columns_renamed, inplace=True)

            # http://stackoverflow.com/questions/36519086/pandas-how-to-get-rid-of-unnamed-column-in-a-dataframe
            fh_csv = df.to_csv(quoting=QUOTE_NONNUMERIC,
                               sep=self.delimiter,
                               index=False)
            content_file = ContentFile(fh_csv)

            # Save the ContentFile in the tabular_info object
            # ----------------------------------
            self.tabular_info.dv_file.save(self.tabular_info.datafile_label,
                                           content_file)

    def collect_stats(self):
        """
        Open the file: collect num_rows, num_cols and preview_row data
        """
        if self.has_error():
            return

        try:
            df = pd.read_csv(get_file_path_or_url(self.file_object),
                             sep=self.delimiter)
        except pd.parser.CParserError as ex_obj:
            err_msg = ('Could not process the file. '
                       'At least one row had too many values. '
                       '(error: %s)') % ex_obj.message
            self.add_error(err_msg)
            return

        self.special_case_col_formatting(df)

        self.column_names = df.columns.values.tolist()
        self.num_cols = len(self.column_names)
        self.num_rows = len(df.index)
        self.preview_rows = df.head(NUM_PREVIEW_ROWS).values.tolist()

        if not self.preview_rows or len(self.preview_rows) == 0:
            self.add_error('No data rows in the file')
            return

        self.stats_collected = True

    def special_case_col_formatting(self, df):
        """Will eventually need to be factored out"""
        if df is None:
            return

        # Treat census block groups as string instead of numbers
        # - 12-digit numeric code that may receive zero-padding
        #
        keep_as_string_cols = ['BG_ID_10', 'CT_ID_10']

        for col_name in keep_as_string_cols:
            if col_name in df.columns:
                df[col_name] = df[col_name].astype(str)

    def update_tabular_info_object(self):
        """
        If one is specified update the tabular_info object.
        This is usually a TabularFileInfo object
        """
        if self.has_error():
            return

        if not self.tabular_info:
            return

        self.tabular_info.num_rows = self.num_rows
        self.tabular_info.num_columns = self.num_cols
        self.tabular_info.column_names = self.column_names
        self.tabular_info.save()
| apache-2.0 |
cactusbin/nyt | matplotlib/examples/pylab_examples/annotation_demo.py | 6 | 5582 | """
Some examples of how to annotate points in figures. You specify an
annotation point xy=(x,y) and a text point xytext=(x,y) for the
annotated points and text location, respectively. Optionally, you can
specify the coordinate system of xy and xytext with one of the
following strings for xycoords and textcoords (default is 'data')
'figure points' : points from the lower left corner of the figure
'figure pixels' : pixels from the lower left corner of the figure
'figure fraction' : 0,0 is lower left of figure and 1,1 is upper, right
'axes points' : points from lower left corner of axes
'axes pixels' : pixels from lower left corner of axes
'axes fraction' : 0,1 is lower left of axes and 1,1 is upper right
'offset points' : Specify an offset (in points) from the xy value
'data' : use the axes data coordinate system
Optionally, you can specify arrow properties which draws and arrow
from the text to the annotated point by giving a dictionary of arrow
properties
Valid keys are
width : the width of the arrow in points
frac : the fraction of the arrow length occupied by the head
headwidth : the width of the base of the arrow head in points
shrink : move the tip and base some percent away from the
annotated point and text
any key for matplotlib.patches.polygon (eg facecolor)
For physical coordinate systems (points or pixels) the origin is the
(bottom, left) of the figure or axes. If the value is negative,
however, the origin is from the (right, top) of the figure or axes,
analogous to negative indexing of sequences.
"""
from matplotlib.pyplot import figure, show
from matplotlib.patches import Ellipse
import numpy as np
if 1:
    # If only one location is given, the text and xypoint being
    # annotated are assumed to be the same.  (The `if 1:` guards let each
    # demo section be switched off by editing the constant.)
    fig = figure()
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-3,5))

    t = np.arange(0.0, 5.0, 0.01)
    s = np.cos(2*np.pi*t)
    line, = ax.plot(t, s, lw=3, color='purple')

    ax.annotate('axes center', xy=(.5, .5), xycoords='axes fraction',
                horizontalalignment='center', verticalalignment='center')

    ax.annotate('pixels', xy=(20, 20), xycoords='figure pixels')

    ax.annotate('points', xy=(100, 300), xycoords='figure points')

    ax.annotate('offset', xy=(1, 1), xycoords='data',
                xytext=(-15, 10), textcoords='offset points',
                arrowprops=dict(facecolor='black', shrink=0.05),
                horizontalalignment='right', verticalalignment='bottom',
                )

    ax.annotate('local max', xy=(3, 1), xycoords='data',
                xytext=(0.8, 0.95), textcoords='axes fraction',
                arrowprops=dict(facecolor='black', shrink=0.05),
                horizontalalignment='right', verticalalignment='top',
                )

    ax.annotate('a fractional title', xy=(.025, .975),
                xycoords='figure fraction',
                horizontalalignment='left', verticalalignment='top',
                fontsize=20)

    # Use negative points or pixels to specify from right, top: -10, 10
    # is 10 points to the left of the right side of the axes and 10
    # points above the bottom.
    ax.annotate('bottom right (points)', xy=(-10, 10),
                xycoords='axes points',
                horizontalalignment='right', verticalalignment='bottom',
                fontsize=20)

if 1:
    # You can specify the xypoint and the xytext in different positions
    # and coordinate systems, and optionally turn on a connecting line
    # and mark the point with a marker.  Annotations work on polar axes
    # too.  In the example below, the xy point is in native coordinates
    # (xycoords defaults to 'data').  For a polar axes, this is in
    # (theta, radius) space.  The text in this example is placed in the
    # fractional figure coordinate system.  Text keyword args like
    # horizontal and vertical alignment are respected.
    fig = figure()
    ax = fig.add_subplot(111, polar=True)
    r = np.arange(0,1,0.001)
    theta = 2*2*np.pi*r
    line, = ax.plot(theta, r, color='#ee8d18', lw=3)

    ind = 800
    thisr, thistheta = r[ind], theta[ind]
    ax.plot([thistheta], [thisr], 'o')
    ax.annotate('a polar annotation',
                xy=(thistheta, thisr),  # theta, radius
                xytext=(0.05, 0.05),    # fraction, fraction
                textcoords='figure fraction',
                arrowprops=dict(facecolor='black', shrink=0.05),
                horizontalalignment='left',
                verticalalignment='bottom',
                )

if 1:
    # You can also use polar notation on a cartesian axes.  Here the
    # native coordinate system ('data') is cartesian, so you need to
    # specify the xycoords and textcoords as 'polar' if you want to use
    # (theta, radius).
    el = Ellipse((0,0), 10, 20, facecolor='r', alpha=0.5)

    fig = figure()
    ax = fig.add_subplot(111, aspect='equal')
    ax.add_artist(el)
    el.set_clip_box(ax.bbox)
    ax.annotate('the top',
                xy=(np.pi/2., 10.),     # theta, radius
                xytext=(np.pi/3, 20.),  # theta, radius
                xycoords='polar',
                textcoords='polar',
                arrowprops=dict(facecolor='black', shrink=0.05),
                horizontalalignment='left',
                verticalalignment='bottom',
                clip_on=True,  # clip to the axes bounding box
                )

    ax.set_xlim(-20, 20)
    ax.set_ylim(-20, 20)

show()
| unlicense |
palash1992/GEM | tests/test_karate.py | 1 | 2625 | '''
Run the graph embedding methods on Karate graph and evaluate them on
graph reconstruction and visualization. Please copy the
gem/data/karate.edgelist to the working directory
'''
import matplotlib.pyplot as plt
from time import time
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
from gem.evaluation import evaluate_graph_reconstruction as gr
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.node2vec import node2vec
from gem.embedding.sdne import SDNE
# File that contains the edges. Format: source target
# Optionally, you can add weights as third column: source target weight
edge_f = 'data/karate.edgelist'
# Specify whether the edges are directed
isDirected = True

# Load graph
G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)
G = G.to_directed()

models = []
# Load the models you want to run (`d` is the embedding dimension throughout).
models.append(GraphFactorization(d=2, max_iter=50000, eta=1 * 10**-4, regu=1.0))
models.append(HOPE(d=4, beta=0.01))
models.append(LaplacianEigenmaps(d=2))
models.append(LocallyLinearEmbedding(d=2))
models.append(node2vec(d=2, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))
models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, xeta=0.01,n_batch=100,
                modelfile=['enc_model.json', 'dec_model.json'],
                weightfile=['enc_weights.hdf5', 'dec_weights.hdf5']))

# For each model, learn the embedding and evaluate on graph reconstruction and visualization
for embedding in models:
    print ('Num nodes: %d, num edges: %d' % (G.number_of_nodes(), G.number_of_edges()))
    t1 = time()
    # Learn embedding - accepts a networkx graph or file with edge list
    Y, t = embedding.learn_embedding(graph=G, edge_f=None, is_weighted=True, no_python=True)
    print (embedding._method_name+':\n\tTraining time: %f' % (time() - t1))
    # Evaluate on graph reconstruction (MAP and precision curve).
    MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G, embedding, Y, None)
    #---------------------------------------------------------------------------------
    print(("\tMAP: {} \t preccision curve: {}\n\n\n\n"+'-'*100).format(MAP,prec_curv[:5]))
    #---------------------------------------------------------------------------------
    # Visualize the 2D embedding, then clear the figure for the next model.
    viz.plot_embedding2D(embedding.get_embedding(), di_graph=G, node_colors=None)
    plt.show()
    plt.clf()
| bsd-3-clause |
Unidata/MetPy | v0.6/_downloads/NEXRAD_Level_3_File.py | 2 | 1569 | # Copyright (c) 2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
NEXRAD Level 3 File
===================
Use MetPy to read information from a NEXRAD Level 3 (NIDS product) file and plot
"""
import matplotlib.pyplot as plt
import numpy as np
from metpy.cbook import get_test_data
from metpy.io import Level3File
from metpy.plots import add_metpy_logo, ctables
###########################################
# One row of two panels, one per product code. The paired NWS color tables
# suggest N0Q/N0U are base reflectivity and base velocity respectively --
# NOTE(review): confirm against the NIDS product documentation.
fig, axes = plt.subplots(1, 2, figsize=(15, 8))
add_metpy_logo(fig, 1200, 85, size='large')
for v, ctable, ax in zip(('N0Q', 'N0U'), ('NWSReflectivity', 'NWSVelocity'), axes):
    # Open the file
    name = get_test_data('nids/KOUN_SDUS54_{}TLX_201305202016'.format(v), as_file_obj=False)
    f = Level3File(name)
    # Pull the data out of the file object
    datadict = f.sym_block[0][0]
    # Turn into an array, then mask
    # (masked cells are simply not drawn by pcolormesh below)
    data = np.ma.array(datadict['data'])
    data[data == 0] = np.ma.masked
    # Grab azimuths and calculate a range based on number of gates
    # (one more edge than gates, as pcolormesh needs cell boundaries)
    az = np.array(datadict['start_az'] + [datadict['end_az'][-1]])
    rng = np.linspace(0, f.max_range, data.shape[-1] + 1)
    # Convert az,range to x,y
    # x ~ sin(az), y ~ cos(az): the angle is measured from the +y (north) axis
    xlocs = rng * np.sin(np.deg2rad(az[:, np.newaxis]))
    ylocs = rng * np.cos(np.deg2rad(az[:, np.newaxis]))
    # Plot the data
    norm, cmap = ctables.registry.get_with_steps(ctable, 16, 16)
    ax.pcolormesh(xlocs, ylocs, data, norm=norm, cmap=cmap)
    ax.set_aspect('equal', 'datalim')
    ax.set_xlim(-40, 20)
    ax.set_ylim(-30, 30)
plt.show()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/examples/plot_johnson_lindenstrauss_bound.py | 67 | 7474 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
# Load the iris measurements together with their class labels.
dataset = load_iris()
features = dataset.data
labels = dataset.target
n_components = 2
# Fit the incremental estimator in mini-batches of 10 samples, then the
# ordinary PCA on the full data; keep both 2D projections for plotting.
incremental = IncrementalPCA(n_components=n_components, batch_size=10)
proj_incremental = incremental.fit_transform(features)
full_pca = PCA(n_components=n_components)
proj_full = full_pca.fit_transform(features)
# One scatter plot per estimator; colors identify the three iris species.
for projection, title in [(proj_incremental, "Incremental PCA"),
                          (proj_full, "PCA")]:
    plt.figure(figsize=(8, 8))
    for color, class_id, target_name in zip("rgb", [0, 1, 2],
                                            dataset.target_names):
        mask = labels == class_id
        plt.scatter(projection[mask, 0], projection[mask, 1],
                    c=color, label=target_name)
    if "Incremental" in title:
        # Compare the two projections up to a sign flip of each component.
        err = np.abs(np.abs(proj_full) - np.abs(proj_incremental)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best")
    plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
sonnyhu/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 73 | 6451 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
# The outlier counts are used as slice bounds and array sizes below, so
# they must be integers: cast the (float) linspace grid explicitly instead
# of relying on deprecated float indexing (a TypeError on Python 3 /
# modern NumPy).
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
    for j in range(repeat):
        # NOTE(review): i * j is 0 for the whole first row and column, so
        # those repeats share a seed -- kept as-is to preserve the example.
        rng = np.random.RandomState(i * j)
        # generate data
        X = rng.randn(n_samples, n_features)
        # add some outliers
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        # use the seeded local RNG (not the global np.random) so each run
        # of the example is reproducible
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False
        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))
        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
             yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
             label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
             yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
             label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
             yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
             label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
             yerr=err_cov_mcd.std(1),
             label="Robust covariance (mcd)", color='m')
# integer (floor) division below: these expressions are slice bounds, and
# float slice bounds raise a TypeError on Python 3
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
         color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
             yerr=err_cov_emp_pure.std(1),
             label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
wiki2014/Learning-Summary | alps/cts/apps/CameraITS/tests/scene1/test_param_sensitivity.py | 3 | 2539 | # Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.caps
import its.device
import its.objects
import its.target
import pylab
import os.path
import matplotlib
import matplotlib.pyplot
def main():
    """Test that the android.sensor.sensitivity parameter is applied.
    """
    NAME = os.path.basename(__file__).split(".")[0]
    NUM_STEPS = 5
    # Per-channel mean brightness of the center patch, one entry per shot.
    channel_means = {"r": [], "g": [], "b": []}
    sensitivities = None
    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()
        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                             its.caps.per_frame_control(props))
        expt,_ = its.target.get_target_exposure_combos(cam)["midSensitivity"]
        # Sweep NUM_STEPS evenly spaced sensitivities across the advertised
        # range, keeping the exposure time fixed at the mid-sensitivity value.
        sens_range = props['android.sensor.info.sensitivityRange']
        step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS - 1)
        sensitivities = [sens_range[0] + step * i for i in range(NUM_STEPS)]
        for sens in sensitivities:
            req = its.objects.manual_capture_request(sens, expt)
            img = its.image.convert_capture_to_rgb_image(cam.do_capture(req))
            its.image.write_image(
                    img, "%s_iso=%04d.jpg" % (NAME, sens))
            # Center 10%-of-frame patch, away from any edge vignetting.
            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
            means = its.image.compute_image_means(tile)
            channel_means["r"].append(means[0])
            channel_means["g"].append(means[1])
            channel_means["b"].append(means[2])
    # Draw a plot.
    for channel in ("r", "g", "b"):
        pylab.plot(sensitivities, channel_means[channel], channel)
    pylab.ylim([0, 1])
    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
    # Test for pass/fail: each shot must be brighter than the previous one.
    for means in channel_means.values():
        for prev, curr in zip(means, means[1:]):
            assert(curr > prev)
if __name__ == '__main__':
    main()
| gpl-3.0 |
petosegan/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
    """Soft Voting/Majority Rule classifier for unfitted estimators.
    Read more in the :ref:`User Guide <voting_classifier>`.
    Parameters
    ----------
    estimators : list of (string, estimator) tuples
        Invoking the `fit` method on the `VotingClassifier` will fit clones
        of those original estimators that will be stored in the class attribute
        `self.estimators_`.
    voting : str, {'hard', 'soft'} (default='hard')
        If 'hard', uses predicted class labels for majority rule voting.
        Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
        an ensemble of well-calibrated classifiers.
    weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
        predicted class labels (`hard` voting) or class probabilities
        before averaging (`soft` voting). Uses uniform weights if `None`.
    Attributes
    ----------
    classes_ : array-like, shape = [n_predictions]
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> clf1 = LogisticRegression(random_state=1)
    >>> clf2 = RandomForestClassifier(random_state=1)
    >>> clf3 = GaussianNB()
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> eclf1 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
    >>> eclf1 = eclf1.fit(X, y)
    >>> print(eclf1.predict(X))
    [1 1 1 2 2 2]
    >>> eclf2 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...         voting='soft')
    >>> eclf2 = eclf2.fit(X, y)
    >>> print(eclf2.predict(X))
    [1 1 1 2 2 2]
    >>> eclf3 = VotingClassifier(estimators=[
    ...        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...        voting='soft', weights=[2,1,1])
    >>> eclf3 = eclf3.fit(X, y)
    >>> print(eclf3.predict(X))
    [1 1 1 2 2 2]
    >>>
    """
    def __init__(self, estimators, voting='hard', weights=None):
        self.estimators = estimators
        # Snapshot of (name, estimator) pairs as a dict; built once at
        # construction time, so later mutation of ``estimators`` is not
        # reflected here.
        self.named_estimators = dict(estimators)
        self.voting = voting
        self.weights = weights
    def fit(self, X, y):
        """ Fit the estimators.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        Returns
        -------
        self : object
        """
        # Voting is only defined for a single 1-D label vector, so reject
        # multilabel / multi-output targets up front.
        if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
            raise NotImplementedError('Multilabel and multi-output'
                                      ' classification is not supported.')
        if self.voting not in ('soft', 'hard'):
            raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
                             % self.voting)
        # NOTE(review): ``self.weights and ...`` relies on truthiness, so an
        # empty sequence skips the length check, and a numpy array here would
        # raise on bool(); weights is documented as a plain sequence.
        if self.weights and len(self.weights) != len(self.estimators):
            raise ValueError('Number of classifiers and weights must be equal'
                             '; got %d weights, %d estimators'
                             % (len(self.weights), len(self.estimators)))
        # Encode labels to 0..n_classes-1 so bincount/argmax in predict()
        # can work on integer class indices.
        self.le_ = LabelEncoder()
        self.le_.fit(y)
        self.classes_ = self.le_.classes_
        self.estimators_ = []
        # Fit a clone of every estimator on the encoded labels; the
        # estimators supplied by the user are left untouched.
        for name, clf in self.estimators:
            fitted_clf = clone(clf).fit(X, self.le_.transform(y))
            self.estimators_.append(fitted_clf)
        return self
    def predict(self, X):
        """ Predict class labels for X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        ----------
        maj : array-like, shape = [n_samples]
            Predicted class labels.
        """
        if self.voting == 'soft':
            maj = np.argmax(self.predict_proba(X), axis=1)
        else:  # 'hard' voting
            # ``predictions`` has shape [n_samples, n_classifiers]; a
            # (optionally weighted) bincount per row tallies the votes for
            # each encoded class and argmax picks the winner.
            predictions = self._predict(X)
            maj = np.apply_along_axis(lambda x:
                                      np.argmax(np.bincount(x,
                                                weights=self.weights)),
                                      axis=1,
                                      arr=predictions)
        # Map the encoded winners back to the original class labels.
        maj = self.le_.inverse_transform(maj)
        return maj
    def _collect_probas(self, X):
        """Collect results from clf.predict_proba calls. """
        return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
    def _predict_proba(self, X):
        """Predict class probabilities for X in 'soft' voting """
        # Weighted mean over the classifier axis (axis 0) of the stacked
        # [n_classifiers, n_samples, n_classes] probability array.
        avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
        return avg
    @property
    def predict_proba(self):
        """Compute probabilities of possible outcomes for samples in X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        ----------
        avg : array-like, shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
        """
        # Implemented as a property returning the bound method so that the
        # AttributeError is raised at attribute-access time, making
        # hasattr(clf, 'predict_proba') False for voting='hard'.
        if self.voting == 'hard':
            raise AttributeError("predict_proba is not available when"
                                 " voting=%r" % self.voting)
        return self._predict_proba
    def transform(self, X):
        """Return class labels or probabilities for X for each estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        If `voting='soft'`:
          array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
        If `voting='hard'`:
          array-like = [n_classifiers, n_samples]
            Class labels predicted by each classifier.
        """
        if self.voting == 'soft':
            return self._collect_probas(X)
        else:
            return self._predict(X)
    def get_params(self, deep=True):
        """Return estimator parameter names for GridSearch support"""
        if not deep:
            return super(VotingClassifier, self).get_params(deep=False)
        else:
            out = super(VotingClassifier, self).get_params(deep=False)
            # Expose each sub-estimator and its parameters under the
            # '<name>__<param>' convention so GridSearchCV can set them.
            out.update(self.named_estimators.copy())
            for name, step in six.iteritems(self.named_estimators):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out
    def _predict(self, X):
        """Collect results from clf.predict calls. """
        # Transposed to [n_samples, n_classifiers] so each row holds one
        # sample's votes.
        return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
gaborfodor/TNP | extract_features.py | 1 | 17673 | import pandas as pd
import numpy as np
from os import listdir, makedirs, getcwd, remove
from os.path import isfile, join, abspath, exists, isdir
import datetime as dt
# Feature-extraction hyperparameters (Python 2 script):
#   MIN_FREQ       - minimum occurrence count for a category to get its own column
#   LOCATION_CAT   - bucket width when binning location ids
#   FEATURE_CAT    - bucket width when binning log-feature ids
#   FAULT_LOOKBACK - how many neighboring rows to look at for fault-severity features
#   SHIFT          - how many neighboring rows to diff against for "before/after" features
MIN_FREQ, LOCATION_CAT, FEATURE_CAT, FAULT_LOOKBACK, SHIFT = 20, 4, 5, 10, 1
print MIN_FREQ, LOCATION_CAT, FEATURE_CAT, FAULT_LOOKBACK, SHIFT
# input CSVs are read from ./data; outputs are written to ./extracted_features
base_path = getcwd()
data_path = join(base_path, 'data')
feature_path = join(base_path, 'extracted_features')
if not exists(feature_path):
    makedirs(feature_path)
time0 = dt.datetime.now()
# ---------------------------------------------------------------------------------
# append train & test
# ---------------------------------------------------------------------------------
# location strings look like "location 123"; keep only the numeric id
train = pd.read_csv(join(data_path, 'train.csv'))
train['location_id'] = train.location.apply(lambda x: int(x.split('location ')[1]))
test = pd.read_csv(join(data_path, 'test.csv'))
# sentinel label for test rows so train/test can share one frame
test['fault_severity'] = -1
test['location_id'] = test.location.apply(lambda x: int(x.split('location ')[1]))
print 'train', train.shape, 'test', test.shape
features = train.append(test)
features = features.drop('location', axis=1)
print features.shape
# ---------------------------------------------------------------------------------
# order ~ time
# ---------------------------------------------------------------------------------
# the row order of severity_type.csv is used as a proxy for chronological order;
# 'order' is that rank normalized to [0, 1)
severity_type = pd.read_csv(join(data_path, 'severity_type.csv'))
severity_type_order = severity_type[['id']].drop_duplicates()
severity_type_order['order'] = 1. * np.arange(len(severity_type_order)) / len(severity_type_order)
features = pd.merge(features, severity_type_order, how='inner', on='id')
print features.shape
print features[:3]
# ---------------------------------------------------------------------------------
# location count
# ---------------------------------------------------------------------------------
# number of rows sharing each location
location_count = features.groupby('location_id').count()[['id']]
location_count.columns = ['location_count']
features = pd.merge(features, location_count, how='inner', left_on='location_id', right_index=True)
print features.shape
# ---------------------------------------------------------------------------------
# binarize frequent locations
# ---------------------------------------------------------------------------------
# one 0/1 indicator column per location that occurs more than MIN_FREQ times
frequent_locations = location_count[location_count['location_count'] > MIN_FREQ]
frequent_location_records = features[features['location_id'].isin(frequent_locations.index)].copy()
frequent_location_records['value'] = 1
location_features = frequent_location_records.pivot(index='id', columns='location_id', values='value')
location_features.columns = ['location_%i' % c for c in location_features.columns]
print 'location_features', location_features.shape
features = pd.merge(features, location_features, how='left', left_on='id', right_index=True)
features = features.fillna(0)
print features.shape
# ---------------------------------------------------------------------------------
# event type ['id', 'event_type'] (31170, 2)
# ---------------------------------------------------------------------------------
event_type = pd.read_csv(join(data_path, 'event_type.csv'))
# number of event types attached to each id
event_count = event_type.groupby('id').count()[['event_type']]
event_count.columns = ['event_type_count']
features = pd.merge(features, event_count, how='inner', left_on='id', right_index=True)
print features.shape
# indicator columns for frequent event types, one shared flag for the rare ones
event_type_count = event_type.groupby('event_type').count()[['id']].sort_values(by='id', ascending=False)
frequent_event_types = event_type_count[event_type_count['id'] > MIN_FREQ]
frequent_event_records = event_type[event_type['event_type'].isin(frequent_event_types.index)].copy()
frequent_event_records['value'] = 1
event_features = frequent_event_records.pivot(index='id', columns='event_type', values='value')
event_features.columns = map(lambda x: x.replace(' ', '_'), event_features.columns)
print 'event features', event_features.shape
features = pd.merge(features, event_features, how='left', left_on='id', right_index=True)
print features.shape
rare_event_types = event_type_count[event_type_count['id'] <= MIN_FREQ]
rare_event_records = event_type[event_type['event_type'].isin(rare_event_types.index)].copy()
rare_event_records['value'] = 1
rare_event_feature = rare_event_records.groupby('id').max()[['value']]
rare_event_feature.columns = ['rare_event_type']
features = pd.merge(features, rare_event_feature, how='left', left_on='id', right_index=True)
print features.shape
# bucketed min/max of the numeric event-type id per row (bucket width 3)
event_type['event_id'] = event_type.event_type.apply(lambda x: int(x.split('event_type ')[1]))
max_event_cat = event_type.groupby('id').max()[['event_id']] // 3
max_event_cat.columns = ['max_event_type_cat']
min_event_cat = event_type.groupby('id').min()[['event_id']] // 3
min_event_cat.columns = ['min_event_type_cat']
features = pd.merge(features, max_event_cat, how='left', left_on='id', right_index=True)
features = pd.merge(features, min_event_cat, how='left', left_on='id', right_index=True)
print features.shape
features = features.fillna(0)
# ---------------------------------------------------------------------------------
# log_feature ['id', 'log_feature', 'volume'] (58671, 3)
# ---------------------------------------------------------------------------------
log_feature = pd.read_csv(join(data_path, 'log_feature.csv'))
log_feature_count = log_feature.groupby('id').count()[['log_feature']]
log_feature_count.columns = ['log_feature_count']
features = pd.merge(features, log_feature_count, how='inner', left_on='id', right_index=True)
print features.shape
# NOTE: log_feature_count is deliberately reassigned here to a per-feature count;
# the previous per-id frame has already been merged above
log_feature_count = log_feature.groupby('log_feature').count()[['id']].sort_values(by='id', ascending=False)
frequent_log_features = log_feature_count[log_feature_count['id'] > MIN_FREQ]
frequent_log_feature_records = log_feature[log_feature['log_feature'].isin(frequent_log_features.index)].copy()
# frequent log features keep their raw volume as the column value
log_feature_features = frequent_log_feature_records.pivot(index='id', columns='log_feature', values='volume')
log_feature_features.columns = map(lambda x: x.replace(' ', '_'), log_feature_features.columns)
log_feature_features.columns = map(lambda x: x.replace('feature', 'log_feature'), log_feature_features.columns)
print 'log_feature_features', log_feature_features.shape
features = pd.merge(features, log_feature_features, how='left', left_on='id', right_index=True)
print features.shape
rare_log_features = log_feature_count[log_feature_count['id'] <= MIN_FREQ]
rare_log_feature_records = log_feature[log_feature['log_feature'].isin(rare_log_features.index)].copy()
rare_log_feature_records['value'] = 1
rare_log_feature_feature = rare_log_feature_records.groupby('id').max()[['value']]
rare_log_feature_feature.columns = ['rare_log_feature']
features = pd.merge(features, rare_log_feature_feature, how='left', left_on='id', right_index=True)
print features.shape
# bucketed min/median/max of the numeric log-feature id (bucket width FEATURE_CAT)
log_feature['log_feature_id'] = log_feature.log_feature.apply(lambda x: int(x.split('feature ')[1]))
max_log_feature_cat = log_feature.groupby('id').max()[['log_feature_id']] // FEATURE_CAT
max_log_feature_cat.columns = ['max_log_feature_cat']
median_log_feature_cat = log_feature.groupby('id').median()[['log_feature_id']] // FEATURE_CAT
median_log_feature_cat.columns = ['median_log_feature_cat']
min_log_feature_cat = log_feature.groupby('id').min()[['log_feature_id']] // FEATURE_CAT
min_log_feature_cat.columns = ['min_log_feature_cat']
features = pd.merge(features, max_log_feature_cat, how='left', left_on='id', right_index=True)
features = pd.merge(features, median_log_feature_cat, how='left', left_on='id', right_index=True)
features = pd.merge(features, min_log_feature_cat, how='left', left_on='id', right_index=True)
print features.shape
# total volume per (id, log-feature bucket)
log_feature['log_feature_id_cat'] = log_feature['log_feature_id'] // FEATURE_CAT
log_feature_cat = log_feature.groupby(['id', 'log_feature_id_cat']).sum()['volume']
log_feature_cat = log_feature_cat.reset_index()
log_feature_cat_feature = log_feature_cat.pivot(index='id', columns='log_feature_id_cat', values='volume')
log_feature_cat_feature.columns = ['log_feature_cat_%i' % c for c in log_feature_cat_feature.columns]
features = pd.merge(features, log_feature_cat_feature, how='left', left_on='id', right_index=True)
print 'log_feature_cat_feature', log_feature_cat_feature.shape
# clip volume at 50 and count how many log features of each (clipped) volume each id has
log_feature.loc[log_feature['volume'] > 49, 'volume'] = 50
volume_counts = log_feature.groupby(['id', 'volume']).count()[['log_feature']].reset_index()
volume_features = volume_counts.pivot(index='id', columns='volume', values='log_feature')
volume_features.columns = ['volume_%i' % c for c in volume_features.columns]
print 'volume_features', volume_features.shape
features = pd.merge(features, volume_features, how='left', left_on='id', right_index=True)
print features.shape
features = features.fillna(0)
# ---------------------------------------------------------------------------------
# resource_type ['id', 'resource_type'] (21076, 2)
# ---------------------------------------------------------------------------------
resource_type = pd.read_csv(join(data_path, 'resource_type.csv'))
resource_type['value'] = 1
resource_type_count = resource_type.groupby('id').count()[['value']]
resource_type_count.columns = ['resource_type_count']
features = pd.merge(features, resource_type_count, how='left', left_on='id', right_index=True)
resource_type_features = resource_type.pivot(index='id', columns='resource_type', values='value')
resource_type_features.columns = [c.replace(' ', '_') for c in resource_type_features.columns]
# fixed column subset — presumably resource_type_5 is dropped on purpose; verify against data
resource_type_features = resource_type_features[['resource_type_1', 'resource_type_10', 'resource_type_2',
                                                 'resource_type_3', 'resource_type_4', 'resource_type_6',
                                                 'resource_type_7', 'resource_type_8', 'resource_type_9']]
print 'resource_type_features', resource_type_features.shape
features = pd.merge(features, resource_type_features, how='left', left_on='id', right_index=True)
print features.shape
# ---------------------------------------------------------------------------------
# severity_type ['id', 'severity_type'] (18552, 2)
# ---------------------------------------------------------------------------------
severity_type = pd.read_csv(join(data_path, 'severity_type.csv'))
severity_type['value'] = 1
severity_type_features = severity_type.pivot(index='id', columns='severity_type', values='value')
severity_type_features.columns = [c.replace(' ', '_') for c in severity_type_features.columns]
severity_type_features = severity_type_features.fillna(0)
# fixed column subset — severity_type_3 is excluded; confirm it carries no signal
severity_type_features = severity_type_features[['severity_type_1', 'severity_type_2', 'severity_type_4', 'severity_type_5']]
print 'severity_type_features', severity_type_features.shape
features = pd.merge(features, severity_type_features, how='left', left_on='id', right_index=True)
print features.shape
features = features.fillna(0)
# two overlapping bucketings of the location id (the second is offset by half a bucket)
features['location_cat'] = features['location_id'] // LOCATION_CAT
features['location_cat2'] = (features['location_id'] + LOCATION_CAT//2) // LOCATION_CAT
# sort by the time proxy so that row i-1 / i+1 are temporal neighbors
features = features.sort_values(by='order')
feature_names = list(features.columns)
feature_names.remove('id')
feature_names.remove('fault_severity')
feature_names.remove('location_id')
feature_names.remove('order')
# ---------------------------------------------------------------------------------
# Before features
# ---------------------------------------------------------------------------------
# diff each feature against the row `shift` positions earlier, but only when both
# rows belong to the same location (otherwise the diff is masked to NaN)
ids = features['id'].values
location = features['location_id'].values
for shift in range(1, SHIFT + 1):
    before_dt = features[feature_names].values
    before_dt = before_dt[shift:, :] - before_dt[:-shift, :]
    location_mask = 1. * (location[shift:] == location[:-shift])
    location_mask[location_mask == 0] = np.nan
    before_cols = [c + '_diff_before_%i' % shift for c in feature_names]
    before_dt_df = pd.DataFrame(before_dt, columns=before_cols)
    useful_cols = []
    for c in before_cols:
        before_dt_df[c] = before_dt_df[c] * location_mask
        # keep a diff column only if it is non-zero often enough to be informative
        non_zero_count = np.sum(1*(before_dt_df[c].fillna(0) != 0))
        if non_zero_count > MIN_FREQ:
            useful_cols.append(c)
    before_dt_df = before_dt_df[useful_cols].copy()
    before_dt_df['id'] = ids[shift:]
    features = pd.merge(features, before_dt_df, how='left', on='id')
    print 'before', shift, features.shape
# ---------------------------------------------------------------------------------
# After features
# ---------------------------------------------------------------------------------
# same as above but diffed against the row `shift` positions later
ids = features['id'].values
location = features['location_id'].values
for shift in range(1, SHIFT + 1):
    after_dt = features[feature_names].values
    after_dt = after_dt[:-shift, :] - after_dt[shift:, :]
    location_mask = 1. * (location[:-shift] == location[shift:])
    location_mask[location_mask == 0] = np.nan
    after_cols = [c + '_diff_after_%i' % shift for c in feature_names]
    after_dt_df = pd.DataFrame(after_dt, columns=after_cols)
    useful_cols = []
    for c in after_cols:
        after_dt_df[c] = after_dt_df[c] * location_mask
        non_zero_count = np.sum(1*(after_dt_df[c].fillna(0) != 0))
        if non_zero_count > MIN_FREQ:
            useful_cols.append(c)
    after_dt_df = after_dt_df[useful_cols].copy()
    after_dt_df['id'] = ids[:-shift]
    features = pd.merge(features, after_dt_df, how='left', on='id')
    print 'after', shift, features.shape
features = features.fillna(-9999)
# ---------------------------------------------------------------------------------
# before fault_severity
# ---------------------------------------------------------------------------------
# fault severity of the `diff`-th previous row at the same location
# (test rows carry the -1 sentinel, replaced with NaN below)
ids = features['id'].values
location = features['location_id'].values
fault_severity = features['fault_severity'].values
for diff in range(1, FAULT_LOOKBACK + 1):
    before_fault_severity = fault_severity[:-diff]
    location_mask = 1. * (location[:-diff] == location[diff:])
    location_mask[location_mask == 0] = np.nan
    before_fault_severity_df = pd.DataFrame({'before_fs_%i' % diff: before_fault_severity})
    before_fault_severity_df['before_fs_%i' % diff] = location_mask * before_fault_severity_df['before_fs_%i' % diff]
    before_fault_severity_df['id'] = ids[diff:]
    features = pd.merge(features, before_fault_severity_df, how='left', on='id')
before = features[['before_fs_%i' % d for d in range(1, FAULT_LOOKBACK+1)]]
before = before.replace(-1, np.nan)
before_values = before.values
for diff in range(3, FAULT_LOOKBACK + 1):
    # NOTE(review): the double underscore in 'before_fs__mean_%i' looks like a typo,
    # but changing it would rename the exported column — kept as-is
    features['before_fs__mean_%i' % diff] = np.nanmean(before_values[:, :diff], axis=1)
    features['before_fs_sum_%i' % diff] = np.nansum(before_values[:, :diff], axis=1)
# collapse severities 0/2 to 1 so nansum counts known-severity neighbors
before = before.replace(0, 1)
before = before.replace(2, 1)
before_values = before.values
for diff in range(3, FAULT_LOOKBACK + 1):
    features['before_fs_count_%i' % diff] = np.nansum(before_values[:, :diff], axis=1)
# ---------------------------------------------------------------------------------
# after fault_severity
# ---------------------------------------------------------------------------------
# mirror image: fault severity of the `diff`-th following row at the same location
ids = features['id'].values
location = features['location_id'].values
fault_severity = features['fault_severity'].values
for diff in range(1, FAULT_LOOKBACK+1):
    after_fault_severity = fault_severity[diff:]
    location_mask = 1. * (location[:-diff] == location[diff:])
    location_mask[location_mask == 0] = np.nan
    after_fault_severity_df = pd.DataFrame({'after_fs_%i' % diff: after_fault_severity})
    after_fault_severity_df['after_fs_%i' % diff] = location_mask * after_fault_severity_df['after_fs_%i' % diff]
    after_fault_severity_df['id'] = ids[:-diff]
    features = pd.merge(features, after_fault_severity_df, how='left', on='id')
after = features[['after_fs_%i' % d for d in range(1, FAULT_LOOKBACK+1)]]
after = after.replace(-1, np.nan)
after_values = after.values
for diff in range(3, FAULT_LOOKBACK + 1):
    features['after_fs__mean_%i' % diff] = np.nanmean(after_values[:, :diff], axis=1)
    features['after_fs_sum_%i' % diff] = np.nansum(after_values[:, :diff], axis=1)
after = after.replace(0, 1)
after = after.replace(2, 1)
after_values = after.values
for diff in range(3, FAULT_LOOKBACK + 1):
    features['after_fs_count_%i' % diff] = np.nansum(after_values[:, :diff], axis=1)
features = features.fillna(-9999)
# ---------------------------------------------------------------------------------
# rank features
# ---------------------------------------------------------------------------------
# temporal rank of each row within its location (absolute and relative)
features['location_rank_asc'] = features.groupby('location_id')[['order']].rank()
features['location_rank_desc'] = features.groupby('location_id')[['order']].rank(ascending=False)
features['location_rank_rel'] = 1. * features['location_rank_asc'] / features['location_count']
features['location_rank_rel'] = np.round(features['location_rank_rel'], 2)
# ---------------------------------------------------------------------------------
# export
# ---------------------------------------------------------------------------------
# file name encodes the hyperparameters so multiple runs can coexist
feature_file_name = 'features_mf%i_lc%i_fc%i_fl%i_sh%i.csv' % (MIN_FREQ, LOCATION_CAT, FEATURE_CAT,
                                                               FAULT_LOOKBACK, SHIFT)
features.to_csv(join(feature_path, feature_file_name), index=False)
print 'final features', features.shape
time1 = dt.datetime.now()
print 'total:', (time1-time0).seconds, 'sec'
| mit |
ntucllab/striatum | setup.py | 1 | 1415 | #!/usr/bin/env python
import os
from setuptools import setup
# Read the Docs sets READTHEDOCS=True in its build environment
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# read the docs could not compile numpy and c extensions
if on_rtd:
    setup_requires = []
    install_requires = []
else:
    setup_requires = [
        'nose',
        'coverage',
    ]
    install_requires = [
        'six',
        'numpy',
        'scipy',
        'matplotlib',
    ]
long_description = ("See `github <https://github.com/ntucllab/striatum>`_ "
                    "for more information.")
setup(
    name='striatum',
    version='0.2.5',
    description='Contextual bandit in python',
    long_description=long_description,
    author='Y.-A. Lin, Y.-Y. Yang',
    author_email='r02922163@csie.ntu.edu.tw, b01902066@csie.ntu.edu.tw',
    url='https://github.com/ntucllab/striatum',
    setup_requires=setup_requires,
    install_requires=install_requires,
    classifiers=[
        'Topic :: Scientific/Engineering',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    test_suite='nose.collector',
    # explicit package list/dirs; sub-packages must be kept in sync manually
    packages=[
        'striatum',
        'striatum.bandit',
        'striatum.storage',
        'striatum.utils',
    ],
    package_dir={
        'striatum': 'striatum',
        'striatum.bandit': 'striatum/bandit',
        'striatum.storage': 'striatum/storage',
        'striatum.utils': 'striatum/utils',
    },
)
| bsd-2-clause |
asljivo1/802.11ah-ns3 | ns-3/src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
# Draw 10000 samples from ns-3's normal distribution (mean 100, variance 225 -> sigma 15)
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
# Fix: the `normed` keyword was deprecated in Matplotlib 2.1 and removed in 3.1;
# `density=True` is the supported equivalent (normalizes the histogram to a pdf).
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
1a1a11a/mimircache | PyMimircache/profiler/profilerUtils.py | 1 | 8030 | # coding=utf-8
"""
This module provides some common utils shared by profilers
Author: Jason Yang <peter.waynechina@gmail.com> 2017/10
"""
import os
import math
import time
import pickle
import numpy as np
import matplotlib
# matplotlib.use("Agg")
# pypy3 fails on this
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from PyMimircache.utils.printing import *
def get_breakpoints(reader, time_mode, time_interval=-1, num_of_pixel_of_time_dim=-1, **kwargs):
    """
    retrieve the breakpoints given time_mode and time_interval or num_of_pixel_of_time_dim,
    break point breaks the trace into chunks of given time_interval

    :param reader: reader for reading trace
    :param time_mode: either real time (r) or virtual time (v)
    :param time_interval: the intended time_interval of data chunk
    :param num_of_pixel_of_time_dim: the number of chunks, this is used when it is hard to estimate time_interval,
                                     you only need specify one, either num_of_pixel_of_time_dim or time_interval
    :param kwargs: not used now
    :return: a list of break points (request indices), beginning with 0 and ending
             with the total number of requests (despite the original doc saying
             "numpy list", a plain Python list is returned)
    """
    assert time_interval!=-1 or num_of_pixel_of_time_dim!=-1, \
        "please specify at least one of the following: time_interval, num_of_pixel_of_time_dim"
    bp = []
    if time_mode == "v":
        # virtual time: chunk the trace by request count
        num_req = reader.get_num_of_req()
        if time_interval == -1:
            # derive an interval that yields ~num_of_pixel_of_time_dim chunks
            time_interval = int(math.ceil(num_req / num_of_pixel_of_time_dim) + 1)
        for i in range(int(math.ceil(num_req/time_interval))):
            bp.append(i * time_interval)
        # make sure the last breakpoint covers the trace end
        if bp[-1] != num_req:
            bp.append(num_req)
    elif time_mode == "r":
        # real time: chunk by timestamp gaps read from the trace
        time_column = getattr(reader, "time_column", -1)
        assert time_column!=-1, "provided reader does not have time column"
        if time_interval == -1:
            # estimate the interval from the trace's overall time span
            first_ts = reader.read_first_req()[time_column-1]
            last_ts = reader.read_last_req()[time_column-1]
            time_interval = int(math.ceil((last_ts - first_ts) / num_of_pixel_of_time_dim + 1))
        bp.append(0)
        ind = 0
        # NOTE(review): assumes read_time_req() returns (timestamp, ...) tuples and
        # a falsy value at end-of-trace — confirm against the reader implementation
        line = reader.read_time_req()
        last_ts = line[0]
        while line:
            # start a new chunk once the elapsed time exceeds the interval
            if line[0] - last_ts > time_interval:
                bp.append(ind)
                last_ts = line[0]
            line = reader.read_time_req()
            ind += 1
        if bp[-1] != ind:
            bp.append(ind)
    else:
        raise RuntimeError("unknown time_mode {}".format(time_mode))
    return bp
def util_plotHRC(x_list, hit_ratio, **kwargs):
    """
    Plot the hit ratio curve (HRC) of the given trace under the given algorithm.

    :param x_list: x-axis values (cache sizes)
    :param hit_ratio: y-axis values (hit ratios)
    :param kwargs: figname, cache_unit_size (unit: Byte), no_clear, no_save, ...
    :return: None
    """
    # fill in plot defaults without clobbering caller-supplied values
    kwargs.setdefault("figname", "HRC.png")
    kwargs.setdefault("xlabel", "Cache Size (Items)")
    kwargs.setdefault("ylabel", "Hit Ratio")
    kwargs.setdefault("title", "Hit Ratio Curve")
    unit_size = kwargs.get("cache_unit_size", 0)
    if unit_size != 0:
        # cache sizes are given in bytes; relabel the x axis in megabytes
        kwargs["xlabel"] = "Cache Size (MB)"
        formatter = ticker.FuncFormatter(
            lambda x, p: int(x * unit_size // 1024 // 1024))
        plt.gca().xaxis.set_major_formatter(formatter)
    draw2d(x_list, hit_ratio, **kwargs)
def draw2d(*args, **kwargs):
    """
    Generic 2-d plot helper: draws args via plt.plot/semilog*/loglog (or a
    scatter plot when plot_type == "scatter"), applies set_fig, then optionally
    saves, shows, and clears the current figure.

    :param kwargs: figname, plot_type, logX, logY, label,
                   no_save, no_print_info, no_show, no_clear
    """
    figname = kwargs.get("figname", "2dPlot.png")
    if "plot_type" in kwargs:
        if kwargs['plot_type'] == "scatter":
            # scatter the first positional arg against its 1-based index
            l = args[0]
            plt.scatter([i+1 for i in range(len(l))], l, label=kwargs.get("label", None))
        # NOTE(review): a plot_type other than "scatter" draws nothing — confirm intended
    else:
        # choose the axis scaling from the logX/logY flags
        if 'logX' in kwargs and kwargs["logX"]:
            if 'logY' in kwargs and kwargs["logY"]:
                plt.loglog(*args, label=kwargs.get("label", None))
            else:
                plt.semilogx(*args, label=kwargs.get("label", None))
        else:
            if 'logY' in kwargs and kwargs["logY"]:
                plt.semilogy(*args, label=kwargs.get("label", None))
            else:
                plt.plot(*args, label=kwargs.get("label", None))
    set_fig(**kwargs)
    if not kwargs.get("no_save", False):
        # if folder does not exist, create the folder
        dname = os.path.dirname(figname)
        if dname and not os.path.exists(dname):
            os.makedirs(dname)
        plt.savefig(figname, dpi=600)
        if not kwargs.get("no_print_info", False):
            INFO("plot is saved as {}".format(figname))
    if not kwargs.get("no_show", False):
        # best effort: show() fails on headless backends, which is fine
        try: plt.show()
        except: pass
    if not kwargs.get("no_clear", False):
        plt.clf()
def draw_heatmap(plot_array, **kwargs):
    """
    Render a 2-d array as a heatmap with plt.imshow (NaN cells shown white).
    If imshow fails, the data is pickled to /tmp for post-mortem plotting and a
    pcolormesh fallback is attempted.

    :param plot_array: 2-d array of values to plot
    :param kwargs: figname, imshow_kwargs, no_save, no_show, no_clear, ...
    """
    filename = kwargs.get("figname", 'heatmap.png')
    try:
        imshow_kwargs = kwargs.get("imshow_kwargs", {})
        if "cmap" not in imshow_kwargs:
            imshow_kwargs["cmap"] = plt.cm.jet
        else:
            # caller passed a colormap name; resolve it to a colormap object
            imshow_kwargs["cmap"] = plt.get_cmap(imshow_kwargs["cmap"])
        # masked/NaN cells render as opaque white
        imshow_kwargs["cmap"].set_bad(color='white', alpha=1.)
        img = plt.imshow(plot_array, interpolation='nearest', origin='lower',
                         aspect='auto', **imshow_kwargs)
        cb = plt.colorbar(img)
        set_fig(no_legend=True, **kwargs)
        if not kwargs.get("no_save", False):
            plt.savefig(filename, dpi=600)
            INFO("plot is saved as {}".format(filename))
        if not kwargs.get("no_show", False):
            # best effort: show() fails on headless backends
            try: plt.show()
            except: pass
        if not kwargs.get("no_clear", False):
            try: plt.clf()
            except: pass
    except Exception as e:
        # imshow failed: dump the raw data so the plot can be reproduced later
        try:
            t = int(time.time())
            with open("/tmp/heatmap.{}.pickle".format(t), 'wb') as ofile:
                pickle.dump(plot_array, ofile)
            WARNING("plotting using imshow failed: {}, "
                    "now try to save the plotting data to /tmp/heatmap.{}.pickle".format(e, t))
        except Exception as e:
            ERROR("failed to save plotting data")
        # last-resort fallback renderer
        try:
            cmap = plt.get_cmap("Oranges")
            plt.pcolormesh(plot_array.T, cmap=cmap)
            plt.savefig(filename)
        except Exception as e:
            WARNING("further plotting using pcolormesh failed" + str(e))
def set_fig(**kwargs):
    """
    Configure the current matplotlib figure from keyword arguments.

    Recognized kwargs:
        xlabel/ylabel   : axis label strings
        xticks/yticks   : a (ticks, labels) list/tuple passed to plt.xticks /
                          plt.yticks, or a callable installed as the axis
                          major formatter
        xlimit/ylimit   : axis limits passed to plt.xlim / plt.ylim
        title           : figure title
        rotateXAxisTick : True to rotate x tick labels vertically, False for no
                          rotation, or a numeric rotation angle in degrees
        no_legend       : suppress the legend (default False)
        tight_layout    : apply plt.tight_layout() (default True)
    :return: None
    """
    # set label
    if kwargs.get("xlabel", None):
        plt.xlabel(kwargs['xlabel'])
    if kwargs.get('ylabel', None):
        plt.ylabel(kwargs['ylabel'])
    # set tick
    if kwargs.get('xticks', None):
        xticks = kwargs['xticks']
        if isinstance(xticks, list) or isinstance(xticks, tuple):
            plt.xticks(*xticks)
        elif callable(xticks):
            plt.gca().xaxis.set_major_formatter(xticks)
        else:
            WARNING("unknown xticks {}".format(xticks))
    if kwargs.get('yticks', None):
        yticks = kwargs['yticks']
        if isinstance(yticks, list) or isinstance(yticks, tuple):
            plt.yticks(*yticks)
        elif callable(yticks):
            plt.gca().yaxis.set_major_formatter(yticks)
        else:
            WARNING("unknown yticks {}".format(yticks))
    # set limit
    if kwargs.get("xlimit", None):
        plt.xlim(kwargs["xlimit"])
    if kwargs.get('ylimit', None):
        plt.ylim(kwargs["ylimit"])
    # set title
    if kwargs.get('title', None):
        plt.title(kwargs['title'])
    # if x axis labels are too long, rotate them
    if 'rotateXAxisTick' in kwargs:
        xrotate = kwargs['rotateXAxisTick']
        # bool must be tested before int/float (bool is a subclass of int)
        if isinstance(xrotate, bool):
            # bug fix: rotateXAxisTick=False previously still rotated the labels
            # vertically; a boolean now only rotates when it is True
            if xrotate:
                plt.xticks(rotation="vertical")
        elif isinstance(xrotate, (int, float)):
            plt.xticks(rotation=xrotate)
        else:
            plt.xticks(rotation="vertical")
            WARNING("unknown rotateXAxisTick {}".format(xrotate))
    # legend
    if not kwargs.get("no_legend", False):
        plt.legend(loc="best")
    # tight layout
    if kwargs.get("tight_layout", True):
        plt.tight_layout()
| gpl-3.0 |
bccp/bananaplots | bananas/bananas.py | 1 | 12737 | import numpy
__version__ = "0.0.3"
from .model import GMM, Confidence, CombinedModel
from functools import reduce
def _sorteditems(d, orderby):
""" return items from a dict of dict, sorted by the orderby item of the dict """
s = sorted([(i[orderby], k) for k, i in d.items()])
return [(k, d[k]) for i, k in s]
class Bananas(object):
    """
    Renders "banana" (confidence-contour) plots for a collection of
    log-likelihood surfaces over named features.

    Surfaces and features are registered with attribute dicts; unset attributes
    (None) are derived on demand by get_surface_attr / get_feature_attr.
    """
    def __init__(self):
        # feature name -> attribute dict (order, label, range)
        self.features = {}
        # monotonically increasing counter used to assign a stable default order
        self._unique = 0
        # surface object -> attribute dict (color, line style, levels, ...)
        self.surfaces = {}

    def set_surface(self, surface, **attrs):
        """
        Add a surface with attributes.

        Notes
        -----
        compiler attributes are prefixed with 'compiler_'

        Returns
        -------
        the surface object
        """
        if not surface in self.surfaces:
            self.surfaces[surface] = dict(
                    colorfamily='r',
                    order=self._unique,
                    label=None,
                    cmap=None,
                    linewidth=1.0,
                    linestyle='-',
                    color=None,
                    levels=[0.68, 0.95],
                    )
            # step by 10 so entries can later be ordered in between if needed
            self._unique = self._unique + 10
        self.surfaces[surface].update(attrs)
        return surface

    def get_surface_attr(self, surface, attr):
        """Return a surface attribute, deriving a default when it is None:
        label from str(surface), color from the cmap, cmap from colorfamily."""
        from matplotlib import cm
        f = self.surfaces[surface]
        if f[attr] is not None:
            return f[attr]
        if attr == 'label':
            return str(surface)
        if attr == 'color':
            # representative color sampled from the surface's colormap
            cmap = self.get_surface_attr(surface, 'cmap')
            return cmap(0.3)
        if attr == 'cmap':
            color = f['colorfamily']
            # single-letter shorthands map to full matplotlib color-family names
            shorts = {'b' : 'blue',
                      'r' : 'red',
                      'g' : 'green',
                      'y' : 'yellow',
                      'm' : 'magenta',
                      'k' : 'black'}
            color = shorts.get(color, color)
            return {'blue' : cm.Blues_r,
                    'red' : cm.Reds_r,
                    'green' : cm.Greens_r,
                    'yellow' : cm.Oranges_r,
                    'magenta' : cm.Purples_r,
                    'black' : cm.Greys_r,
                    }[color]

    def set_feature(self, feature, **attrs):
        """Register a feature (axis) with attributes; attrs override defaults."""
        if not feature in self.features:
            self.features[feature] = dict(
                    order=self._unique,
                    label=None,
                    range=None,
                    )
            self._unique = self._unique + 10
        self.features[feature].update(attrs)

    def get_feature_attr(self, feature, attr):
        """Return a feature attribute, deriving label/range when unset.
        The range is the union of all registered surfaces' feature bounds."""
        if not feature in self.features:
            # lazily register features that were never set explicitly
            self.set_feature(feature)
        f = self.features[feature]
        if f[attr] is not None:
            return f[attr]
        if attr == 'label':
            return str(feature)
        if attr == 'range':
            # s is a Surface; s[feature] yields a Feature with vmin/vmax
            mins = [s[feature].vmin for s in self.surfaces]
            maxes = [s[feature].vmax for s in self.surfaces]
            return (min(mins), max(maxes))

    def render(self, axes, f1, f2, **options):
        """Draw the 2-d confidence contours of every surface for features
        (f1, f2) on the given axes.
        options: filled, contour_labels, crosshair."""
        axes.set_xlabel(self.get_feature_attr(f1, 'label'))
        axes.set_ylabel(self.get_feature_attr(f2, 'label'))
        x = numpy.linspace(*self.get_feature_attr(f1,'range'), num=512)
        y = numpy.linspace(*self.get_feature_attr(f2,'range'), num=512)
        X, Y = numpy.meshgrid(x, y)
        filled = options.get('filled', True)
        contour_labels = options.get('contour_labels', False)
        crosshair = options.get('crosshair', False)
        for surface, attrs in _sorteditems(self.surfaces, 'order'):
            cmap = self.get_surface_attr(surface, 'cmap')
            color = self.get_surface_attr(surface, 'color')
            linestyle = self.get_surface_attr(surface, 'linestyle')
            linewidth = self.get_surface_attr(surface, 'linewidth')
            style = dict(linestyle=linestyle, linewidth=linewidth)
            levels = self.get_surface_attr(surface, 'levels')
            # confidence field of the surface marginalized onto (f1, f2)
            m = surface.marginalize((f1, f2))
            Z = m.confidence(X, Y)
            if filled:
                CS = axes.contourf(X, Y, Z,
                        levels=[0] + levels,
                        vmin=0.0, vmax=1.0,
                        cmap=cmap, alpha=0.7)
            CS = axes.contour(X, Y, Z,
                    levels=levels,
                    vmin=0.0, vmax=2.0,
                    cmap=cmap, **style)
            if crosshair:
                # mark the per-feature peak with axis-spanning lines
                x = surface[f1].peak
                y = surface[f2].peak
                if x is not None and y is not None:
                    axes.axvline(x, color=color, **style)
                    axes.axhline(y, color=color, **style)
            if contour_labels:
                TXT = axes.clabel(CS)

    def render1d(self, axes, f1, **options):
        """Draw the 1-d marginalized probability density of every surface for
        feature f1. options: crosshair."""
        crosshair = options.get('crosshair', False)
        range = self.get_feature_attr(f1, 'range')
        axes.set_xlabel(self.get_feature_attr(f1, 'label'))
        axes.set_xlim(range)
        x = numpy.linspace(*range, num=512)
        for surface, attrs in _sorteditems(self.surfaces, 'order'):
            label = self.get_surface_attr(surface, 'label')
            cmap = self.get_surface_attr(surface, 'cmap')
            color = self.get_surface_attr(surface, 'color')
            linestyle = self.get_surface_attr(surface, 'linestyle')
            linewidth = self.get_surface_attr(surface, 'linewidth')
            style = dict(linestyle=linestyle, linewidth=linewidth)
            m = surface.marginalize((f1, ))
            # convert log-probability to probability density for plotting
            Z = numpy.exp(m.lnprob(x))
            axes.plot(x, Z, label=label, color=color, **style)
            if crosshair:
                c = surface[f1].peak
                if c is not None:
                    axes.axvline(c, color=color, **style)

    def rendernd(self, figure, features, gridspec=None, **options):
        """Draw the full triangle plot: 1-d marginals on the diagonal and 2-d
        contours in the chosen corner ('lower left' or 'upper right').
        Returns a dict mapping (row, col) -> axes."""
        from matplotlib.gridspec import GridSpec
        from matplotlib.ticker import NullFormatter
        from itertools import product
        if gridspec is None:
            gridspec = GridSpec(len(features), len(features), hspace=0, wspace=0)
        corner = options.get('corner', 'lower left')
        axes = {}
        # per-corner config: [visibility predicate, (x-edge row, x label pos,
        #                                            y-edge col, y label pos)]
        config = {
            'upper right' : [lambda i, j : i < j, (0, 'top', len(features) - 1, 'right')],
            'lower left' : [lambda i, j : i > j, (len(features) - 1, 'bottom', 0, 'left')]
            }
        for i, j in product(range(len(features)), range(len(features))):
            ax = figure.add_subplot(gridspec[i, j])
            axes[i, j] = ax
            visible = config[corner][0]
            if i == j:
                # diagonal panel: 1-d marginal
                self.render1d(ax, features[i], **options)
                ax.locator_params(axis='y', nbins=5)
                ax.yaxis.set_major_formatter(NullFormatter())
                continue
            if visible(i, j):
                self.render(ax, features[j], features[i], **options)
            else:
                ax.set_axis_off()
        for (i, j), ax in axes.items():
            # drop edge ticks so adjacent (gap-less) panels don't overlap labels
            ax.locator_params(axis='y', prune='both')
            ax.locator_params(axis='x', prune='both')
        for (i, j), ax in axes.items():
            # only the outermost row/column keeps tick labels and axis labels
            xedge, xpos, yedge, ypos = config[corner][1]
            if i != xedge:
                ax.xaxis.set_major_formatter(NullFormatter())
                ax.xaxis.get_label().set_visible(False)
            else:
                ax.xaxis.set_label_position(xpos)
            if j != yedge:
                ax.yaxis.set_major_formatter(NullFormatter())
                ax.yaxis.get_label().set_visible(False)
            else:
                ax.yaxis.set_label_position(ypos)
        return axes

    def get_legend_handlers_labels(self):
        """Return (proxy patches, labels) for building a figure legend,
        one entry per surface in plotting order."""
        from matplotlib import patches as mpatches
        proxies = []
        labels = []
        for surface, attrs in _sorteditems(self.surfaces, 'order'):
            label = self.get_surface_attr(surface, 'label')
            color = self.get_surface_attr(surface, 'color')
            # contours have no legend handle; use a colored patch as proxy
            proxies.append(mpatches.Patch(color=color))
            labels.append(label)
        return proxies, labels
class Surface(object):
    """
    Base class for log-likelihood surfaces.

    Subclasses are expected to provide `self.features` (name -> Feature),
    `self.names` (ordered feature names) and `self.model`
    — NOTE(review): neither is set here; confirm against concrete subclasses.
    """
    def __getitem__(self, name):
        # look up the Feature registered under this name
        return self.features[name]

    def marginalize(self, features, **options):
        """Marginalize the underlying model onto the given feature names and
        return a Marginalized wrapper with a fitted Confidence transform."""
        axes = []
        for name in features:
            # map feature names to their axis indices in the model
            axes.append(self.names.index(name))
        model = self.model.marginalize(axes)
        conf = Confidence.fit(model, **options)
        return Marginalized(model, conf)
    pass
class Feature(object):
    """A 1-d sample of a model feature with value bounds and an optional peak."""

    def __init__(self, data, vmin=None, vmax=None, peak=None):
        if isinstance(data, Feature):
            # Copy-construct: inherit any attribute not explicitly overridden.
            vmin = data.vmin if vmin is None else vmin
            vmax = data.vmax if vmax is None else vmax
            peak = data.peak if peak is None else peak
            data = data.data
        else:
            # Derive missing bounds from the sample itself.
            if vmin is None:
                vmin = data.min()
            if vmax is None:
                vmax = data.max()
        # only 1d feature is supported
        assert len(numpy.shape(data)) == 1
        self.data = data
        self.vmin = vmin
        self.vmax = vmax
        self.peak = peak

    def __add__(self, other):
        """Concatenate two samples; bounds are unioned, the peak is dropped."""
        merged = numpy.concatenate([self.data, other.data])
        lo = numpy.min([self.vmin, other.vmin])
        hi = numpy.max([self.vmax, other.vmax])
        return Feature(merged, vmin=lo, vmax=hi, peak=None)
class MCChain(Surface):
    """
    A log-likelihood surface represented by a Markov Chain sample.

    Parameters
    ----------
    **features : dict
        key: name of the feature,
        value: a :py:class:`Feature` object or a 1-d numpy array;
        arrays are cast to :py:class:`Feature` objects.
    """

    def __init__(self, **features):
        # Normalize every value to a Feature instance.
        self.features = {name: Feature(value) for name, value in features.items()}

    def __add__(self, other):
        # Concatenate chains feature-wise, keeping only the names both
        # chains define.
        shared = {name: self.features[name] + other.features[name]
                  for name in self.features if name in other.features}
        return MCChain(**shared)
def compile(chain, nc=1, nb=20):
    """Fit a GMM surface to an MCChain sample.

    Parameters
    ----------
    chain : MCChain whose 1-d feature samples are stacked column-wise.
    nc : number of GMM components.
    nb : number of bins passed to Confidence.fit.

    NOTE(review): the name shadows the builtin ``compile``; callers rely on
    it, so it is kept.  ``conf`` below is computed but never used or
    returned — confirm whether GMMSurface should receive it.
    """
    data = []
    names = []
    limits = []
    for name in chain.features:
        # only 1d name is supported
        feature = chain.features[name]
        data.append(feature.data.reshape(1, -1))
        # remove the data from feature
        names.append((name, Feature([], feature.vmin, feature.vmax, feature.peak)))
        limits.append((feature.vmin, feature.vmax))
    # Rows are samples, columns are features.
    X = numpy.concatenate(data, axis=0).T
    model = GMM.fit(nc, X, limits)
    conf = Confidence.fit(model, nb=nb)
    return GMMSurface(names, model)
class Marginalized(object):
    """A marginalized model paired with its fitted confidence mapping."""

    def __init__(self, model, conf):
        self.model = model
        self.conf = conf

    def lnprob(self, *args):
        """Evaluate the model's log-probability on broadcast coordinate arrays.

        The coordinate arrays are broadcast against each other, flattened
        into sample rows, scored by the model, and the scores reshaped
        back to the broadcast shape.
        """
        coords = numpy.array(numpy.broadcast_arrays(*args), copy=True)
        out_shape = coords[0].shape
        flat = coords.reshape(len(coords), -1)
        scores = self.model.score(flat.T)
        return scores.reshape(out_shape)

    def confidence(self, *args):
        """Map log-probability values through the confidence transform."""
        return self.conf.score(self.lnprob(*args))
class CombinedSurface(Surface):
    """A surface formed by combining several surfaces over shared features.

    NOTE(review): the code collects the union of all feature names but then
    indexes and reads every name from every surface, so it implicitly
    assumes all input surfaces define the same feature set — confirm.
    NOTE(review): uses the bare ``reduce`` builtin, so this module targets
    Python 2 (use ``functools.reduce`` under Python 3).
    """

    def __init__(self, surfaces):
        names = []
        for s in surfaces:
            names.extend(s.names)
        common = list(set(names))
        # Per-surface axis permutation mapping the shared name order onto
        # each surface's own feature ordering.
        axes = []
        for s in surfaces:
            axes.append([ s.names.index(name) for name in common])
        # Merge each feature's samples across all surfaces.
        features = []
        for name in common:
            f = reduce(lambda x, y: x + y, [s.features[name] for s in surfaces])
            features.append((name, f))
        self.features = dict(features)
        self.names = common
        models = [surface.model.marginalize(axes0) for surface, axes0 in zip(surfaces, axes)]
        self.model = CombinedModel(models)

    def marginalize(self, features, **options):
        # NOTE(review): identical to Surface.marginalize — the override is
        # redundant but kept to preserve the original structure.
        axes = []
        for name in features:
            axes.append(self.names.index(name))
        model = self.model.marginalize(axes)
        conf = Confidence.fit(model, **options)
        return Marginalized(model, conf)
class GMMSurface(Surface):
    """ A surface that is modelled by GMM. features is a list of (name, feature). """

    def __init__(self, features, model):
        self.features = dict(features)
        # Preserve the caller-supplied ordering of feature names.
        self.names = [name for name, value in features]
        self.model = model

    def __mul__(self, other):
        # Multiplying two surfaces yields their combination.
        return CombinedSurface([self, other])
| apache-2.0 |
gregsharp/vowpal_wabbit | python/tests/test_sklearn_vw.py | 6 | 5431 | from collections import namedtuple
import numpy as np
import pytest
from vowpalwabbit.sklearn_vw import VW, VWClassifier, VWRegressor, tovw
from sklearn import datasets
from sklearn.utils.validation import NotFittedError
from scipy.sparse import csr_matrix
"""
Test utilities to support integration of Vowpal Wabbit and scikit-learn
"""
Dataset = namedtuple('Dataset', 'x, y')
@pytest.fixture(scope='module')
def data():
    """Module-scoped Hastie 10.2 sample with float32 features."""
    features, labels = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    return Dataset(x=features.astype(np.float32), y=labels)
class TestVW:
    """Unit tests for the raw VW estimator wrapper."""

    def test_validate_vw_estimator(self):
        """
        Run VW and VWClassifier through the sklearn estimator validation check

        Note: the VW estimators fail sklearn's estimator validation check. The validator creates a new
        instance of the estimator with the estimator's default args, '--quiet' in VW's case. At some point
        in the validation sequence it calls fit() with some fake data.  The data gets formatted via tovw() to:
        2 1 | 0:0.5488135039273248 1:0.7151893663724195 2:0.6027633760716439 3:0.5448831829968969 4:0.4236547993389047 5:0.6458941130666561 6:0.4375872112626925 7:0.8917730007820798 8:0.9636627605010293 9:0.3834415188257777
        This gets passed into vw.learn and the python process dies with the error, "Process finished with exit code 139"
        At some point it would probably be worth while figuring out the problem this and getting the two estimators to
        pass sklearn's validation check
        """
        # check_estimator(VW)
        # check_estimator(VWClassifier)

    def test_init(self):
        assert isinstance(VW(), VW)

    def test_fit(self, data):
        # fit_ must only exist after a successful fit().
        model = VW(loss_function='logistic')
        assert not hasattr(model, 'fit_')
        model.fit(data.x, data.y)
        assert model.fit_

    def test_passes(self, data):
        # A two-pass model must end up with different weights than a
        # single-pass model trained on the same data.
        n_passes = 2
        model = VW(loss_function='logistic', passes=n_passes)
        assert model.passes_ == n_passes
        model.fit(data.x, data.y)
        weights = model.get_coefs()
        model = VW(loss_function='logistic')
        # first pass weights should not be the same
        model.fit(data.x, data.y)
        assert not np.allclose(weights.data, model.get_coefs().data)

    def test_predict_not_fit(self, data):
        # Predicting before fit() must raise NotFittedError.
        model = VW(loss_function='logistic')
        with pytest.raises(NotFittedError):
            model.predict(data.x[0])

    def test_predict(self, data):
        model = VW(loss_function='logistic')
        model.fit(data.x, data.y)
        # Regression value pinned from a known-good VW build.
        assert np.isclose(model.predict(data.x[:1][:1])[0], 0.406929)

    def test_predict_no_convert(self):
        # Raw VW-format strings bypass the tovw() conversion.
        model = VW(loss_function='logistic', convert_to_vw=False)
        model.fit(['-1 | bad', '1 | good'])
        assert np.isclose(model.predict(['| good'])[0], 0.245515)

    def test_set_params(self):
        model = VW()
        assert 'l' not in model.params
        model.set_params(l=0.1)
        assert model.params['l'] == 0.1
        # confirm model params reset with new construction
        model = VW()
        assert 'l' not in model.params

    def test_get_coefs(self, data):
        model = VW()
        model.fit(data.x, data.y)
        weights = model.get_coefs()
        # 10 feature hashes plus the constant-term hash.
        assert np.allclose(weights.indices, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 116060])

    def test_get_intercept(self, data):
        model = VW()
        model.fit(data.x, data.y)
        intercept = model.get_intercept()
        assert isinstance(intercept, float)

    def test_oaa(self):
        # One-against-all multiclass: labels must be recovered exactly.
        X = ['1 | feature1:2.5',
             '2 | feature1:0.11 feature2:-0.0741',
             '3 | feature3:2.33 feature4:0.8 feature5:-3.1',
             '1 | feature2:-0.028 feature1:4.43',
             '2 | feature5:1.532 feature6:-3.2']
        model = VW(convert_to_vw=False, oaa=3)
        model.fit(X)
        assert np.allclose(model.predict(X), [ 1.,  2.,  3.,  1.,  2.])
class TestVWClassifier:
    """Tests for the sklearn-style VWClassifier wrapper."""

    def test_init(self):
        assert isinstance(VWClassifier(), VWClassifier)

    def test_decision_function(self, data):
        """VWClassifier.predict must agree with thresholding raw VW output."""
        classes = np.array([-1., 1.])
        raw_model = VW(loss_function='logistic')
        raw_model.fit(data.x, data.y)
        predictions = raw_model.predict(data.x)
        # np.int was a deprecated alias for the builtin int (removed in
        # NumPy 1.24); use int directly — identical dtype semantics.
        class_indices = (predictions > 0).astype(int)
        expected = classes[class_indices]
        model = VWClassifier()
        model.fit(data.x, data.y)
        actual = model.predict(data.x)
        assert np.allclose(expected, actual)
class TestVWRegressor:
    """Tests for the sklearn-style VWRegressor wrapper."""

    def test_init(self):
        assert isinstance(VWRegressor(), VWRegressor)

    def test_predict(self, data):
        # The regressor wrapper must predict identically to a raw VW model.
        baseline = VW()
        baseline.fit(data.x, data.y)
        wrapped = VWRegressor()
        wrapped.fit(data.x, data.y)
        assert np.allclose(baseline.predict(data.x), wrapped.predict(data.x))
        # ensure model can make multiple calls to predict
        assert np.allclose(baseline.predict(data.x), wrapped.predict(data.x))

    def test_delete(self):
        # Deleting a model must not raise.
        instance = VW()
        del instance
def test_tovw():
    """tovw must format dense and sparse inputs to identical VW lines."""
    features = np.array([[1.2, 3.4, 5.6, 1.0, 10], [7.8, 9.10, 11, 0, 20]])
    labels = np.array([1, -1])
    weights = [1, 2]
    expected = ['1 1 | 0:1.2 1:3.4 2:5.6 3:1 4:10',
                '-1 2 | 0:7.8 1:9.1 2:11 4:20']
    assert tovw(x=features, y=labels, sample_weight=weights) == expected
    # Zero-valued features are omitted identically for sparse input.
    assert tovw(x=csr_matrix(features), y=labels, sample_weight=weights) == expected
| bsd-3-clause |
henrytao-me/openerp.positionq | openerp/addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
    """Maps a data calendar onto a padded chart calendar for rendering.

    The chart calendar stretches every weekday to the same total length so
    that working time occupies a fixed fraction of each drawn day.

    NOTE(review): Python 2 only — relies on ``sys.maxint`` and on
    ``map``/``filter``/``zip`` returning lists (e.g. ``non_slots.sort()``).
    """

    def __init__(self, calendar):
        self.data_calendar = calendar
        self._create_chart_calendar()
        self.now = self.to_num(self.data_calendar.now)

    def to_datetime(self, xval):
        """Convert a chart coordinate back to a datetime."""
        return xval.to_datetime()

    def to_num(self, date):
        """Convert a date to a numeric chart coordinate."""
        return self.chart_calendar.WorkingDate(date)

    def is_free_slot(self, value):
        """True if *value* falls in a chart slot with no working time."""
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime(self.data_calendar.from_datetime(dt1))
        return dt1 != dt2

    def is_free_day(self, value):
        """True if *value* falls on a day with no working time."""
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime(self.data_calendar.from_datetime(dt1))
        return dt1.date() != dt2.date()

    def _create_chart_calendar(self):
        dcal = self.data_calendar
        ccal = self.chart_calendar = pcal.Calendar()
        ccal.minimum_time_unit = 1
        #pad worktime slots of calendar (all days should be equally long)
        slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
        day_sum = lambda day: slot_sum(dcal.get_working_times(day))
        max_work_time = max(map(day_sum, range(7)))
        #working_time should have 2/3
        sum_time = 3 * max_work_time / 2
        #now create timeslots for ccal
        def create_time_slots(day):
            # Split the 24h day into working and non-working intervals,
            # then shrink the non-working ones so the day totals sum_time.
            src_slots = dcal.get_working_times(day)
            slots = [0, src_slots, 24*60]
            slots = tuple(cbook.flatten(slots))
            slots = zip(slots[:-1], slots[1:])
            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time
            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()
            slots = []
            i = 0
            for l, s in non_slots:
                # Distribute the remaining padding over the remaining
                # non-working slots, never exceeding a slot's real length.
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1
            slots.extend(src_slots)
            slots.sort()
            return slots
        min_delta = sys.maxint
        for i in range(7):
            slots = create_time_slots(i)
            ccal.working_times[i] = slots
            min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
        ccal._recalc_working_time()
        # Smallest slot, padded day length and week length in chart units.
        self.slot_delta = min_delta
        self.day_delta = sum_time
        self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
baudren/montepython_public | montepython/likelihoods/BK14/__init__.py | 2 | 17975 | """
.. module:: BK14
:synopsis: BK14 likelihood from http://arxiv.org/pdf/1510.09217.pdf, http://bicepkeck.org/bk14_2015_release.html
.. moduleauthor:: Thomas Tram <thomas.tram@port.ac.uk>
Last updated July 20, 2016. Based on the CosmoMC module.
"""
import numpy as np
import pandas as pd
import scipy.linalg as la
import montepython.io_mp as io_mp
import os
from montepython.likelihood_class import Likelihood_sn
T_CMB = 2.7255 #CMB temperature
h = 6.62606957e-34 #Planck's constant
kB = 1.3806488e-23 #Boltzmann constant
Ghz_Kelvin = h/kB*1e9 #GHz Kelvin conversion
class BK14(Likelihood_sn):
    def __init__(self, path, data, command_line):
        """Load the BK14 data products and precompute likelihood inputs.

        Reads the window functions, fiducial covariance, noise, observed
        and fiducial bandpower matrices, and the experimental bandpasses.

        NOTE(review): Python 2 codebase (``print`` statement, integer
        division for ``ncrossmaps``); ``.as_matrix()`` was removed from
        modern pandas — would need ``.values`` there.
        """
        # Unusual construction, since the data files are not distributed
        # alongside BK14 (size problems)
        try:
            # Read the .dataset file specifying the data.
            super(BK14, self).__init__(path, data, command_line)
        except IOError:
            raise io_mp.LikelihoodError(
                "The BK14 data files were not found. Please download the "
                "following link "
                "http://bicepkeck.org/BK14_datarelease/BK14_cosmomc.tgz"
                ", extract it, and copy the BK14 folder inside"
                "`BK14_cosmomc/data/` to `your_montepython/data/`")
        # Require tensor modes from CLASS as well as nonlinear lensing.
        # Nonlinearities enhance the B-mode power spectrum by more than 6%
        # at l>100. (Even more at l>2000, but not relevant to BICEP.)
        # See http://arxiv.org/abs/astro-ph/0601594.
        arguments = {
            'output': 'tCl pCl lCl',
            'lensing': 'yes',
            'modes': 's, t',
            'l_max_scalars': 2000,
            'k_max_tau0_over_l_max': 7.0,
            'non linear':'HALOFIT' if self.do_nonlinear else '',
            'accurate_lensing':1,
            'l_max_tensors': self.cl_lmax}
        self.need_cosmo_arguments(data, arguments)
        map_names_used = self.map_names_used.split()
        map_fields = self.map_fields.split()
        map_names = self.map_names.split()
        self.map_fields_used = [maptype for i, maptype in enumerate(map_fields) if map_names[i] in map_names_used]
        nmaps = len(map_names_used)
        ncrossmaps = nmaps*(nmaps+1)/2
        nbins = int(self.nbins)
        ## This constructs a different flattening of triangular matrices.
        ## v = [m for n in range(nmaps) for m in range(n,nmaps)]
        ## w = [m for n in range(nmaps) for m in range(nmaps-n)]
        ## # Store the indices in a tuple of integer arrays for later use.
        ## self.flat_to_diag = (np.array(v),np.array(w))
        # We choose the tril_indices layout for flat indexing of the triangular matrix
        self.flat_to_diag = np.tril_indices(nmaps)
        self.diag_to_flat = np.zeros((nmaps,nmaps),dtype='int')
        # It is now easy to generate an array with the corresponding flattened indices. (We only fill the lower triangular part.)
        self.diag_to_flat[self.flat_to_diag] = range(ncrossmaps)
        # Read in bandpasses
        self.ReadBandpasses()
        # Read window bins
        self.window_data = np.zeros((int(self.nbins),int(self.cl_lmax),ncrossmaps))
        # Retrieve mask and index permutation of windows:
        indices, mask = self.GetIndicesAndMask(self.bin_window_in_order.split())
        for k in range(nbins):
            windowfile = os.path.join(self.data_directory, self.bin_window_files.replace('%u',str(k+1)))
            tmp = pd.read_table(windowfile,comment='#',sep=' ',header=None, index_col=0).as_matrix()
            # Apply mask
            tmp = tmp[:,mask]
            # Permute columns and store this bin
            self.window_data[k][:,indices] = tmp
        # print 'window_data',self.window_data.shape
        #Read covmat fiducial
        # Retrieve mask and index permutation for a single bin.
        indices, mask = self.GetIndicesAndMask(self.covmat_cl.split())
        # Extend mask and indices. Mask just need to be copied, indices needs to be increased:
        superindices = []
        supermask = []
        for k in range(nbins):
            superindices += [idx+k*ncrossmaps for idx in indices]
            supermask += list(mask)
        supermask = np.array(supermask)
        tmp = pd.read_table(os.path.join(self.data_directory, self.covmat_fiducial),comment='#',sep=' ',header=None,skipinitialspace=True).as_matrix()
        # Apply mask:
        tmp = tmp[:,supermask][supermask,:]
        print 'Covmat read with shape',tmp.shape
        # Store covmat in correct order
        self.covmat = np.zeros((nbins*ncrossmaps,nbins*ncrossmaps))
        for index_tmp, index_covmat in enumerate(superindices):
            self.covmat[index_covmat,superindices] = tmp[index_tmp,:]
        #Compute inverse and store
        self.covmat_inverse = la.inv(self.covmat)
        # print 'covmat',self.covmat.shape
        # print self.covmat_inverse
        nbins = int(self.nbins)
        # Read noise:
        self.cl_noise_matrix = self.ReadMatrix(self.cl_noise_file,self.cl_noise_order)
        # Read Chat and perhaps add noise:
        self.cl_hat_matrix = self.ReadMatrix(self.cl_hat_file,self.cl_hat_order)
        if not self.cl_hat_includes_noise:
            for k in range(nbins):
                self.cl_hat_matrix[k] += self.cl_noise_matrix[k]
        # Read cl_fiducial and perhaps add noise:
        self.cl_fiducial_sqrt_matrix = self.ReadMatrix(self.cl_fiducial_file,self.cl_fiducial_order)
        if not self.cl_fiducial_includes_noise:
            for k in range(nbins):
                self.cl_fiducial_sqrt_matrix[k] += self.cl_noise_matrix[k]
        # Now take matrix square root:
        for k in range(nbins):
            self.cl_fiducial_sqrt_matrix[k] = la.sqrtm(self.cl_fiducial_sqrt_matrix[k])
    def ReadMatrix(self, filename, crossmaps):
        """
        Read matrices for each ell-bin for all maps inside crossmaps and
        ordered in the same way as usedmaps. Returns list of matrices.

        Each file row holds one ell-bin in packed (flattened lower
        triangular) format; rows are unpacked into symmetric nmaps x nmaps
        matrices.

        NOTE(review): ``.as_matrix()`` and integer ``/`` are Python 2 /
        old-pandas idioms — would need ``.values`` and ``//`` on Python 3.
        """
        usedmaps = self.map_names_used.split()
        nmaps = len(usedmaps)
        # Get mask and indices
        indices, mask = self.GetIndicesAndMask(crossmaps.split())
        # Read matrix in packed format
        A = pd.read_table(os.path.join(self.data_directory, filename),comment='#',sep=' ',header=None, index_col=0).as_matrix()
        # Apply mask
        A = A[:,mask]
        # Create matrix for each bin and unpack A:
        Mlist = []
        # Loop over bins:
        for k in range(int(self.nbins)):
            M = np.zeros((nmaps,nmaps))
            Mflat = np.zeros((nmaps*(nmaps+1)/2))
            Mflat[indices] = A[k,:]
            M[self.flat_to_diag] = Mflat
            # Symmetrise M and append to list:
            Mlist.append(M+M.T-np.diag(M.diagonal()))
        return Mlist
    def GetIndicesAndMask(self, crossmaplist):
        """
        Given a list of used maps and a list of available crossmaps, find a mask
        for the used crossmaps, and for each used crossmap, compute the flattened
        triangular index. We must allow map1 and map2 to be interchanged.
        If someone finds a nicer way to do this, please email me.

        Returns
        -------
        (flatindex, mask) where *mask* marks which entries of
        *crossmaplist* use only selected maps, and *flatindex* gives each
        kept crossmap's position in numpy.tril_indices ordering.
        """
        usedmaps = self.map_names_used.split()
        nmaps = len(usedmaps)
        mask = np.array([False for i in range(len(crossmaplist))])
        flatindex = []
        for i, crossmap in enumerate(crossmaplist):
            # Crossmap names look like "<map1>x<map2>".
            map1, map2 = crossmap.split('x')
            if map1 in usedmaps and map2 in usedmaps:
                index1 = usedmaps.index(map1)
                index2 = usedmaps.index(map2)
                # This calculates the flat index in a diagonal flattening:
                # if index1 > index2:
                #     flatindex.append((index1-index2)*(2*nmaps+1-index1+index2)/2+index2)
                # else:
                #     flatindex.append((index2-index1)*(2*nmaps+1-index2+index1)/2+index1)
                # This calculates the flat index in the standard numpy.tril_indices() way:
                if index1 > index2:
                    flatindex.append(index1*(index1+1)/2+index2)
                else:
                    flatindex.append(index2*(index2+1)/2+index1)
                mask[i] = True
        return flatindex, mask
    def ReadBandpasses(self):
        """
        Read bandpasses and compute some thermodynamic quantities.
        Everything stored in the dictionary self.bandpasses.

        For each used map: loads the (frequency, response) table, computes
        frequency steps, and the thermodynamic temperature conversion
        factors relative to the 353 GHz (dust) and 23 GHz (sync) pivots.

        NOTE(review): ``dict.iteritems()`` is Python 2 only.
        """
        #Read bandpasses
        self.bandpasses = {}
        map_fields = self.map_fields.split()
        map_names = self.map_names.split()
        map_names_used = self.map_names_used.split()
        for key in map_names_used:
            # Bandpass filenames come from dataset attributes named
            # 'bandpass[<map>]'.
            self.bandpasses[key] = {'field':map_fields[map_names.index(key)],'filename':getattr(self, 'bandpass['+key+']')}
        for key, valdict in self.bandpasses.iteritems():
            tmp = np.loadtxt(os.path.join(self.data_directory, valdict['filename']))
            #Frequency nu, response resp:
            valdict['nu'] = tmp[:,0]
            valdict['resp'] = tmp[:,1]
            valdict['dnu'] = np.gradient(valdict['nu'])
            # Calculate thermodynamic temperature conversion between this bandpass
            # and pivot frequencies 353 GHz (used for dust) and 23 GHz (used for
            # sync).
            th_int = np.sum(valdict['dnu']*valdict['resp']*valdict['nu']**4*np.exp(Ghz_Kelvin*valdict['nu']/T_CMB)/(np.exp(Ghz_Kelvin*valdict['nu']/T_CMB)-1.)**2)
            nu0=353.
            th0 = nu0**4*np.exp(Ghz_Kelvin*nu0/T_CMB) / (np.exp(Ghz_Kelvin*nu0/T_CMB) - 1.)**2
            valdict['th353'] = th_int / th0
            nu0=23.
            th0 = nu0**4*np.exp(Ghz_Kelvin*nu0/T_CMB) / (np.exp(Ghz_Kelvin*nu0/T_CMB) - 1.)**2
            valdict['th023'] = th_int / th0
            #print 'th353:', valdict['th353'], 'th023:', valdict['th023']
    def loglkl(self, cosmo, data):
        """
        Compute negative log-likelihood using the Hamimeche-Lewis formalism, see
        http://arxiv.org/abs/arXiv:0801.0554

        Returns -chi2/2 where chi2 = X^T C^-1 X and X collects the
        HL-transformed bandpower residuals over all ell-bins.

        NOTE(review): uses the bare ``reduce`` builtin — Python 2.
        """
        # Define the matrix transform
        def MatrixTransform(C, Chat, CfHalf):
            # HL transform: g(C^{-1/2} Chat C^{-1/2}) conjugated by the
            # fiducial square root.
            # C is real and symmetric, so we can use eigh()
            D, U = la.eigh(C)
            D = np.abs(D)
            S = np.sqrt(D)
            # Now form B = C^{-1/2} Chat C^{-1/2}. I am using broadcasting to divide rows and columns
            # by the eigenvalues, not sure if it is faster to form the matmul(S.T, S) matrix.
            # B = U S^{-1} V^T Chat U S^{-1} U^T
            B = np.dot(np.dot(U,np.dot(np.dot(U.T,Chat),U)/S[:,None]/S[None,:]),U.T)
            # Now evaluate the matrix function g[B]:
            D, U = la.eigh(B)
            gD = np.sign(D-1.)*np.sqrt(2.*np.maximum(0.,D-np.log(D)-1.))
            # Final transformation. U*gD = U*gD[None,:] done by broadcasting. Collect chain matrix multiplication using reduce.
            M = reduce(np.dot, [CfHalf,U*gD[None,:],U.T,CfHalf.T])
            #M = np.dot(np.dot(np.dot(CfHalf,U*gD[None,:]),U.T),Cfhalf.T)
            return M
        # Recover Cl_s from CLASS, which is a dictionary, with the method
        # get_cl from the Likelihood class, because it already makes the
        # conversion to uK^2.
        dict_Cls = self.get_cl(cosmo, self.cl_lmax)
        # Make short hand expressions and remove l=0.
        ell = dict_Cls['ell'][1:]
        DlEE = ell*(ell+1)*dict_Cls['ee'][1:]/(2*np.pi)
        DlBB = ell*(ell+1)*dict_Cls['bb'][1:]/(2*np.pi)
        # Update foreground model
        self.UpdateForegroundModel(cosmo, data)
        #Make names and fields into lists
        map_names = self.map_names_used.split()
        map_fields = self.map_fields_used
        nmaps = len(map_names)
        ncrossmaps = nmaps*(nmaps+1)/2
        nbins = int(self.nbins)
        # Initialise Cls matrix to zero:
        Cls = np.zeros((nbins,nmaps,nmaps))
        # Initialise the X vector:
        X = np.zeros((nbins*ncrossmaps))
        for i in range(nmaps):
            for j in range(i+1):
                #If EE or BB, add theoretical prediction including foreground:
                if map_fields[i]==map_fields[j]=='E' or map_fields[i]==map_fields[j]=='B':
                    map1 = map_names[i]
                    map2 = map_names[j]
                    dust = self.fdust[map1]*self.fdust[map2]
                    sync = self.fsync[map1]*self.fsync[map2]
                    dustsync = self.fdust[map1]*self.fsync[map2] + self.fdust[map2]*self.fsync[map1]
                    # if EE spectrum, multiply foregrounds by the EE/BB ratio:
                    if map_fields[i]=='E':
                        dust = dust * self.EEtoBB_dust
                        sync = sync * self.EEtoBB_sync
                        dustsync = dustsync * np.sqrt(self.EEtoBB_dust*self.EEtoBB_sync)
                        # Deep copy is important here, since we want to reuse DlXX for each map.
                        DlXXwithforegound = np.copy(DlEE)
                    else:
                        DlXXwithforegound = np.copy(DlBB)
                    # Finally add the foreground model:
                    DlXXwithforegound += (dust*self.dustcoeff+sync*self.synccoeff+dustsync*self.dustsynccoeff)
                    # Apply the binning using the window function:
                    for k in range(nbins):
                        Cls[k,i,j] = Cls[k,j,i] = np.dot(DlXXwithforegound,self.window_data[k,:,self.diag_to_flat[i,j]])
        # Add noise contribution:
        for k in range(nbins):
            Cls[k,:,:] += self.cl_noise_matrix[k]
            # Compute entries in X vector using the matrix transform
            T = MatrixTransform(Cls[k,:,:], self.cl_hat_matrix[k], self.cl_fiducial_sqrt_matrix[k])
            # Add flat version of T to the X vector
            X[k*ncrossmaps:(k+1)*ncrossmaps] = T[self.flat_to_diag]
        # Compute chi squared
        chi2 = np.dot(X.T,np.dot(self.covmat_inverse,X))
        return -0.5*chi2
    def UpdateForegroundModel(self, cosmo, data):
        """
        Update the foreground model.

        Reads the current dust/synchrotron nuisance parameters from the MCMC
        state and refreshes self.fdust, self.fsync and the ell-dependent
        foreground coefficients used in loglkl.

        NOTE(review): ``dict.iteritems()`` is Python 2 only.
        """
        # Function to compute f_dust
        def DustScaling(beta, Tdust, bandpass):
            # Calculates greybody scaling of dust signal defined at 353 GHz to specified bandpass.
            nu0 = 353 #Pivot frequency for dust (353 GHz).
            # Integrate greybody scaling and thermodynamic temperature conversion across experimental bandpass.
            gb_int = np.sum(bandpass['dnu']*bandpass['resp']*bandpass['nu']**(3+beta)/(np.exp(Ghz_Kelvin*bandpass['nu']/Tdust) - 1))
            # Calculate values at pivot frequency.
            gb0 = nu0**(3+beta) / (np.exp(Ghz_Kelvin*nu0/Tdust) - 1)
            # Calculate and return dust scaling fdust.
            return ((gb_int / gb0) / bandpass['th353'])
        # Function to compute f_sync
        def SyncScaling(beta, bandpass):
            #Calculates power-law scaling of synchrotron signal defined at 150 GHz to specified bandpass.
            nu0 = 23.0 # Pivot frequency for sync (23 GHz).
            # Integrate power-law scaling and thermodynamic temperature conversion across experimental bandpass.
            pl_int = np.sum( bandpass['dnu']*bandpass['resp']*bandpass['nu']**(2+beta))
            # Calculate values at pivot frequency.
            pl0 = nu0**(2+beta)
            # Calculate and return dust scaling fsync.
            return ((pl_int / pl0) / bandpass['th023'])
        ellpivot = 80.
        ell = np.arange(1,int(self.cl_lmax)+1)
        # Convenience variables: store the nuisance parameters in short named variables
        # for parname in self.use_nuisance:
        #     evalstring = parname+" = data.mcmc_parameters['"+parname+"']['current']*data.mcmc_parameters['"+parname+"']['scale']"
        #     print evalstring
        BBdust = data.mcmc_parameters['BBdust']['current']*data.mcmc_parameters['BBdust']['scale']
        BBsync = data.mcmc_parameters['BBsync']['current']*data.mcmc_parameters['BBsync']['scale']
        BBalphadust = data.mcmc_parameters['BBalphadust']['current']*data.mcmc_parameters['BBalphadust']['scale']
        BBbetadust = data.mcmc_parameters['BBbetadust']['current']*data.mcmc_parameters['BBbetadust']['scale']
        BBTdust = data.mcmc_parameters['BBTdust']['current']*data.mcmc_parameters['BBTdust']['scale']
        BBalphasync = data.mcmc_parameters['BBalphasync']['current']*data.mcmc_parameters['BBalphasync']['scale']
        BBbetasync = data.mcmc_parameters['BBbetasync']['current']*data.mcmc_parameters['BBbetasync']['scale']
        BBdustsynccorr = data.mcmc_parameters['BBdustsynccorr']['current']*data.mcmc_parameters['BBdustsynccorr']['scale']
        # Store current EEtoBB conversion parameters.
        self.EEtoBB_dust = data.mcmc_parameters['EEtoBB_dust']['current']*data.mcmc_parameters['EEtoBB_dust']['scale']
        self.EEtoBB_sync = data.mcmc_parameters['EEtoBB_sync']['current']*data.mcmc_parameters['EEtoBB_sync']['scale']
        # Compute fdust and fsync for each bandpass
        self.fdust = {}
        self.fsync = {}
        for key, bandpass in self.bandpasses.iteritems():
            self.fdust[key] = DustScaling(BBbetadust, BBTdust, bandpass)
            self.fsync[key] = SyncScaling(BBbetasync, bandpass)
        # Computes coefficients such that the foreground model is simply
        # dust*self.dustcoeff+sync*self.synccoeff+dustsync*self.dustsynccoeff
        # These coefficients are independent of the map used,
        # so we save some time by computing them here.
        self.dustcoeff = BBdust*(ell/ellpivot)**BBalphadust
        self.synccoeff = BBsync*(ell/ellpivot)**BBalphasync
        self.dustsynccoeff = BBdustsynccorr*np.sqrt(BBdust*BBsync)*(ell/ellpivot)**(0.5*(BBalphadust+BBalphasync))
| mit |
pramodh-bn/learn-data-edx | Week 7/qp100.py | 1 | 5618 | import numpy as np
from sklearn.svm import SVC
def getSample(pointA, pointB, numberOfPoints):
    """Draw labelled points in [-1,1]^2, redrawing until both classes appear.

    Each row is (x, y, label) where label = isLeft(pointA, pointB, (x, y)).
    """
    def draw():
        xs = np.random.uniform(-1, 1.00, numberOfPoints)
        ys = np.random.uniform(-1, 1.00, numberOfPoints)
        return np.array([(x, y, isLeft(pointA, pointB, (x, y))) for x, y in zip(xs, ys)])

    sample = draw()
    # A sample where one class is absent cannot train a classifier: redraw.
    while len(sample[:, 2][sample[:, 2] == -1]) == 0 or len(sample[:, 2][sample[:, 2] == 1]) == 0:
        sample = draw()
    return sample
def getRandomLine():
    """Return two random endpoints in [-1,1]^2 defining a separating line."""
    xs = np.random.uniform(-1, 1.00, 2)
    ys = np.random.uniform(-1, 1.00, 2)
    return list(zip(xs, ys))
def getPoints(numberOfPoints):
    """Return numberOfPoints random (x, y) tuples drawn uniformly from [-1,1]^2."""
    xs = np.random.uniform(-1, 1.00, numberOfPoints)
    ys = np.random.uniform(-1, 1.00, numberOfPoints)
    return list(zip(xs, ys))
def isLeft(a, b, c):
    """Return 1 if point c lies strictly left of the directed line a->b, else -1.

    Uses the sign of the 2-d cross product (b-a) x (c-a); collinear
    points count as -1.
    """
    cross = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
    return 1 if cross > 0 else -1
def sign(x):
    """Binary sign: +1 for positive x, -1 otherwise (zero maps to -1)."""
    return -1 if x <= 0 else 1
def getMisMatchesQP(data, clf):
    """Fraction of rows whose label sign disagrees with clf's prediction sign.

    data rows are (x, y, label); clf must expose predict() over the (x, y)
    columns.
    """
    features = np.c_[data[:, 0], data[:, 1]]
    predicted = clf.predict(features)
    agree = np.sum(np.sign(predicted) == np.sign(data[:, 2]))
    return float(len(data) - agree) / len(data)
def doMonteCarloQP(pointa, pointb, clf, nopoint):
    """Monte-Carlo estimate of clf's error against the true line over nopoint random points."""
    draws = [(np.random.uniform(-1, 1), np.random.uniform(-1, 1)) for _ in range(nopoint)]
    labelled = np.array([(x, y, isLeft(pointa, pointb, (x, y))) for x, y in draws])
    return getMisMatchesQP(labelled, clf)
def doPLA(sample):
    """Run the perceptron learning algorithm on (x, y, label) rows.

    Returns (weights, iterations) where weights = (w0, w1, w2) for the
    decision function w0 + w1*x + w2*y.

    NOTE(review): the membership test below appends an update when the row
    [x, y, predicted_sign] DOES exist in the sample — i.e. when the
    prediction matches the stored label — while the adjacent comment reads
    "point not in sample".  Combined with the negated update directions
    this appears to implement PLA with an inverted sign convention; confirm
    against the caller's use of getMisMatches (which also negates scores).
    """
    w = np.array([0,0,0])
    iteration = 0
    it = 0
    while True:#(it < 10):
        iteration = iteration + 1
        it = it + 1
        mismatch = list()
        for i in sample:
            #print("point in question ", i , " weight ", w)
            # Current model's raw score for this point.
            yy = w[0] + w[1] * i[0] + w[2] * i[1]
            #print("this is after applying weight to a point ",yy)
            point = [i[0], i[1], sign(yy)]
            if any(np.equal(sample, point).all(1)):
                #print "point not in sample"
                if(point[2] == -1):
                    mismatch.append((1, (i[0]), (i[1])))
                else:
                    mismatch.append((-1, -(i[0]), -(i[1])))
        #print " length ", len(mismatch), " mismatch list ",mismatch
        if(len(mismatch) > 0):
            #find a random point and update w
            choiceIndex = np.random.randint(0, len(mismatch))
            choice = mismatch[choiceIndex]
            #print("choice ", choice)
            w = w + choice
            #print "new weight ", w
        else:
            break
    #print("this is the iteration ", iteration)
    #print("this is the weight ", w)
    #montelist = [monetcarlo((x1,y1),(x2,y2),w,10000) for i in range(5)]
    #print("Montelist " , montelist)
    #monteavg = sum([i for i in montelist])/10
    return w, iteration
def getMisMatches(data, weights):
    """Fraction of rows misclassified by the negated linear model w0 + w1*x + w2*y.

    Scores are negated before the sign comparison (matching the inverted
    convention used by doPLA).
    """
    scores = -(weights[0] + weights[1] * data[:, 0] + weights[2] * data[:, 1])
    agree = np.sum(np.sign(scores) == np.sign(data[:, 2]))
    return float(len(data) - agree) / len(data)
def doMonteCarloNP(pointa, pointb, weights, nopoint):
    """Monte-Carlo estimate of the weight vector's error against the true line."""
    draws = [(np.random.uniform(-1, 1), np.random.uniform(-1, 1)) for _ in range(nopoint)]
    labelled = np.array([(x, y, isLeft(pointa, pointb, (x, y))) for x, y in draws])
    return getMisMatches(labelled, weights)
if __name__ == "__main__":
'''X = np.array([[-1,-1],[-2,-1], [1,1], [2,1]])
y = np.array([1,1,2,2])
clf = SVC()
clf.fit(X,y)
print(clf.predict([[-0.8,-1]]))'''
#clf = SVC()
clf = SVC(C = 1000, kernel = 'linear')
monteavgavgQP = list()
monteavgavgPLA = list()
approxavgQP = list()
for j in range(20):
#clf = SVC(C = 1000, kernel = 'linear')
monteavgQP = list()
monteavgPLA = list()
approxQP = list()
for k in range(1000):
nopoints = 100
line = getRandomLine()
sample = getSample(line[0], line[1], nopoints)
#print(sample)
X = np.c_[sample[:,0], sample[:,1]]
y = sample[:,2]
#print(y)
clf.fit(X,y)
w, it = doPLA(sample)
#print(len(clf.support_vectors_))
montelistQP = [doMonteCarloQP(line[0], line[1], clf, 10000) for i in range(1)]
qpMonte = sum(montelistQP)/len(montelistQP)
monteavgQP.append(sum(montelistQP)/len(montelistQP))
montelist = [ doMonteCarloNP(line[0], line[1], w, 10000) for i in range(1)]
plaMonte = sum(montelist)/len(montelist)
monteavgPLA.append(plaMonte)
if(montelistQP < monteavgPLA):
approxQP.append(1)
else:
approxQP.append(0)
print(sum(monteavgQP)/len(monteavgQP))
print(sum(monteavgPLA)/len(monteavgPLA))
print(sum(approxQP)/len(approxQP))
monteavgavgQP.append(sum(monteavgQP)/len(monteavgQP))
monteavgavgPLA.append(sum(monteavgPLA)/len(monteavgPLA))
approxavgQP.append(sum(approxQP)/len(approxQP))
print(sum(monteavgavgQP)/len(monteavgavgQP))
print(sum(monteavgavgPLA)/len(monteavgavgPLA))
print(sum(approxavgQP)/len(approxavgQP))
| unlicense |
arjoly/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
    """Check predict_proba shapes, row sums, and log-consistency per output."""
    proba = clf.predict_proba(X)
    # We know that we can have division by zero
    log_proba = clf.predict_log_proba(X)
    targets = np.atleast_1d(y)
    if targets.ndim == 1:
        targets = np.reshape(targets, (-1, 1))
    n_outputs = targets.shape[1]
    n_samples = len(X)
    # Single-output estimators return a bare array; normalize to a list.
    if n_outputs == 1:
        proba = [proba]
        log_proba = [log_proba]
    for k in range(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(targets[:, k])))
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        # We know that we can have division by zero
        assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
    """Fit a clone of `clf` on 1d and on 2d targets and check the
    prediction shape matches the target shape in both cases."""
    # 1d case
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([1, 2, 1, 1])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)

    # 2d case
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
    """Fit a clone of `clf` on a wide (5-output) 2d target and check the
    prediction shape matches the target shape."""
    # 2d case only
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([[1, 0, 5, 4, 3],
                  [2, 0, 1, 2, 5],
                  [1, 0, 4, 5, 2],
                  [1, 3, 3, 2, 0]])
    est = clone(clf)
    est.fit(X, y)
    y_pred = est.predict(X)
    assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
                              y_test, y_pred_test):
    """Check that a dummy regressor predicted the constant `statistic` for
    every sample of both the train and the test set."""
    assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
                       y_pred_learn)
    assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
                       y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
sonnyhu/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

# X is the 10x10 Hilbert matrix
# (a classically ill-conditioned matrix, which is the point of the example)
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)

###############################################################################
# Compute paths

n_alphas = 200
# Regularization strengths, log-spaced from 1e-10 (nearly OLS) to 1e-2.
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)

# Refit the same estimator for each alpha and record the coefficients.
coefs = []
for a in alphas:
    clf.set_params(alpha=a)
    clf.fit(X, y)
    coefs.append(clf.coef_)

###############################################################################
# Display results

ax = plt.gca()

ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ChristianSch/skml | test/test_dataset.py | 1 | 1443 | from chai import Chai
from scipy import sparse
from sklearn.linear_model import LogisticRegression
from skml.problem_transformation.probabilistic_classifier_chain \
import ProbabilisticClassifierChain
from skml.datasets import load_dataset, sample_down_label_space
class TestDataset(Chai):
    """Exercise `load_dataset` and `sample_down_label_space` on dense and
    sparse label matrices."""

    def test_load_yeast(self):
        # Smoke test: the bundled 'yeast' dataset loads without error.
        X, y = load_dataset('yeast')

    def test_sample_down_label_space(self):
        _, y = load_dataset('yeast')
        sample10 = sample_down_label_space(y, 10)
        assert sample10.shape[1] == 10
        sample5 = sample_down_label_space(y, 5)
        assert sample5.shape[1] == 5
        # Requesting more labels than the dataset has must raise.
        self.assert_raises(ValueError, sample_down_label_space, y, 20)

    def test_sparse_sample_down_label_space(self):
        y = sparse.rand(200, 20, format='csc')
        sample10 = sample_down_label_space(y, 10)
        assert sample10.shape[1] == 10

    def test_sparse_sample_down_label_space_classification(self):
        clf = ProbabilisticClassifierChain(LogisticRegression())
        # LogisticRegression needs dense
        X = sparse.random(100, 15, format='csc').toarray()
        _y = sparse.random(100, 20, format='csc')
        y = sample_down_label_space(_y, 10)
        # Binarize the sampled labels before fitting the classifier chain.
        y = y > 0.1
        y = y.toarray().astype(int)
        clf.fit(X, y)
        y_pred = clf.predict(X)
        assert y_pred.shape == y.shape

    def test_load_enron(self):
        # 'enron' ships an 'undivided' (no train/test split) variant.
        X, y = load_dataset('enron', 'undivided')
| mit |
larsoner/mne-python | examples/simulation/plot_simulate_raw_data.py | 19 | 2830 | """
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source activation
multiple times.
"""
# Authors: Yousra Bekhti <yousra.bekhti@gmail.com>
# Mark Wronkiewicz <wronk.mark@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, Epochs, compute_covariance, make_ad_hoc_cov
from mne.datasets import sample
from mne.simulation import (simulate_sparse_stc, simulate_raw,
add_noise, add_ecg, add_eog)
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
# Load real data as the template
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
##############################################################################
# Generate dipole time series
n_dipoles = 4 # number of dipoles to create
epoch_duration = 2. # duration of each epoch/event
n = 0 # harmonic number
rng = np.random.RandomState(0) # random state (make reproducible)
def data_fun(times):
    """Generate time-staggered sinusoids at harmonics of 10Hz"""
    # Uses (and increments) the module-level counter `n` so that each
    # successive call produces the next harmonic in the next time slot.
    global n
    n_samp = len(times)
    window = np.zeros(n_samp)
    # Activate only the n-th slice of the window so the dipole
    # activations do not overlap in time.
    start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
                   for ii in (2 * n, 2 * n + 1)]
    window[start:stop] = 1.
    n += 1
    # NOTE: `n` is incremented before being used as the harmonic number,
    # so the first call uses the first harmonic (n=1).
    data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
    data *= window
    return data
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
data_fun=data_fun, random_state=rng)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()
##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw.info, [stc] * 10, forward=fwd, verbose=True)
cov = make_ad_hoc_cov(raw_sim.info)
add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], random_state=rng)
add_ecg(raw_sim, random_state=rng)
add_eog(raw_sim, random_state=rng)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim) # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, tmin=-0.2, tmax=epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical',
verbose='error') # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
| bsd-3-clause |
jjx02230808/project0223 | sklearn/decomposition/tests/test_kernel_pca.py | 32 | 8066 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
daureg/illalla | utils.py | 1 | 6495 | #! /usr/bin/python2
# vim: set fileencoding=utf-8
from collections import Counter
from persistent import load_var
import json
import arguments
from random import uniform
import CommonMongo as cm
from geographiclib.geodesic import Geodesic
EARTH = Geodesic.WGS84
from datetime import datetime as dt
import numpy as np
def noise():
    """Return a tiny random jitter in [0, 1e-6], used to de-overlap points."""
    jitter = uniform(0.0, 1e-6)
    return jitter
def to_css_hex(color):
    """Convert an RGB(A) color with float components in [0, 1] to a CSS
    hex string, dropping the alpha (last) component.

    ie http://matplotlib.org/api/colors_api.html#matplotlib.colors.rgb2hex
    >>> to_css_hex([1, 0, 1, .7])
    '#ff00ff'
    """
    # '{:02x}' zero-pads single-digit channels, replacing the original
    # manual hex()-slicing-and-padding logic.
    return '#' + ''.join('{:02x}'.format(int(255 * c)) for c in color[:-1])
def photos_to_heat_dataset(city, precision=4, limit=300):
    """Dump the `limit` densest photo locations of `city` (binned at
    `precision` decimal places) as a JS heatmap dataset file `<city>.js`."""
    photos = load_var(city)
    # Bin photos by rounded coordinates to count density per cell.
    points = Counter([(round(p[0], precision), round(p[1], precision))
                      for p in photos])
    maxi = points.most_common(1)[0][1]
    # NOTE(review): photos appear to be stored as (lon, lat) pairs, since
    # lat is read from p[1] — confirm against load_var's producer.
    dataset = [{'lat': p[1], 'lon': p[0], 'value': c}
               for p, c in points.most_common(limit)]
    json_dataset = json.dumps({'max': maxi, 'data': dataset})
    with open(city+'.js', 'w') as f:
        f.write('var {} = {}'.format(city, json_dataset))
def photos_to_cluster_dataset(city, limit=300):
    """Dump the first `limit` photo locations of `city` (with jitter) as a
    JS marker-cluster dataset file `<city>_cluster.js`."""
    photos = load_var(city)
    # Jitter avoids perfectly overlapping markers at identical coordinates.
    points = [[p[0] + noise(), p[1] + noise(), 'Win!']
              for p in photos[:limit]]
    with open(city+'_cluster.js', 'w') as f:
        f.write('var {}_cluster = {}'.format(city, str(points)))
def output_checkins(city, host=cm.HOST, port=cm.PORT):
    """Write a JS array of all checkins in `city` with their hour to
    `<city>_fs.js`, as `var <city>_fs = [...]`."""
    checkins = cm.connect_to_db('foursquare', host, port)[0]['checkin']
    query = cm.build_query(city, venue=False, fields=['loc', 'time'])
    res = checkins.aggregate(query)['result']

    def format_checkin(checkin):
        """Extract location (plus jitter) and hour from checkin"""
        lng, lat = checkin['loc']['coordinates']
        hour = checkin['time'].hour
        return [lng + noise(), lat + noise(), hour]

    formated = [str(format_checkin(c)) for c in res]
    with open(city + '_fs.js', 'w') as output:
        # BUG FIX: the JS variable name was hard-coded to 'helsinki_fs'
        # although the function is parameterized by `city` and the output
        # filename already uses it; name the variable after the city.
        output.write('var {}_fs = [\n'.format(city))
        output.write(',\n'.join(formated))
        output.write('];')
def get_nested(dico, fields, default=None):
    """If the key hierarchy of `fields` exists in `dico`, return its value,
    otherwise `default`.
    >>> get_nested({'loc': {'type': 'city'}}, ['loc', 'type'])
    'city'
    >>> get_nested({'type': 'city'}, 'type')
    'city'
    >>> get_nested({'loc': {'type': 'city'}}, ['loc', 'lat']) is None
    True
    >>> get_nested({'loc': {'type': None}}, ['loc', 'type']) is None
    True
    >>> get_nested({'l': {'t': {'a': 'h'}}}, ['l', 't', 'a'])
    'h'
    >>> get_nested({'l': {'t': None}}, ['l', 't', 'a'], 0)
    0
    >>> get_nested({'names': {'symbols': 'euro'}}, ['names', 'urls'], [])
    []
    """
    # A plain string is a single key, not a key hierarchy.  The explicit
    # isinstance check fixes Python 3 (where str has __iter__ and the
    # original hasattr test wrongly iterated over characters) while
    # preserving the Python 2 behavior.
    if isinstance(fields, str) or not hasattr(fields, '__iter__'):
        return dico.get(fields, default)
    current = dico
    last_index = len(fields) - 1
    for index, field in enumerate(fields):
        if not hasattr(current, 'get'):
            # A non-dict value was reached before the hierarchy ended.
            return default if index == last_index else current
        # Intermediate misses fall back to {} so the walk can continue;
        # only the final lookup yields `default`.
        current = current.get(field, default if index == last_index else {})
    return current
def xzip(items, fields):
    """Unpack each field of `fields` into a separate tuple for object in
    `items`.
    >>> xzip([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], ['a', 'b'])
    [(1, 3), (2, 4)]
    >>> xzip([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], ['b'])
    [(2, 4)]
    >>> xzip([], ['a', 'b'])
    [[], []]
    """
    rows = [[obj[field] for field in fields] for obj in items]
    transposed = zip(*rows)
    if transposed == []:
        # zip() of nothing yields no columns; return one empty list per field.
        return len(fields) * [[]]
    return transposed
def compute_entropy(c):
    """Compute the (natural-log) entropy of the count array `c`.

    Zero counts are masked out since 0*log(0) is taken to be 0.
    """
    positive = c[c > 0]
    total = np.sum(c)
    return np.log(total) - np.sum(positive * np.log(positive)) / total
def human_day(time, new_day=4, period=True):
    """Return period of weekday of `time`, but using `new_day` hour as
    separator instead of midnight.
    >>> human_day(dt(2014, 3, 10, 8))
    0
    >>> human_day(dt(2014, 3, 10, 14))
    1
    >>> human_day(dt(2014, 3, 10, 22))
    2
    >>> human_day(dt(2014, 3, 11, 2))
    2
    >>> human_day(dt(2014, 3, 11, 6))
    3
    >>> human_day(dt(2014, 3, 17, 2))
    20
    """
    hour = time.hour
    day = time.weekday()
    third = 24 / 3  # each "human day" splits into three 8-hour periods
    if new_day <= hour < new_day + third:
        shift = 0
    elif new_day + third <= hour < new_day + 2 * third:
        shift = 1
    else:
        shift = 2
    # Hours before `new_day` still belong to the previous calendar day.
    if hour < new_day:
        day = (day - 1) % 7
    if period:
        return day * 3 + shift
    return day
def geodesic_distance(point_1, point_2):
    """Return the distance in meters between two JSON Points."""
    assert 'coordinates' in point_1 and 'coordinates' in point_2
    # GeoJSON stores positions as [longitude, latitude].
    p1_lon, p1_lat = point_1['coordinates']
    p2_lon, p2_lat = point_2['coordinates']
    # 's12' is the geodesic arc length between the two points, in meters.
    return EARTH.Inverse(p1_lat, p1_lon, p2_lat, p2_lon)['s12']
def answer_to_dict(cursor, transfo=None, default=None):
    """Take a `cursor` resulting from a mongo find query and return a
    dictionary id: `transfo`(value) (provided that there is only one other
    field) (or `default`)."""
    try:
        # next() works on both Python 2 and 3 iterators, unlike the
        # Python-2-only cursor.next() method the original used.
        first = next(cursor)
    except StopIteration:
        return {}
    transfo = transfo or (lambda x: x)
    # Materialize the keys: Python 3 dict views have no .index().
    keys = list(first.keys())
    assert '_id' in keys and len(keys) == 2
    # The value field is whichever of the two keys is not '_id'.
    field_name = keys[(keys.index('_id') + 1) % 2]
    res = {first['_id']: transfo(first.get(field_name, default))}
    res.update({v['_id']: transfo(v.get(field_name, default)) for v in cursor})
    return res
def convert_icwsm_checkin(checkins):
    """Harmonize user and id fields between old and new checkins"""
    limit = dt(2014, 1, 1)
    # Old-style checkins (on or before 2014-01-01) get 'tuid'/'tid' fields
    # mirroring their uid (as a string) and their _id, to match the newer
    # checkin schema.
    for old in checkins.find({'time': {'$lte': limit}}):
        _id, uid = old['_id'], str(old['uid'])
        checkins.update({'_id': _id}, {'$set': {'tuid': uid, 'tid': _id}})
def memodict(f):
    """Memoization decorator for a function taking a single argument """
    # http://code.activestate.com/recipes/578231
    class _Cache(dict):
        # dict.__missing__ runs on a cache miss: compute, store, return.
        def __missing__(self, key):
            value = f(key)
            self[key] = value
            return value
    # dict.__getitem__ itself serves as the memoized callable.
    return _Cache().__getitem__
if __name__ == '__main__':
    # Run the module's doctests first (e.g. human_day examples).
    import doctest
    doctest.testmod()
    #pylint: disable=C0103
    # `arguments` and `cm` are project modules imported earlier in the file
    # (outside this view): parse host/port and open the mongo connection.
    args = arguments.get_parser().parse_args()
    foursquare = cm.connect_to_db('foursquare', args.host, args.port)[0]
    # Backfill tuid/tid on pre-2014 checkin documents.
    convert_icwsm_checkin(foursquare.checkin)
| mit |
ddervs/GreenGraph | greengraph/classes/Map.py | 1 | 1580 | import numpy as np
import requests
from StringIO import StringIO
from matplotlib import image as img
class Map(object):
    """A Google Static Maps tile around a (latitude, longitude) point.

    On construction the map image is fetched over HTTP and decoded into
    `self.pixels`, a numpy array of RGB(A) values in [0, 1]; the helper
    methods then measure how "green" the area is.  Python 2 only
    (uses StringIO for in-memory byte buffers).
    """

    def __init__(self, latitude, longitude, satellite=True,
                 zoom=10, size=(400, 400), sensor=False):
        base = "http://maps.googleapis.com/maps/api/staticmap?"
        params = dict(
            sensor=str(sensor).lower(),
            zoom=zoom,
            size="x".join(map(str, size)),
            center=",".join(map(str, (latitude, longitude))),
            # Hide place-name labels so text does not pollute pixel counts.
            style="feature:all|element:labels|visibility:off"
        )
        if satellite:
            params["maptype"] = "satellite"
        # Fetch our PNG image data
        self.image = requests.get(base, params=params).content
        # Parse our PNG image as a numpy array
        self.pixels = img.imread(StringIO(self.image))

    def green(self, threshold):
        """Boolean mask of pixels whose green channel dominates both
        red and blue by a factor of `threshold`."""
        # Use NumPy to build an element-by-element logical array
        greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
        greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
        green = np.logical_and(greener_than_red, greener_than_blue)
        return green

    def count_green(self, threshold=1.1):
        """Number of pixels classified as green."""
        return np.sum(self.green(threshold))

    def show_green(self, threshold=1.1):
        """Return PNG bytes visualising the green mask (green on black)."""
        green = self.green(threshold)
        # Broadcast the 2-D mask against a pure-green RGB vector.
        out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :]
        my_buffer = StringIO()
        img.imsave(my_buffer, out, format='png')
        return my_buffer.getvalue()
| mit |
cython-testbed/pandas | pandas/tests/io/parser/compression.py | 2 | 4740 | # -*- coding: utf-8 -*-
"""
Tests compressed data parsing functionality for all
of the parsers defined in parsers.py
"""
import pytest
import pandas as pd
import pandas.compat as compat
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import gzip
import bz2
try:
lzma = compat.import_lzma()
except ImportError:
lzma = None
class CompressionTests(object):
    """Mixin exercising read_csv's `compression=` handling.

    Concrete parser test classes provide `self.csv1` (path to a sample
    csv), `self.read_csv` and `self.engine`.
    """

    def test_zip(self):
        """Round-trip through zip archives, including multi-file and
        empty-archive error cases."""
        import zipfile

        with open(self.csv1, 'rb') as data_file:
            data = data_file.read()
        expected = self.read_csv(self.csv1)

        with tm.ensure_clean('test_file.zip') as path:
            with zipfile.ZipFile(path, mode='w') as tmp:
                tmp.writestr('test_file', data)
            result = self.read_csv(path, compression='zip')
            tm.assert_frame_equal(result, expected)

            result = self.read_csv(path, compression='infer')
            tm.assert_frame_equal(result, expected)

            # BUG FIX: the original used `is not 'python'`, an identity
            # comparison against a str literal whose result is
            # implementation-dependent (SyntaxWarning on CPython >= 3.8).
            if self.engine != 'python':
                with open(path, 'rb') as f:
                    result = self.read_csv(f, compression='zip')
                    tm.assert_frame_equal(result, expected)

        with tm.ensure_clean('combined_zip.zip') as path:
            inner_file_names = ['test_file', 'second_file']
            with zipfile.ZipFile(path, mode='w') as tmp:
                for file_name in inner_file_names:
                    tmp.writestr(file_name, data)
            # Archives holding more than one member are ambiguous.
            tm.assert_raises_regex(ValueError, 'Multiple files',
                                   self.read_csv, path, compression='zip')
            tm.assert_raises_regex(ValueError, 'Multiple files',
                                   self.read_csv, path,
                                   compression='infer')

        with tm.ensure_clean() as path:
            with zipfile.ZipFile(path, mode='w') as tmp:
                pass
            tm.assert_raises_regex(ValueError, 'Zero files',
                                   self.read_csv, path, compression='zip')

        with tm.ensure_clean() as path:
            with open(path, 'wb') as f:
                # An empty (non-zip) file must raise BadZipfile.
                pytest.raises(zipfile.BadZipfile, self.read_csv,
                              f, compression='zip')

    @pytest.mark.parametrize('compress_type, compress_method, ext', [
        ('gzip', gzip.GzipFile, 'gz'),
        ('bz2', bz2.BZ2File, 'bz2'),
        pytest.param('xz', getattr(lzma, 'LZMAFile', None), 'xz',
                     marks=td.skip_if_no_lzma)
    ])
    def test_other_compression(self, compress_type, compress_method, ext):
        """Round-trip through gzip/bz2/xz by path, file handle and by
        extension inference."""
        with open(self.csv1, 'rb') as data_file:
            data = data_file.read()
        expected = self.read_csv(self.csv1)

        with tm.ensure_clean() as path:
            with compress_method(path, mode='wb') as tmp:
                tmp.write(data)
            result = self.read_csv(path, compression=compress_type)
            tm.assert_frame_equal(result, expected)

            if compress_type == 'bz2':
                # Unknown compression names are rejected.
                pytest.raises(ValueError, self.read_csv,
                              path, compression='bz3')

            with open(path, 'rb') as fin:
                result = self.read_csv(fin, compression=compress_type)
                tm.assert_frame_equal(result, expected)

        with tm.ensure_clean('test.{}'.format(ext)) as path:
            with compress_method(path, mode='wb') as tmp:
                tmp.write(data)
            result = self.read_csv(path, compression='infer')
            tm.assert_frame_equal(result, expected)

    def test_read_csv_infer_compression(self):
        """compression='infer' works for plain paths, compressed paths
        and already-open file objects."""
        # see gh-9770
        expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)

        with open(self.csv1) as f:
            inputs = [self.csv1, self.csv1 + '.gz',
                      self.csv1 + '.bz2', f]
            for inp in inputs:
                df = self.read_csv(inp, index_col=0, parse_dates=True,
                                   compression='infer')
                tm.assert_frame_equal(expected, df)

    def test_read_csv_compressed_utf16_example(self, datapath):
        """Zipped utf-16 input decodes correctly."""
        # GH18071
        path = datapath('io', 'parser', 'data', 'utf16_ex_small.zip')

        result = self.read_csv(path, encoding='utf-16',
                               compression='zip', sep='\t')
        expected = pd.DataFrame({
            u'Country': [u'Venezuela', u'Venezuela'],
            u'Twitter': [u'Hugo Chávez Frías', u'Henrique Capriles R.']
        })
        tm.assert_frame_equal(result, expected)

    def test_invalid_compression(self):
        """An unknown compression keyword raises a clear ValueError."""
        msg = 'Unrecognized compression type: sfark'
        with tm.assert_raises_regex(ValueError, msg):
            self.read_csv('test_file.zip', compression='sfark')
| bsd-3-clause |
biln/airflow | airflow/hooks/base_hook.py | 18 | 2571 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
    """
    Abstract base class for hooks, hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    object that can handle the connection and interaction to specific
    instances of these systems, and expose consistent methods to interact
    with them.
    """
    def __init__(self, source):
        pass

    @classmethod
    def get_connections(cls, conn_id):
        """Return all Connection rows matching `conn_id`.

        :raises AirflowException: if no connection with that id exists.
        """
        session = settings.Session()
        try:
            db = (
                session.query(Connection)
                .filter(Connection.conn_id == conn_id)
                .all()
            )
            if not db:
                raise AirflowException(
                    "The conn_id `{0}` isn't defined".format(conn_id))
            # Detach the objects so they stay usable after the session closes.
            session.expunge_all()
        finally:
            # BUG FIX: the original skipped session.close() on the
            # AirflowException path, leaking the session.
            session.close()
        return db

    @classmethod
    def get_connection(cls, conn_id):
        """Return a single Connection for `conn_id`.

        An AIRFLOW_CONN_<ID> environment variable takes precedence over
        the metadata database; when several DB rows match, one is picked
        at random (cheap load balancing across hosts).
        """
        environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
        conn = None
        if environment_uri:
            conn = Connection(conn_id=conn_id, uri=environment_uri)
        else:
            conn = random.choice(cls.get_connections(conn_id))
        if conn.host:
            # Lazy %s formatting: the message is only built if emitted.
            logging.info("Using connection to: %s", conn.host)
        return conn

    @classmethod
    def get_hook(cls, conn_id):
        """Return the concrete hook instance for `conn_id`'s type."""
        connection = cls.get_connection(conn_id)
        return connection.get_hook()

    def get_conn(self):
        """Return a live connection object. Must be overridden."""
        raise NotImplementedError()

    def get_records(self, sql):
        """Return the result rows of `sql`. Must be overridden."""
        raise NotImplementedError()

    def get_pandas_df(self, sql):
        """Return the result of `sql` as a DataFrame. Must be overridden."""
        raise NotImplementedError()

    def run(self, sql):
        """Execute `sql` against the external system. Must be overridden."""
        raise NotImplementedError()
| apache-2.0 |
landmanbester/Copernicus | Plotter.py | 1 | 19513 | #!/usr/bin/env python
import numpy as np
from scipy.interpolate import UnivariateSpline as uvs
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 14, 'font.family': 'serif'})
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
from genFLRW import FLRW
from Master import SSU
from My2Ddist import plot2Ddist2 as pl2d
from matplotlib.patches import Rectangle
from Copernicus.Parset import MyOptParse
class plh(object):
    """Plot helper wrapping a matplotlib axis and a set of MCMC samples.

    `samps` is a (nstar, npoints) array: one row per abscissa value, one
    column per posterior sample.  On construction the empirical 1- and
    2-sigma credible bands plus the median are extracted (via ECDF) into
    `self.contours`, which the draw_* methods then render.  Python 2 only
    (print statements).
    """
    def __init__(self, samps, ax, delzeros=False):
        # ax: matplotlib axis all plotting methods draw onto.
        self.ax = ax
        # check for all zeros (columns whose last row is 0 are dropped)
        if delzeros:
            I = np.argwhere(samps[-1, :] == 0)
            if I.size > 0:
                print "Found ", I.size, "zeros. Deleting"
                samps = np.delete(samps, I, axis=1)
        # Check for nans: drop every sample (column) containing any NaN
        if np.isnan(samps).any():
            I = np.argwhere(np.isnan(samps))
            Iy = np.unique(I[:,1])
            print "Found ", Iy.size, "NaN's. Deleting"
            samps = np.delete(samps, Iy, axis=1)
        self.samps = samps
        # get contours
        self.contours = self.get_Conf()

    def get_Conf(self):
        """Return (nstar, 5) array of [median, -1sig, +1sig, -2sig, +2sig]
        per row, read off the empirical CDF of each row's samples."""
        nstar, npoints = self.samps.shape
        contours = np.zeros([nstar, 5])
        for i in range(nstar):
            x = np.sort(self.samps[i, :])
            cdf = ECDF(x)
            # xgrid = x[0] + x[-1]*self.l
            # for j in range(Ngrid):
            #     cdf[j] = (sum(x <= xgrid[j]) + 0.0)/npoints
            Im = np.argwhere(cdf.y <= 0.5)[-1] # Mean
            contours[i, 0] = cdf.x[Im]
            Id = np.argwhere(cdf.y <= 0.16)[-1] # lower 1sig
            contours[i, 1] = cdf.x[Id]
            Idd = np.argwhere(cdf.y <= 0.025)[-1] # lower 2sig
            contours[i, 3] = cdf.x[Idd]
            Iu = np.argwhere(cdf.y <= 0.84)[-1] # upper 1sig
            contours[i, 2] = cdf.x[Iu]
            Iuu = np.argwhere(cdf.y <= 0.975)[-1] # upper 2sig
            contours[i, 4] = cdf.x[Iuu]
        return contours

    def add_data(self, x, y, sy, alp=0.5, scale=1.0, format='xr', lab=None):
        """Overlay observed data points with error bars."""
        self.ax.errorbar(x, y * scale, sy * scale, fmt=format, alpha=alp, label=lab)
        return

    def add_plot(self, x, y, col, lab, scale=1.0, wid=1.0):
        """Overlay a reference curve (e.g. LCDM/LTB comparison)."""
        self.ax.plot(x, y * scale, col, label=lab, lw=wid)
        return

    def set_lims(self, xlow, xhigh, ylow, yhigh):
        """Set both axis limits."""
        self.ax.set_xlim(xlow, xhigh)
        self.ax.set_ylim(ylow, yhigh)
        return

    def set_label(self, xlab, xfnt, ylab, yfnt):
        """Set both axis labels with their font sizes."""
        self.ax.set_xlabel(xlab, fontsize=xfnt)
        self.ax.set_ylabel(ylab, fontsize=yfnt)
        return

    def show_lab(self, x, only_2sig=False):
        """Draw the legend at location `x`, appending proxy patches for
        the shaded 1-sigma (unless only_2sig) and 2-sigma bands."""
        handles, labels = self.ax.get_legend_handles_labels()
        if not only_2sig:
            p1 = Rectangle((0, 0), 1, 1, fc="blue", alpha=0.8)
            handles.append(p1)
            labels.append(r'$1-\sigma$')
        p2 = Rectangle((0, 0), 1, 1, fc="blue", alpha=0.5)
        handles.append(p2)
        labels.append(r'$2-\sigma$')
        # [p1, p2], [r'$1-\sigma$',r'$2-\sigma$']
        self.ax.legend(handles, labels, loc=x)
        return

    def draw_Contours(self, x, scale=1, smooth=0.0, alp=0.5, mode='Normal', only_2sig=False, colour='blue', draw_median=True):
        """Shade the credible bands (optionally spline-smoothed with
        smoothing factor `smooth`) and optionally draw the median."""
        if (smooth != 0.0):
            Fm = uvs(x, self.contours[:, 0], k=3, s=smooth)(x)
            Flow1 = uvs(x, self.contours[:, 1], k=3, s=smooth)(x)
            Flow2 = uvs(x, self.contours[:, 3], k=3, s=smooth)(x)
            Fhigh1 = uvs(x, self.contours[:, 2], k=3, s=smooth)(x)
            Fhigh2 = uvs(x, self.contours[:, 4], k=3, s=smooth)(x)
        else:
            Fm = self.contours[:, 0]
            Flow1 = self.contours[:, 1]
            Flow2 = self.contours[:, 3]
            Fhigh1 = self.contours[:, 2]
            Fhigh2 = self.contours[:, 4]
        self.ax.fill_between(x, Fhigh2 * scale, Flow2 * scale, facecolor=colour, edgecolor=colour, alpha=alp,
                             label=r'$2-\sigma$')
        if not only_2sig:
            self.ax.fill_between(x, Fhigh1 * scale, Flow1 * scale, facecolor=colour, edgecolor=colour, alpha=alp, label=r'$1-\sigma$')
        if draw_median:
            if mode == 'Cheat':
                # 'Cheat' mode re-splines the median onto a grid that stops
                # one point short of the end (avoids edge artefacts).
                xc = np.linspace(x[0], x[-2], x.size)
                Fm = uvs(x, Fm, k=3, s=smooth)(xc)
            self.ax.plot(x, Fm * scale, colour, label=r'$Median$', alpha=1.0)
        return

    def draw_Upper(self, x, F_cut, F_LTB, scale=1, alp=0.5):
        """Shade the region between the upper 2-sigma contour and the
        FLRW cutoff curve `F_cut`, and below `F_cut` down to ~zero.
        `F_LTB` is currently unused (its plot line is commented out)."""
        Fhigh2 = self.contours[:, 4]
        self.ax.fill_between(x, Fhigh2 * scale, F_cut * scale, facecolor='gray', edgecolor='gray', alpha=0.5,
                             label=r'$2-\sigma$', lw=0.0)
        self.ax.fill_between(x, F_cut * scale, np.zeros(x.size)+1e-16, facecolor='gray', edgecolor='gray', alpha=0.9,
                             label=r'$FLRW \ uv-cut=200Mpc$', lw=0.0)
        #self.ax.plot(x, self.contours[:, 0] * scale, 'blue', label=r'$Median$', alpha=1.0)
        #self.ax.plot(x, F_LTB, 'm', label=r'$LTB \ (t_B = 0)$', lw=1.5)
        # handles, labels = self.ax.get_legend_handles_labels()
        # p1 = Rectangle((0, 0), 1, 1, fc="red", alpha=alp)
        # handles.append(p1)
        # labels.append(r'$FLRW \ uv-cut=200Mpc$')
        # p2 = Rectangle((0, 0), 1, 1, fc="blue", alpha=alp)
        # handles.append(p2)
        # labels.append(r'$Upper \ 2-\sigma$')
        # # [p1, p2], [r'$1-\sigma$',r'$2-\sigma$']
        # self.ax.legend(handles, labels, loc=2)
        return
def Plot_Data(zmax,Np,Nret,tmin,err,data_prior,data_lik,fname,Nsamp):
    """Produce the paper's summary figures from saved MCMC samples.

    Integrates reference LCDM and LTB models, loads the posterior samples
    from `fname + 'Processed_Data/Samps.npz'` and the observational data
    from `fname + 'Data/'`, then writes PLC0.png, CP.png, sigmasq.png and
    Contours.png under `fname + 'Figures/'`.  Python 2 only.
    """
    print "Getting LCDM vals"
    # Get FLRW funcs for comparison
    Om0 = 0.3
    OL0 = 0.7
    H0 = 0.2335
    LCDM = FLRW(Om0, OL0, H0, zmax, Np)
    HzF = LCDM.Hz
    rhozF = LCDM.getrho()
    # sigmasqFz10 = LCDM.get_sigmasq(2.41e-9, 0.1)*HzF**2
    # sigmasqFz20 = LCDM.get_sigmasq(2.41e-9, 0.05) * HzF ** 2
    # sigmasqFz50 = LCDM.get_sigmasq(2.41e-9, 0.02) * HzF ** 2
    sigmasqFz100 = LCDM.get_sigmasq(2.41e-9, 0.005) * HzF ** 2
    v = LCDM.getnuz()
    # sigmasq10o = uvs(v/v[-1], sigmasqFz10, k =3, s=0.0)
    # sigmasq20o = uvs(v/v[-1], sigmasqFz20, k =3, s=0.0)
    # sigmasq50o = uvs(v/v[-1], sigmasqFz50, k =3, s=0.0)
    sigmasq100o = uvs(v / v[-1], sigmasqFz100, k=3, s=0.0)
    #sigmasqiF = sigmasqo(np.linspace(0, 1, Nret))
    # Do integration of FLRW funcs
    zp = np.linspace(0, zmax, Np)
    #zp2 = np.linspace(0, zmax, 200)
    LamF = 3 * 0.7 * 0.2335 ** 2
    Xrho = np.array([0.5,2.8])
    XH = np.array([0.6,3.5])
    #set characteristic variance of Lambda prior (here 60%)
    sigmaLam = 0.6*3*0.7*(70.0/299.79)**2
    # Do LCDM integration
    UF = SSU(zmax, tmin, Np, err, XH, Xrho, sigmaLam, Nret, data_prior, data_lik, fname, Hz=HzF, rhoz=rhozF, Lam=LamF, useInputFuncs=True)
    # Get quantities of interrest
    T1iF, T1fF, T2iF, T2fF, LLTBConsiF, LLTBConsfF, DiF, DfF, SiF, \
        SfF, QiF, QfF, AiF, AfF, ZiF, ZfF, SpiF, SpfF, QpiF, QpfF, \
        ZpiF, ZpfF, uiF, ufF, upiF, upfF, uppiF, uppfF, udotiF, udotfF, \
        rhoiF, rhofF, rhopiF, rhopfF, rhodotiF, rhodotfF, DzF, dzdwzF, sigmasqiF, sigmasqfF = UF.get_funcs()
    # sigmasqiF10 = sigmasq10o(np.linspace(0, 1, Nret))
    # sigmasqiF20 = sigmasq20o(np.linspace(0, 1, Nret))
    sigmasqiF100 = sigmasq100o(np.linspace(0, 1, Nret))
    # Do LTB integration
    print "Getting LTB vals"
    #LTB_z_funcs = np.load(fname + 'Processed_Data/LTB_z_funcs.npz')
    LTB_z_funcs = np.load(fname + 'Processed_Data/ConLTBDat.npz')
    print LTB_z_funcs.keys()
    HzLT = LTB_z_funcs['Hz']
    rhozLT = LTB_z_funcs['rhoz']
    zLT = LTB_z_funcs['z']
    # Re-sample the LTB curves onto the common zp grid.
    HzLT = uvs(zLT,HzLT,k=3,s=0.0)(zp)
    rhozLT = uvs(zLT, rhozLT, k=3, s=0.0)(zp)
    # plt.figure('Hz')
    # plt.plot(zp,HzLT,'b')
    # plt.plot(zp,HzF,'g')
    # plt.savefig('/home/landman/Projects/CP_LCDM/Figures/LTBvLCDM_Hz.png',dpi=200)
    # plt.figure('rhoz')
    # plt.plot(zp,rhozLT,'b')
    # plt.plot(zp,rhozF,'g')
    # plt.savefig('/home/landman/Projects/CP_LCDM/Figures/LTBvLCDM_rhoz.png', dpi=200)
    ULT = SSU(zmax, tmin, Np, err, XH, Xrho, sigmaLam, Nret, data_prior, data_lik, fname, Hz=HzLT, rhoz=rhozLT, Lam=0.0, useInputFuncs=True)
    # Get quantities of interrest
    print "Getting quantities of interest"
    T1iLT, T1fLT, T2iLT, T2fLT, LLTBConsiLT, LLTBConsfLT, DiLT, DfLT, SiLT, \
        SfLT, QiLT, QfLT, AiLT, AfLT, ZiLT, ZfLT, SpiLT, SpfLT, QpiLT, QpfLT, \
        ZpiLT, ZpfLT, uiLT, ufLT, upiLT, upfLT, uppiLT, uppfLT, udotiLT, udotfLT, \
        rhoiLT, rhofLT, rhopiLT, rhopfLT, rhodotiLT, rhodotfLT, DzLT, dzdwzLT, sigmasqiLT, sigmasqfLT = ULT.get_funcs()
    # read in data
    zD, Dz, sDz = np.loadtxt(fname + 'Data/D.txt', unpack=True)
    zH, Hz, sHz = np.loadtxt(fname + 'Data/H.txt', unpack=True)
    zrho, rhoz, srhoz = np.loadtxt(fname + 'Data/rho.txt', unpack=True)
    zdzdw, dzdwz, sdzdwz = np.loadtxt(fname + 'Data/dzdw.txt', unpack=True)
    # Load first samples
    print "Loading Samps"
    holder = np.load(fname + 'Processed_Data/Samps.npz')
    Dzlist = holder['Dz']
    Hzlist = holder['Hz']
    rhozlist = holder['rhoz']
    dzdwzlist = holder['dzdwz']
    Lamlist = holder['Lam']
    T2ilist = holder['T2i']
    T2flist = holder['T2f']
    T1ilist = holder['T1i']
    T1flist = holder['T1f']
    sigmasqilist = holder['sigmasqi']
    sigmasqflist = holder['sigmasqf']
    LLTBConsilist = holder['LLTBConsi']
    LLTBConsflist = holder['LLTBConsf']
    NSamplers = holder['NSamplers']
    # Load the rest of the data (concatenate the chains of all samplers)
    for i in xrange(NSamplers):
        if i > 0:
            Dzsamps = np.append(Dzsamps, Dzlist[i], axis=1)
            Hzsamps = np.append(Hzsamps, Hzlist[i], axis=1)
            rhozsamps = np.append(rhozsamps, rhozlist[i], axis=1)
            dzdwzsamps = np.append(dzdwzsamps, dzdwzlist[i], axis=1)
            Lamsamps = np.append(Lamsamps, Lamlist[i])
            T2i = np.append(T2i, T2ilist[i], axis=1)
            T2f = np.append(T2f, T2flist[i], axis=1)
            T1i = np.append(T1i, T1ilist[i], axis=1)
            T1f = np.append(T1f, T1flist[i], axis=1)
            sigmasqi = np.append(sigmasqi, sigmasqilist[i], axis=1)
            sigmasqf = np.append(sigmasqf, sigmasqflist[i], axis=1)
            LLTBConsi = np.append(LLTBConsi, LLTBConsilist[i], axis=1)
            LLTBConsf = np.append(LLTBConsf, LLTBConsflist[i], axis=1)
        else:
            Dzsamps = Dzlist[0]
            Hzsamps = Hzlist[0]
            rhozsamps = rhozlist[0]
            dzdwzsamps = dzdwzlist[0]
            Lamsamps = Lamlist[0]
            T2i = T2ilist[0]
            T2f = T2flist[0]
            T1i = T1ilist[0]
            T1f = T1flist[0]
            sigmasqi = sigmasqilist[0]
            sigmasqf = sigmasqflist[0]
            LLTBConsi = LLTBConsilist[0]
            LLTBConsf = LLTBConsflist[0]
    # Derived density parameters at z=0 for the contour plot.
    Om0samps = 8 * np.pi * rhozsamps[0,:] / (3 * Hzsamps[0,:] ** 2)
    OL0samps = Lamsamps / (3 * Hzsamps[0,:] ** 2)
    # 3 2x2 figures with functions contours
    # The first is for data on the PLC0
    figPLC0, axPLC0 = plt.subplots(nrows=2, ncols=2, figsize=(15, 9), sharex=True)
    # The second for CP tests
    figCP, axCP = plt.subplots(nrows=2, ncols=2, figsize=(15, 9), sharex=True, sharey=True)
    # The third for t slice
    figsigmasq, axsigmasq = plt.subplots(nrows=1, ncols=1, figsize=(11, 11), sharex=True)
    #Get contours and set figure labels and lims
    print 'PLC0'
    Dplh = plh(Dzsamps, axPLC0[0, 0])
    axPLC0[0, 0].set_ylabel(r'$ D / [Gpc]$', fontsize=20)
    axPLC0[0, 0].set_ylim(0.0, 2.0)
    Hplh = plh(Hzsamps, axPLC0[0, 1])
    axPLC0[0, 1].set_ylabel(r'$ H_\parallel / [km s^{-1} Mpc^{-1}]$', fontsize=20)
    axPLC0[0, 1].set_ylim(65, 220.0)
    rhoplh = plh(rhozsamps, axPLC0[1, 0])
    axPLC0[1, 0].set_xlabel(r'$z$', fontsize=20)
    axPLC0[1, 0].set_xlim(0, zmax)
    axPLC0[1, 0].set_ylabel(r'$\frac{\rho}{\rho_c} $', fontsize=30)
    axPLC0[1, 0].set_ylim(0, 10.0)
    dzdwplh = plh(dzdwzsamps, axPLC0[1, 1])
    axPLC0[1, 1].set_xlabel(r'$z$', fontsize=20)
    axPLC0[1, 1].set_xlim(0, zmax)
    axPLC0[1, 1].set_ylabel(r'$ \frac{\delta z}{\delta w} / [Gyr^{-1}] $', fontsize=20)
    #axPLC0[1, 1].set_ylim(-1.25, 0.125)
    print 'CP'
    T1iplh = plh(T1i, axCP[0, 0])
    axCP[0, 0].set_ylabel(r'$ T_1 $', fontsize=20)
    T1fplh = plh(T1f, axCP[0, 1])
    T2iplh = plh(T2i, axCP[1, 0])
    axCP[1, 0].set_ylabel(r'$ T_2 $', fontsize=20)
    axCP[1, 0].set_xlabel(r'$ \frac{v}{v_{max}} $', fontsize=20)
    axCP[1, 0].set_xlim(0.0, 1.0)
    axCP[1, 0].set_ylim(-0.8, 0.3)
    T2fplh = plh(T2f, axCP[1, 1])
    axCP[1, 1].set_xlabel(r'$ \frac{v}{v_{max}} $', fontsize=20)
    print 'sigmasq'
    sigmasqiplh = plh(sigmasqi, axsigmasq)
    axsigmasq.set_ylabel(r'$ \sigma^2_iD^2_i $', fontsize=20)
    axsigmasq.set_xlabel(r'$ \frac{z}{z_{max}}$', fontsize=20)
    #axsigmasq[0, 0].set_ylim(0, 1.5)
    #sigmasqfplh = plh(sigmasqf, axsigmasq[1])
    #axsigmasq[1].set_ylabel(r'$ \sigma^2_fD^2_f $', fontsize=20)
    #axsigmasq[0, 1].set_ylim(0.4, 1.0)
    #
    # rhosplh = plh(rhostar, axts[1, 0])
    # axts[1, 0].set_ylabel(r'$ \frac{\rho^*}{\rho_c} $', fontsize=30)
    # axts[1, 0].set_xlabel(r'$ \frac{r}{r_{max}} $', fontsize=20)
    # axts[1, 0].set_xlim(0, 1)
    # axts[1, 0].set_ylim(0.0, 1.8)
    #
    # Hperpsplh = plh(Hperpstar, axts[1, 1])
    # axts[1, 1].set_ylabel(r'$ H_{\perp}^* / [km s^{-1} Mpc^{-1}] $', fontsize=20)
    # axts[1, 1].set_xlabel(r'$ \frac{r}{r_{max}} $', fontsize=20)
    # axts[1, 1].set_ylim(70, 100)
    # Plot contours
    print "Plotting"
    l = np.linspace(0, 1, Nret)
    # Plot mu(z) reconstruction and comparison
    Dplh.draw_Contours(zp)
    Dplh.add_plot(zp, DzF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    Dplh.add_plot(zp, DzLT,col='m',lab=r'$LTB$',wid=1.5)
    Dplh.add_data(zD, Dz, sDz, alp=0.2)
    Dplh.show_lab(4)
    # Plot H(z) reconstruction and comparison
    Hplh.draw_Contours(zp, scale=299.8)
    Hplh.add_plot(zp, HzF, col='k', scale=299.8, lab=r'$\Lambda CDM$', wid=1.5)
    # NOTE(review): the LTB curve reuses colour 'k' here (and for rho below)
    # while other panels use 'm' -- possibly unintended; confirm with author.
    Hplh.add_plot(zp,HzLT,col='k',scale=299.8,lab=r'$LTB$',wid=1.5)
    Hplh.add_data(zH, Hz, sHz, scale=299.8, alp=0.5)
    Hplh.show_lab(4)
    # Plot rho(z) reconstruction and comparison
    rhoplh.draw_Contours(zp, scale=153.66)
    rhoplh.add_plot(zp, rhozF, col='k', scale=153.66, lab=r'$\Lambda CDM$', wid=1.5)
    rhoplh.add_plot(zp,rhozLT,col='k',scale=153.66,lab=r'$LTB$',wid=1.5)
    rhoplh.add_data(zrho, rhoz, srhoz, alp=0.5, scale=153.66)
    rhoplh.show_lab(2)
    # Plot dzdw(z) reconstruction and comparison
    dzdwplh.draw_Contours(zp)
    dzdwplh.add_plot(zp, dzdwzF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    dzdwplh.add_plot(zp, dzdwzLT,col='m',lab=r'$LTB$',wid=1.5)
    dzdwplh.add_data(zdzdw,dzdwz,sdzdwz,alp=0.5)
    dzdwplh.show_lab(3)
    # Plot T2i(v) reconstruction and comparison
    T2iplh.draw_Contours(l)
    T2iplh.add_plot(l, T2iF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    T2iplh.add_plot(l, T2iLT, col='k', lab=r'$LTB$', wid=1.5)
    # Plot T2f(v) reconstruction and comparison
    T2fplh.draw_Contours(l)
    T2fplh.add_plot(l, T2fF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    T2fplh.add_plot(l, T2fLT, col='k', lab=r'$LTB$', wid=1.5)
    T2fplh.show_lab(2)
    # Plot T1i(v) reconstruction and comparison
    T1iplh.draw_Contours(l)
    T1iplh.add_plot(l, T1iF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    T1iplh.add_plot(l, T1iLT, col='k', lab=r'$LTB$', wid=1.5)
    # Plot T1f(v) reconstruction and comparison
    T1fplh.draw_Contours(l)
    T1fplh.add_plot(l, T1fF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    T1fplh.add_plot(l, T1fLT, col='k', lab=r'$LTB$', wid=1.5)
    # # Plot rhostar reconstruction and comparison
    # rhosplh.draw_Contours(l, scale=153.66)
    # rhosplh.add_plot(l, rhostarF, col='k', scale=153.66, lab=r'$\Lambda CDM$', wid=1.5)
    # # rhosplh.add_plot(l,rhostarConLTB,col='k',scale=153.66,lab=r'$LTB1$',wid=1.5)
    # # rhosplh.add_plot(l,rhostarLTB,col='m',scale=153.66,lab=r'$LTB2$',wid=1.5)
    # # rhosplh.show_lab(2)
    # Plot sigmasqi reconstruction
    sigmasqiplh.draw_Upper(l, sigmasqiF100, sigmasqiLT)
    #sigmasqiplh.add_plot(l, sigmasqiF10, col='k', lab=r'$\Lambda CDM \ uv-cut=10$', wid=1.5)
    #sigmasqiplh.add_plot(l, sigmasqiF20, col='y', lab=r'$\Lambda CDM \ uv-cut=20$', wid=1.5)
    #sigmasqiplh.add_plot(l, sigmasqiF100, col='c', lab=r'$\Lambda CDM \ uv-cut=100Mpc$', wid=1.5)
    #sigmasqiplh.add_plot(l, sigmasqiLT,col='m',lab=r'$t_B = 0 \ LTB$',wid=1.5)
    axsigmasq.set_yscale('log')
    axsigmasq.set_ylim(1e-13, 1e-2)
    #sigmasqiplh.show_lab(0)
    # # Plot Xstar reconstruction
    # sigmasqfplh.draw_Contours(l)
    # sigmasqfplh.add_plot(l, sigmasqfF, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    # sigmasqfplh.add_plot(l, sigmasqfLT,col='m',lab=r'$LTB$',wid=1.5)
    # sigmasqfplh.show_lab(2)
    # # Plot Xstar reconstruction
    # Hperpsplh.draw_Contours(l, scale=299.8)
    # Hperpsplh.add_plot(l, HperpF * 299.8, col='k', lab=r'$\Lambda CDM$', wid=1.5)
    # # Xsplh.add_plot(l,XstarConLTB,col='k',lab=r'$LTB1$',wid=1.5)
    # # Xsplh.add_plot(l,XstarLTB,col='m',lab=r'$LTB2$',wid=1.5)
    # # Hperpsplh.show_lab(4)
    #figPLC0.tight_layout(pad=1.08, h_pad=0.0, w_pad=0.6)
    figCP.tight_layout(pad=1.08, h_pad=0.0, w_pad=0.0)
    #figts.tight_layout(pad=1.08, h_pad=0.0, w_pad=0.6)
    figPLC0.savefig(fname + 'Figures/PLC0.png', dpi=250)
    figCP.savefig(fname + 'Figures/CP.png', dpi=250)
    figsigmasq.savefig(fname + 'Figures/sigmasq.png', dpi=500)
    # Do contour plots
    print "Doing Om v OL contours"
    figConts, axConts = plt.subplots(nrows=1, ncols=2, figsize=(15, 9))
    # First Om v OL
    pl2d(Om0samps, OL0samps, axConts[0])
    axConts[0].plot(l, 1 - l, 'k', label='Flat', alpha=0.5)
    axConts[0].set_xlabel(r'$\Omega_{m0}$', fontsize=25)
    axConts[0].set_ylabel(r'$\Omega_{\Lambda 0}$', fontsize=25)
    axConts[0].set_xlim(0.0, 1.0)
    axConts[0].set_ylim(0.0, 1.5)
    handles, labels = axConts[0].get_legend_handles_labels()
    p1 = Rectangle((0, 0), 1, 1, fc="blue", alpha=0.8)
    handles.append(p1)
    labels.append(r'$1-\sigma$')
    p2 = Rectangle((0, 0), 1, 1, fc="blue", alpha=0.5)
    handles.append(p2)
    labels.append(r'$2-\sigma$')
    axConts[0].legend(handles, labels, loc=1)
    #
    # pl2d(t0samps / 0.3064, Lamsamps, axConts[1])
    axConts[1].hist2d(Om0samps,OL0samps)
    # axConts[1].set_xlabel(r'$t_0 /[Gyr]$', fontsize=25)
    # axConts[1].set_ylabel(r'$\Lambda$', fontsize=25)
    # axConts[1].set_xlim(10, 20)
    # axConts[1].set_ylim(0.0, 0.25)
    # handles, labels = axConts[1].get_legend_handles_labels()
    # p1 = Rectangle((0, 0), 1, 1, fc="blue", alpha=0.8)
    # handles.append(p1)
    # labels.append(r'$1-\sigma$')
    # p2 = Rectangle((0, 0), 1, 1, fc="blue", alpha=0.5)
    # handles.append(p2)
    # labels.append(r'$2-\sigma$')
    # axConts[1].legend(handles, labels, loc=1)
    figConts.savefig(fname + 'Figures/Contours.png', dpi=250)
if __name__=="__main__":
    # Get input args (GD is a dict of parsed command-line options)
    GD = MyOptParse.readargs()
    #Determine how many samplers to spawn
    NSamplers = GD["nwalkers"]
    Nsamp = GD["nsamples"]
    Nburn = GD["nburnin"]
    tstar = GD["tstar"]
    DoPLCF = GD["doplcf"]
    DoTransform = GD["dotransform"]
    fname = GD["fname"]
    data_prior = GD["data_prior"]
    data_lik = GD["data_lik"]
    zmax = GD["zmax"]
    Np = GD["np"]
    Nret = GD["nret"]
    err = GD["err"]
    # Do the plots (NSamplers/Nburn/DoPLCF/DoTransform are read but unused here)
    Plot_Data(zmax,Np,Nret,tstar,err,data_prior,data_lik,fname,Nsamp)
anirudhjayaraman/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
# Extent of the clickable plot region in data coordinates (both axes).
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """The Model which hold the data. It implements the
    observable in the observer pattern and notifies the
    registered observers on change event.
    """

    def __init__(self):
        self.observers = []
        self.surface = None
        self.data = []
        self.cls = None
        self.surface_type = 0

    def changed(self, event):
        """Broadcast `event` to every registered observer."""
        for listener in self.observers:
            listener.update(event, self)

    def add_observer(self, observer):
        """Subscribe `observer` to future change notifications."""
        self.observers.append(observer)

    def set_surface(self, surface):
        self.surface = surface

    def dump_svmlight_file(self, file):
        """Write the collected (x, y, label) examples to `file` in
        svmlight format via sklearn's module-level helper."""
        points = np.array(self.data)
        dump_svmlight_file(points[:, :2], points[:, 2], file)
class Controller(object):
    """GUI controller: reads the Tk widget variables, fits the SVM and
    pushes the resulting decision surface into the Model."""

    def __init__(self, model):
        self.model = model
        self.kernel = Tk.IntVar()
        self.surface_type = Tk.IntVar()
        # Whether or not a model has been fitted
        self.fitted = False

    def fit(self):
        """Fit an SVM to the current examples and update the model.

        Uses OneClassSVM when all examples share one label, SVC
        otherwise; hyperparameters come from the Tk entry variables
        (complexity/gamma/coef0/degree set by ControllBar).
        """
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]

        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        if len(np.unique(y)) == 1:
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        # Notify observers (the View) that a new surface is available.
        self.model.changed("surface")

    def decision_surface(self, cls):
        """Evaluate `cls.decision_function` on a unit-spaced grid over
        the plot region; returns the meshgrid and the Z values."""
        delta = 1
        x = np.arange(x_min, x_max + delta, delta)
        y = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(x, y)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z

    def clear_data(self):
        """Drop all examples and reset the fitted flag."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")

    def add_example(self, x, y, label):
        """Append one (x, y, label) example from a mouse click."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")

        # update decision surface if already fitted.
        self.refit()

    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Matplotlib/Tk view: renders examples, support vectors and the
    decision surface, and forwards mouse clicks to the controller.
    Registered as an observer of the Model (see its update())."""

    def __init__(self, root, controller):
        f = Figure()
        ax = f.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        canvas = FigureCanvasTkAgg(f, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = f
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        self.contours = []
        self.c_labels = None
        self.plot_kernels()

    def plot_kernels(self):
        """Write the three kernel formulas below the plot area."""
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")

    def onclick(self, event):
        """Left click adds a positive example, right click a negative one."""
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)

    def update_example(self, model, idx):
        """Draw the example at `idx` (white = +1, black = -1)."""
        x, y, l = model.data[idx]
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)

    def update(self, event, model):
        """Observer callback: redraw according to the change `event`."""
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)

        if event == "example_added":
            self.update_example(model, -1)

        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()

        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)

        self.canvas.draw()

    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []

    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adds the circle collection
        to the contours list."""
        cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                             s=80, edgecolors="k", facecolors="none")
        self.contours.append(cs)

    def plot_decision_surface(self, surface, type):
        """Draw the surface either as margin hyperplanes (type 0) or as
        a filled contour map (type 1)."""
        X1, X2, Z = surface
        if type == 0:
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Tk control strip: kernel radio buttons, hyperparameter entries,
    surface-type selection and the Fit/Clear buttons.  The entry
    StringVars are attached to the controller (complexity, gamma,
    degree, coef0) so Controller.fit() can read them."""

    def __init__(self, root, controller):
        fm = Tk.Frame(root)
        # Kernel selection (values match Controller's kernel_map keys).
        kernel_group = Tk.Frame(fm)
        Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
                       value=0, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
                       value=1, command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
                       value=2, command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)

        # Hyperparameter entry boxes with their defaults.
        valbox = Tk.Frame(fm)
        controller.complexity = Tk.StringVar()
        controller.complexity.set("1.0")
        c = Tk.Frame(valbox)
        Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
            side=Tk.LEFT)
        c.pack()

        controller.gamma = Tk.StringVar()
        controller.gamma.set("0.01")
        g = Tk.Frame(valbox)
        Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
        g.pack()

        controller.degree = Tk.StringVar()
        controller.degree.set("3")
        d = Tk.Frame(valbox)
        Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
        d.pack()

        controller.coef0 = Tk.StringVar()
        controller.coef0.set("0")
        r = Tk.Frame(valbox)
        Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
        Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
        r.pack()
        valbox.pack(side=Tk.LEFT)

        # Surface rendering style (hyperplanes vs filled contour map).
        cmap_group = Tk.Frame(fm)
        Tk.Radiobutton(cmap_group, text="Hyperplanes",
                       variable=controller.surface_type, value=0,
                       command=controller.refit).pack(anchor=Tk.W)
        Tk.Radiobutton(cmap_group, text="Surface",
                       variable=controller.surface_type, value=1,
                       command=controller.refit).pack(anchor=Tk.W)
        cmap_group.pack(side=Tk.LEFT)

        train_button = Tk.Button(fm, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        fm.pack(side=Tk.LEFT)
        Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Return an ``optparse.OptionParser`` exposing the GUI's CLI options.

    The only option is ``--output``: a path where the collected data
    points are dumped (svmlight format) after the window closes.
    """
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option(
        "--output",
        action="store",
        type="str",
        dest="output",
        help="Path where to dump data.",
    )
    return parser
def main(argv):
    """Parse CLI options, run the GUI event loop, then optionally dump data.

    ``Tk.mainloop()`` blocks until the window is closed; only afterwards is
    the model written to ``--output`` (if given).
    """
    parser = get_parser()
    options, _unused_args = parser.parse_args(argv[1:])
    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    view = View(root, controller)
    model.add_observer(view)
    Tk.mainloop()
    if options.output:
        model.dump_svmlight_file(options.output)
# Script entry point: forward argv so --output is honoured.
if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause |
mantidproject/mantid | qt/python/mantidqt/widgets/test/test_fitpropertybrowserplotinteraction.py | 3 | 10600 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest.mock import Mock, MagicMock, ANY
from matplotlib.lines import Line2D
from mantid.plots import MantidAxes
from mantid.simpleapi import CreateSampleWorkspace
from mantidqt.widgets.fitpropertybrowser import FitPropertyBrowser
from mantidqt.widgets.fitpropertybrowser.fitpropertybrowserplotinteraction import FitPropertyBrowserPlotInteraction
from mantid.api import AnalysisDataService, FunctionFactory, WorkspaceFactory
import matplotlib
# Non-interactive backend so the tests never need a display.
matplotlib.use('AGG') # noqa
# Column labels used when the input workspace is a TableWorkspace.
X_COLUMN_LABEL = 'x_column'
Y_COLUMN_LABEL = 'y_column'
# Composite fit function (flat + linear background + GausOsc) and its
# three component functions, used as fixtures throughout the tests.
FULL_FUNCTION = FunctionFactory.createInitialized("name=FlatBackground,A0=1;name=LinearBackground,A0=1,"
                                                  "A1=2;name=GausOsc,A=0.2,Sigma=0.2,Frequency=0.1,Phi=0")
FUNCTION_1 = FunctionFactory.createInitialized("name=FlatBackground,A0=1")
FUNCTION_2 = FunctionFactory.createInitialized("name=LinearBackground,A0=1,A1=2")
FUNCTION_3 = FunctionFactory.createInitialized("name=GausOsc,A=0.2,Sigma=0.2,Frequency=0.1,Phi=0")
class FitPropertyBrowserPlotInteractionTest(unittest.TestCase):
    """Unit tests for ``FitPropertyBrowserPlotInteraction``.

    The fit browser, canvas, figure and axes are all mocked; only the
    plot-interaction object under test is real.  Helper methods build the
    workspaces and mock handlers/lines the individual tests need.
    """

    def setup_mock_fit_browser(self, workspace_creator, workspace_name, function, function_prefix):
        """Create a workspace and point the mocked browser at it and at a mock handler."""
        workspace_creator(workspace_name)
        self.fit_browser.workspaceName = Mock(return_value=workspace_name)
        self.fit_browser.currentHandler.return_value = self.create_mock_handler(function, function_prefix)

    def create_table_workspace(self, table_name):
        """Register a small two-column TableWorkspace in the ADS and configure the browser for it."""
        table = WorkspaceFactory.createTable()
        table.addColumn('double', X_COLUMN_LABEL, 1)
        table.addColumn('double', Y_COLUMN_LABEL, 2)
        for i in range(1, 10):
            table.addRow([0.1 * i, 5])
        AnalysisDataService.Instance().addOrReplace(table_name, table)
        self.fit_browser.getXColumnName.return_value = X_COLUMN_LABEL
        self.fit_browser.getYColumnName.return_value = Y_COLUMN_LABEL
        self.fit_browser.getErrColumnName.return_value = None
        self.fit_browser.startX.return_value = 0.15
        self.fit_browser.endX.return_value = 0.95

    def create_workspace2D(self, workspace_name):
        """Create a sample Workspace2D and configure the browser's index/range for it."""
        CreateSampleWorkspace(OutputWorkspace=workspace_name)
        self.fit_browser.workspaceIndex.return_value = 1
        self.fit_browser.startX.return_value = 0
        self.fit_browser.endX.return_value = 20000

    def create_mock_handler(self, function, function_prefix):
        """Return a mock property handler exposing the given function and prefix."""
        mock_handler = MagicMock()
        mock_handler.ifun = MagicMock(return_value=function)
        mock_handler.functionPrefix = MagicMock(return_value=function_prefix)
        return mock_handler

    def create_mock_guess_lines(self):
        """Install three mock guess lines (f0/f1/f2) on the interaction and return them."""
        line_1, line_2, line_3 = MagicMock(spec=Line2D), MagicMock(spec=Line2D), MagicMock(spec=Line2D)
        mock_lines = [("f0." + FUNCTION_1.name(), line_1), ("f1." + FUNCTION_2.name(), line_2),
                      ("f2." + FUNCTION_3.name(), line_3)]
        self.browser_plot_interaction.guess_lines = dict(mock_lines)
        return line_1, line_2, line_3

    def setUp(self):
        self.fit_browser = MagicMock(spec=FitPropertyBrowser)
        self.fit_browser.getFittingFunction = Mock(return_value=FULL_FUNCTION)
        # Mock figure
        self.canvas = MagicMock()
        self.figure = MagicMock()
        self.axes = MagicMock(spec=MantidAxes)
        self.figure.get_axes.return_value = [self.axes]
        self.canvas.figure = self.figure
        self.browser_plot_interaction = FitPropertyBrowserPlotInteraction(self.fit_browser, self.canvas)

    def tearDown(self):
        # Drop any workspaces the helpers registered.
        AnalysisDataService.clear()

    def test_plot_guess_all_evaluates_correct_function(self):
        workspace_name = "test_workspace"
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FULL_FUNCTION, "")
        self.browser_plot_interaction.evaluate_function = Mock()
        self.browser_plot_interaction.plot_guess_all()
        self.browser_plot_interaction.evaluate_function.assert_called_once_with(workspace_name, FULL_FUNCTION,
                                                                               workspace_name + '_guess')

    def test_plot_guess_all_correctly_calls_plot(self):
        workspace_name = "test_workspace"
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FULL_FUNCTION, "")
        self.browser_plot_interaction.plot_guess_all()
        self.figure.get_axes.assert_called_once()
        self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=workspace_name + '_guess',
                                               distribution=True,
                                               update_axes_labels=False, autoscale_on_update=False)

    def test_plot_current_guess_evaluates_correct_function(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.evaluate_function = Mock()
        self.browser_plot_interaction.plot_current_guess()
        self.browser_plot_interaction.evaluate_function.assert_called_once_with(workspace_name, FUNCTION_2,
                                                                               prefix + '.' + FUNCTION_2.name())

    def test_plot_current_guess_correctly_calls_plot(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.plot_current_guess()
        self.figure.get_axes.assert_called_once()
        self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=prefix + '.' + FUNCTION_2.name(),
                                               distribution=True,
                                               update_axes_labels=False, autoscale_on_update=False)

    def test_plot_guess_all_plots_for_table_workspaces(self):
        table_name = "table_name"
        function = FUNCTION_2
        self.setup_mock_fit_browser(self.create_table_workspace, table_name, function, "")
        self.browser_plot_interaction.plot_guess_all()
        self.figure.get_axes.assert_called_once()
        self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=table_name + '_guess',
                                               distribution=True,
                                               update_axes_labels=False, autoscale_on_update=False)

    def test_remove_function_correctly_updates_stored_prefixed_functions(self):
        # Removing f1 should shift f2.GausOsc down to the f1 prefix.
        workspace_name = "test_workspace"
        prefix = 'f1'
        self.create_mock_guess_lines()
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.slot_for_function_removed()
        self.assertEqual(list(self.browser_plot_interaction.guess_lines.keys()), ['f0.FlatBackground', 'f1.GausOsc'])

    def test_remove_function_correctly_removes_line(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        line_1, line_2, line_3 = self.create_mock_guess_lines()
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.slot_for_function_removed()
        line_2.remove.assert_called_once()

    def test_remove_function_correctly_updates_legend(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        line_1, line_2, line_3 = self.create_mock_guess_lines()
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.slot_for_function_removed()
        # Make legend will be called twice, once when removing the line and the second time to update the legend
        # based on the new prefixes
        self.assertEqual(self.axes.make_legend.call_count, 2)
        line_3.set_label.assert_called_once_with('f1.GausOsc')

    def test_remove_function_updates_guess_all(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        old_line = MagicMock(spec=Line2D)
        self.browser_plot_interaction.guess_all_line = old_line
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.slot_for_function_removed()
        # The overall guess is re-plotted with the colour of the line it replaces.
        old_line.remove.assert_called_once()
        self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=workspace_name + '_guess',
                                               distribution=True,
                                               update_axes_labels=False, autoscale_on_update=False,
                                               color=old_line.get_color())

    def test_changing_parameters_refreshes_guess_all(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        old_line = MagicMock(spec=Line2D)
        self.browser_plot_interaction.guess_all_line = old_line
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.parameters_changed_slot('f1')
        old_line.remove.assert_called_once()
        self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=workspace_name + '_guess',
                                               distribution=True,
                                               update_axes_labels=False, autoscale_on_update=False,
                                               color=old_line.get_color())

    def test_changing_parameters_refreshes_current_guess(self):
        workspace_name = "test_workspace"
        prefix = 'f1'
        line_1, line_2, line_3 = self.create_mock_guess_lines()
        self.setup_mock_fit_browser(self.create_workspace2D, workspace_name, FUNCTION_2, prefix)
        self.browser_plot_interaction.parameters_changed_slot('f1')
        line_2.remove.assert_called_once()
        self.axes.plot.assert_called_once_with(ANY, wkspIndex=1, label=prefix + '.' + FUNCTION_2.name(),
                                               distribution=True,
                                               update_axes_labels=False, autoscale_on_update=False,
                                               color=line_2.get_color())
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
tata-antares/jet_tagging_LHCb | utils/utils.py | 1 | 10379 | from __future__ import print_function, division
import numpy
import pandas
from matplotlib import pyplot as plt
import matplotlib
from matplotlib import cm
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
from rep.utils import get_efficiencies
from rep.plotting import ErrorPlot
from rep.utils import weighted_quantile
from sklearn.metrics import roc_curve, roc_auc_score
from collections import defaultdict
# Canonical label <-> class-name mappings, kept sorted by label so that
# iteration order is deterministic (0: b jets, 1: c jets, 2: light jets).
labels_names_correspondence = {0: "b jets", 1: "c jets", 2: "light jets"}
labels_names_correspondence = OrderedDict(sorted(labels_names_correspondence.items()))
# Inverted mapping (name -> label).  A generator expression is used instead
# of the original py2-only tuple-parameter lambda (`lambda (x, y): ...`),
# which is a SyntaxError under Python 3.
names_labels_correspondence = OrderedDict(
    (name, label) for label, name in labels_names_correspondence.items())
def add_features(*arrays):
    """Append derived secondary-vertex / jet features to each DataFrame.

    Mutates each DataFrame in place (new columns) and also returns them as a
    list.  Assumes the raw columns SVM, SVPT, SVMCor, SVQ, SVN, SVX/Y/Z,
    SVPx/Py/Pz, JetSigma1/2, JetPT, MuPT and HardPT are present — TODO
    confirm against the upstream ntuple schema.
    """
    new_data = []
    for data in arrays:
        # log-ratios of the (corrected) SV mass to its transverse momentum
        data['SV_M_PT'] = numpy.log1p(data['SVM'] / data['SVPT'])
        data['SV_MC_PT'] = numpy.log1p(data['SVMCor'] / data['SVPT'])
        # clip keeps the sqrt argument non-negative when SVMCor < SVM
        data['SVM_diff'] = numpy.sqrt(numpy.clip(data['SVMCor'] ** 2 - data['SVM']**2, 0, 1e10))
        data['SV_theta'] = numpy.log1p(numpy.sqrt(numpy.clip(data['SVMCor'] ** 2 - data['SVM']**2, 0, 1e10)) / data['SVPT'])
        data['SVM_rel'] = numpy.log1p(data['SVM'] / data['SVMCor'])
        # charge per track and absolute SV charge
        data['SV_Q_N_rel'] = 1. * data['SVQ'] / data['SVN']
        data['SV_Q_abs'] = abs(data['SVQ'])
        dot_prod = lambda x, y: x[0]*y[0] + x[1]*y[1] + x[2]*y[2]
        sv_pos = (data['SVX'], data['SVY'], data['SVZ'])
        sv_p = (data['SVPx'], data['SVPy'], data['SVPz'])
        # cosine of the angle between SV position and SV momentum vectors
        data['SV_cos_angle'] = dot_prod(sv_pos, sv_p) / numpy.sqrt(dot_prod(sv_pos, sv_pos) * dot_prod(sv_p, sv_p))
        data['JetSigma1toJetSigma2'] = data['JetSigma1'] / data['JetSigma2']
        # zero-out non-finite ratios produced by division by zero above
        data.loc[~numpy.isfinite(data['JetSigma1toJetSigma2']), 'JetSigma1toJetSigma2'] = 0
        data['JetSigma1multJetSigma2'] = data['JetSigma1'] * data['JetSigma2']
        # pT fractions of the jet carried by the SV / muon / hardest track
        data['SVPTtoJetPT'] = data.SVPT.values / data.JetPT.values
        data['MuPTtoJetPT'] = data.MuPT.values / data.JetPT.values
        data['HardPTtoJetPT'] = data.HardPT.values / data.JetPT.values
        new_data.append(data)
    return new_data
def names_labels_correspondence_update(new_labels_names_correspondence):
    """Replace the module-level label<->name mappings.

    :param dict new_labels_names_correspondence: mapping label -> class name

    Bug fix: the original assigned to *local* names, so the module globals
    were never actually updated; ``global`` makes the rebinding visible to
    every other function in this module.  The inversion also avoids the
    py2-only tuple-parameter lambda.
    """
    global labels_names_correspondence, names_labels_correspondence
    labels_names_correspondence = OrderedDict(sorted(new_labels_names_correspondence.items()))
    names_labels_correspondence = OrderedDict(
        (name, label) for label, name in labels_names_correspondence.items())
def compute_weights(labels):
    """Return per-sample weights that balance the classes.

    Each class gets the same total weight (1 / class size per sample), and
    the result is normalised so the mean weight is ~1.

    Parameters
    ----------
    labels : array_like
        Label values of samples.

    Returns
    -------
    weights : array_like
        Weight of the each sample.
    """
    weights = numpy.ones(len(labels))
    for cls in numpy.unique(labels):
        members = labels == cls
        weights[members] = 1. / sum(members)
    # normalise to mean ~1; the epsilon guards against division by zero
    weights /= numpy.mean(weights) + 1e-10
    return weights
def roc_auc_score_one_vs_all(labels, pred, sample_weight):
    """
    Compute ROC AUC values for (one vs rest).

    :param array labels: labels (from 0 to 5)
    :param array pred: 1d to use it for each class, or ndim: each column corresponds to only one class
    :param array sample_weight: weights
    :return: pandas.DataFrame with ROC AUC values for each class

    NOTE(review): the docstring says labels 0..5, but the module-level
    mapping defines only 3 classes — confirm which is current.
    """
    rocs = OrderedDict()
    if len(pred.shape) == 1:
        # one shared score column: replicate it so pred[:, label] works below
        pred = numpy.vstack([pred] * len(names_labels_correspondence.keys())).T
    for key, label in names_labels_correspondence.items():
        rocs[key] = [roc_auc_score(labels == label, pred[:, label], sample_weight=sample_weight)]
    return pandas.DataFrame(rocs)
def roc_auc_score_one_vs_all_for_separate_algorithms(labels, pred, sample_weight):
    """
    Compute ROC AUC values for (one vs rest).

    :param array labels: labels (from 0 to 5)
    :param dict pred: predictions for each label to be signal (label -> score array)
    :param array sample_weight: weights
    :return: pandas.DataFrame with ROC AUC values for each class
    """
    rocs = OrderedDict()
    for key, label in names_labels_correspondence.items():
        # one-vs-rest AUC using the score column trained for this class
        rocs[key] = [roc_auc_score(labels == label, pred[label], sample_weight=sample_weight)]
    return pandas.DataFrame(rocs)
def plot_roc_one_vs_rest(labels, predictions_dict, weights=None, physics_notion=False, predictions_dict_comparison=None, separate_particles=False, algorithms_name=('MVA', 'baseline')):
    """
    Plot roc curves one versus rest.

    :param array labels: labels form 0 to 5
    :param dict(array) predictions_dict: dict of label/predictions
    :param array weights: sample weights
    :param bool physics_notion: plot efficiency vs mis-ID (log y) instead of ROC
    :param dict predictions_dict_comparison: optional second algorithm to overlay
    :param bool separate_particles: one subplot per class instead of one figure
    :param tuple algorithms_name: legend prefixes for the two prediction dicts
    """
    if separate_particles:
        plt.figure(figsize=(22, 22))
    else:
        plt.figure(figsize=(6, 4))
    for label, name in labels_names_correspondence.items():
        if separate_particles:
            plt.subplot(3, 2, label + 1)
        # overlay main predictions and (optionally) the comparison algorithm
        for preds, prefix in zip([predictions_dict, predictions_dict_comparison], algorithms_name):
            if preds is None:
                continue
            fpr, tpr, _ = roc_curve(labels == label, preds[label], sample_weight=weights)
            auc = roc_auc_score(labels == label, preds[label], sample_weight=weights)
            if physics_notion:
                plt.plot(tpr * 100, fpr * 100, label='{}, {}, AUC={:1.5f}'.format(prefix, name, auc), linewidth=2)
                plt.yscale('log', nonposy='clip')
            else:
                plt.plot(tpr, 1-fpr, label='{}, AUC={:1.5f}'.format(name, auc), linewidth=2)
        # axis labels/legend per subplot (harmless repetition on a single axes)
        if physics_notion:
            plt.xlabel('Efficiency', fontsize=22)
            plt.ylabel('Overall MisID Efficiency', fontsize=22)
        else:
            plt.xlabel('Signal efficiency', fontsize=22)
            plt.ylabel('Background rejection', fontsize=22)
        plt.legend(loc='best', fontsize=18)
def plot_roc_one_vs_one(labels, predictions_dict, weights=None):
    """
    Plot roc curves one versus one.

    :param array labels: labels form 0 to 5
    :param dict(array) predictions_dict: dict of label/predictions
    :param array weights: sample weights
    """
    plt.figure(figsize=(22, 5))
    for label, name in labels_names_correspondence.items():
        plt.subplot(1, 3, label + 1)
        for label_vs, name_vs in labels_names_correspondence.items():
            if label == label_vs:
                continue
            # restrict to the two classes; the likelihood ratio of the two
            # per-class scores is used as the discriminant
            mask = (labels == label) | (labels == label_vs)
            fpr, tpr, _ = roc_curve(labels[mask] == label,
                                    predictions_dict[label][mask] / predictions_dict[label_vs][mask],
                                    sample_weight=weights if weights is None else weights[mask])
            auc = roc_auc_score(labels[mask] == label, predictions_dict[label][mask] / predictions_dict[label_vs][mask],
                                sample_weight=weights if weights is None else weights[mask])
            plt.plot(tpr, 1-fpr, label='{} vs {}, AUC={:1.5f}'.format(name, name_vs, auc), linewidth=2)
        plt.xlabel('Signal efficiency', fontsize=22)
        plt.ylabel('Background rejection', fontsize=22)
        plt.legend(loc='best', fontsize=18)
def compute_roc_auc_matrix(labels, predictions_dict, weights=None):
    """
    Calculate class vs class roc aucs matrix.

    :param array labels: labels form 0 to 5
    :param dict(array) predictions_dict: dict of label/predictions
    :param array weights: sample weights
    :return: (figure, pandas.DataFrame) — heatmap figure and the AUC matrix
        (diagonal entries stay 1).
    """
    # Calculate roc_auc_matrices
    roc_auc_matrices = numpy.ones(shape=[len(labels_names_correspondence)] * 2)
    for label, name in labels_names_correspondence.items():
        for label_vs, name_vs in labels_names_correspondence.items():
            if label == label_vs:
                continue
            mask = (labels == label) | (labels == label_vs)
            # score ratio as discriminant for the pairwise comparison
            roc_auc_matrices[label, label_vs] = roc_auc_score(labels[mask] == label,
                                                              predictions_dict[label][mask] / predictions_dict[label_vs][mask],
                                                              sample_weight=weights if weights is None else weights[mask])
    matrix = pandas.DataFrame(roc_auc_matrices, columns=names_labels_correspondence.keys(),
                              index=names_labels_correspondence.keys())
    fig = plot_matrix(matrix)
    return fig, matrix
def plot_matrix(matrix, vmin=0.8, vmax=1., title='Particle vs particle ROC AUCs', fmt='.5f'):
    """Render a DataFrame as an annotated seaborn heatmap and return the figure.

    Saves/restores matplotlib rcParams because importing seaborn mutates them
    globally; the figure is shown and then closed.
    """
    # Plot roc_auc_matrices
    inline_rc = dict(matplotlib.rcParams)
    import seaborn as sns
    fig = plt.figure(figsize=(4, 3))
    sns.set()
    ax = plt.axes()
    sns.heatmap(matrix, vmin=vmin, vmax=vmax, annot=True, fmt=fmt, ax=ax, cmap=cm.coolwarm)
    plt.title(title, size=12)
    plt.xticks(size=12)
    plt.yticks(size=12)
    plt.show()
    plt.clf()
    plt.close()
    # undo seaborn's global style changes
    matplotlib.rcParams.update(matplotlib.rcParamsDefault)
    matplotlib.rcParams.update(inline_rc)
    return fig
def generate_plots(preds, labels, weights, data, path=''):
    """Produce the standard set of evaluation plots/tables for one model.

    ``data`` and ``path`` are currently unused except by the commented-out
    save calls — presumably kept for re-enabling file output; confirm before
    removing.
    """
    matrix_auc_one_vs_rest = roc_auc_score_one_vs_all_for_separate_algorithms(labels, preds, weights)
    print (matrix_auc_one_vs_rest)
    plot_roc_one_vs_rest(labels, preds, weights)
    # plt.savefig(os.path.join(path, 'overall_roc_auc.png'), format='png')
    f, matrix_auc_one_vs_one = compute_roc_auc_matrix(labels, preds, weights)
    # f.savefig(os.path.join(path, 'class_vs_class_roc_auc_matrix.png'), format='png')
    #matrix_auc_one_vs_rest.to_csv(os.path.join(path, 'class_vs_rest_roc_auc_matrix.csv'))
    #matrix_auc_one_vs_one.to_csv(os.path.join(path, 'class_vs_class_roc_auc_matrix.csv'))
    plot_roc_one_vs_one(labels, preds, weights)
    # plt.savefig(os.path.join(path, 'one_vs_one_roc_auc.png'), format='png')
def plot_feature_importances(feature_importances, features):
    """Horizontal bar chart of feature importances, sorted ascending.

    :param feature_importances: importance value per feature
    :param features: feature names, same order/length as the importances
    """
    imp = numpy.array(feature_importances)
    names = numpy.array(features)
    sort = imp.argsort()
    # figure height grows with the number of features (~30 per 8 inches)
    plt.figure(figsize=(12, numpy.ceil(8 * len(features) / 30.) ))
    plt.barh(range(len(imp)), imp[sort], align='center', color='b')
    plt.yticks(range(len(names)), names[sort], rotation=0)
    plt.title("Feature Importances", fontsize=15)
    plt.xlabel('Importance', fontsize=15)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=12)
    plt.ylim(-0.5, len(names))
    plt.grid(linewidth=1)
    plt.show()
popgengui/negui | setup.py | 1 | 1916 | '''
Description
'''
__filename__ = "setup.py"
__date__ = "20171105"
__author__ = "Ted Cosart<ted.cosart@umontana.edu>"
import os
from setuptools import setup, find_packages
def get_version():
    """Return the program version read from the startup.info resource.

    Looks for a line ``progversion=<value>`` in
    ``<module dir>/agestrucne/resources/startup.info`` and returns the value;
    returns ``"version.unknown"`` when the file or parameter is absent.
    """
    PARAMNAME = "progversion"
    PARAM_VAL_DELIMIT = "="
    IDX_VAL = 1
    STARTUP_INFO_LOC = "/agestrucne/resources/startup.info"

    s_version = "version.unknown"
    s_my_mod_path = os.path.abspath(__file__)
    s_my_mod_dir = os.path.dirname(s_my_mod_path)
    s_startup_info_file = s_my_mod_dir + STARTUP_INFO_LOC

    if os.path.exists(s_startup_info_file):
        # "with" guarantees the handle is closed even if parsing raises
        # (the original open()/close() pair leaked on exception).
        with open(s_startup_info_file) as o_file:
            for s_line in o_file:
                if s_line.startswith(PARAMNAME):
                    s_version = s_line.strip().split(PARAM_VAL_DELIMIT)[IDX_VAL]
                    break  # first match wins; no need to scan the rest
    return s_version
# Package metadata and install configuration.
# NOTE(review): distribution name 'agestrucnb' differs by one letter from the
# package directory 'agestrucne' used everywhere else — confirm intentional.
setup(
    name = 'agestrucnb',
    packages = [ 'agestrucne', 'agestrucne/asnviz' ],
    # version is read at build time from agestrucne/resources/startup.info
    version = get_version(),
    license = 'AGPLv3',
    description = "GUI and command line program for simulating populations using simuPOP, " \
            + "estimating Nb and Ne using LDNe, and vizualizing the results.",
    author = 'several people',
    author_email = 'agestrucne@gmail.com',
    url = '',
    download_url = '',
    keywords = ['population genetics', 'simuPOP', 'LDNe', 'AgeStructureNe'],
    classifiers = ['License :: OSI Approved :: GNU Affero General Public License v3' ],
    include_package_data=True,
    # environment-marker conditionals keep py2.7 and py3 installs working
    install_requires=[ "numpy",
                       "matplotlib",
                       "scipy",
                       "future",
                       "psutil",
                       "natsort",
                       'configparser;python_version=="2.7"',
                       'pyttk;python_version=="2.7"',
                       'simupop;python_version>="3.0"' ],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*,<=4',
    entry_points={ 'console_scripts': [ 'agestrucnb=agestrucne.negui:negui_main' ] },
    scripts=[ 'agestrucne/pgdriveneestimator.py', 'agestrucne/pgdrivesimulation.py' ]
)
| agpl-3.0 |
jniediek/combinato | combinato/plot/plot_sorted.py | 1 | 4903 | #!/usr/bin/env python3
# JN 2015-02-18
# pylint: disable=E1101,star-args
"""
plot all clusters from a channel in one overview figure
"""
from __future__ import print_function, division, absolute_import
import os
import numpy as np
import matplotlib.pyplot as mpl
from matplotlib.gridspec import GridSpec
from .. import Combinato, TYPE_NAMES, h5files
from .plot_cumulative_time import spike_cumulative
from .spike_heatmap import spike_heatmap
# Spike polarities handled by this tool.
SIGNS = ('pos', 'neg')
# Layout: size (inches) of one subplot box, columns per row, label font size.
BOXSIZE = 1
NCOLS = 7
FONTSIZE = 8
# GridSpec margins/spacing: fill the figure edge-to-edge with no gaps.
GRID_ARGS = {'left': .005,
             'right': .995,
             'bottom': .005,
             'top': .995,
             'wspace': 0,
             'hspace': 0}
def clust_overview_plot(groups, outname):
    """
    create an overview plot constructed from groups

    Each group contributes one summary box (spike heatmap overlaid with the
    cumulative spike-time curve) followed by its pre-rendered per-cluster
    images, laid out row-major NCOLS per row.  The figure is written to
    ``outname`` at 300 dpi.
    """
    nrows = 0
    if not len(groups):
        return
    # calculate number of rows
    for group in groups.values():
        # +2.1 reserves space for the summary box and rounds up per group
        nrows += np.ceil((len(group['images']) + 2.1)/NCOLS)
        # print(len(group['images']), nrows)
    nrows = max(nrows, 1)
    grid = GridSpec(int(nrows), NCOLS, **GRID_ARGS)
    fig = mpl.figure(figsize=(NCOLS*BOXSIZE, nrows*BOXSIZE))
    row = 0
    for gid in sorted(groups.keys()):
        print(gid, end=' ')
        group = groups[gid]
        gtype = TYPE_NAMES[group['type']]
        col = 0
        print('row {}/{}, col {}/{}'.format(row, nrows, col, NCOLS))
        plot = fig.add_subplot(grid[row, col])
        # summary plot
        spike_heatmap(plot, group['spikes'])
        plot.set_xticks([])
        plot.set_yticks([])
        plot.axis('off')
        # second x-axis on the same box for the cumulative time curve
        plot = plot.twiny()
        spike_cumulative(plot, np.sort(group['times']), special=False)
        plot.set_xticks([])
        plot.set_yticks([])
        # label it
        label = '{} {} {}'.format(gid, len(group['times']), gtype)
        print(label)
        pos = (plot.get_xlim()[0], plot.get_ylim()[0])
        plot.text(pos[0], pos[1], label, backgroundcolor='w',
                  va='bottom', fontsize=FONTSIZE)
        # plot all subclusters
        col = 1
        for img_name in group['images']:
            try:
                print(img_name)
                image = mpl.imread(img_name)
            except IOError as err:
                # missing/corrupt image: report and keep going
                print(err)
                continue
            if col == NCOLS:
                col = 0
                row += 1
            print('row {}/{}, col {}/{}'.format(row, nrows, col, NCOLS))
            plot = fig.add_subplot(grid[row, col])
            plot.imshow(image)
            plot.axis('off')
            plot.set_xticks([])
            plot.set_yticks([])
            col += 1
        row += 1
    # suptitle = '{} {} ... {}'.format(fname, sessions[0], sessions[-1])
    # fig.suptitle(suptitle)
    print('saving to ' + outname)
    fig.savefig(outname, dpi=300)
    mpl.close(fig)
def run_file(fname, savefolder, sign, label):
    """
    run overview plot on one file

    Loads the sorting results for ``fname`` via Combinato, collects the
    joined groups with their cluster images, and writes
    ``sorted_<entity>_<ncsname>_<sign>_<label>.png`` into ``savefolder``.
    """
    manager = Combinato(fname, sign, label)
    if not manager.initialized:
        return
    if manager.header is not None:
        entity = manager.header['AcqEntName']
    else:
        entity = 'unknown'
    # NOTE(review): this second check is unreachable — the identical check
    # above already returned (silently); confirm which message is intended.
    if not manager.initialized:
        print('could not initialize ' + fname)
        return
    # basedir = os.path.dirname(fname)
    groups = manager.get_groups_joined()
    image_dict = manager.get_groups(times=False, spikes=False)
    # attach the per-cluster image paths to each group
    for gid in groups:
        groups[gid]['images'] = []
        gtype = manager.get_group_type(gid)
        groups[gid]['type'] = gtype
        if gid in image_dict:
            for clid in image_dict[gid]:
                groups[gid]['images'].append(image_dict[gid][clid]['image'])
    wext = os.path.splitext(os.path.basename(fname))[0]
    # strip the 'data_' prefix to recover the original .ncs channel name
    ncs_fname = wext[5:]
    # sessions = manager.session_groups['pos']
    # groups = get_data_from_sessions(manager, sessions,
    #                                 sign, ['times', 'spikes'],
    #                                 skip_artifacts=False)
    if groups is None:
        return
    outname_base = 'sorted_{}_{}_{}_{}.png'.\
        format(entity, ncs_fname, sign, label)
    outname = os.path.join(savefolder, outname_base)
    clust_overview_plot(groups, outname)
def parse_args():
    """
    standard arg parser

    NOTE(review): despite the name, this is the script entry point — it also
    creates the 'overview' output folder and runs every file.
    """
    from argparse import ArgumentParser
    parser = ArgumentParser()
    # explicit file list; defaults to all h5 files in the working directory
    parser.add_argument('--files', '--datafiles', nargs='+')
    parser.add_argument('--label', required=True)
    parser.add_argument('--neg', action='store_true', default=False)
    args = parser.parse_args()
    if not os.path.isdir('overview'):
        os.mkdir('overview')
    savefolder = 'overview'
    if args.files:
        fnames = args.files
    else:
        fnames = h5files(os.getcwd())
    sign = 'neg' if args.neg else 'pos'
    label = args.label
    for fname in fnames:
        print(fname)
        run_file(fname, savefolder, sign, label)
| mit |
lbdreyer/cartopy | lib/cartopy/examples/effects_of_the_ellipse.py | 5 | 4876 | """
The effect of badly referencing an ellipse
------------------------------------------
This example demonstrates the effect of referencing your data to an incorrect
ellipse.
First we define two coordinate systems - one using the World Geodetic System
established in 1984 and the other using a spherical globe. Next we extract
data from the Natural Earth land dataset and convert the Geodetic
coordinates (referenced in WGS84) into the respective coordinate systems
that we have defined. Finally, we plot these datasets onto a map assuming
that they are both referenced to the WGS84 ellipse and compare how the
coastlines are shifted as a result of referencing the incorrect ellipse.
"""
__tags__ = ['Lines and polygons']
import cartopy.crs as ccrs
import cartopy.feature
from cartopy.io.img_tiles import MapQuestOpenAerial
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D as Line
from matplotlib.patheffects import Stroke
import numpy as np
import shapely.geometry as sgeom
from shapely.ops import transform as geom_transform
def transform_fn_factory(target_crs, source_crs):
    """
    Return a function which can be used by ``shapely.op.transform``
    to transform the coordinate points of a geometry.

    The function explicitly *does not* do any interpolation or clever
    transformation of the coordinate points, so there is no guarantee
    that the resulting geometry would make any sense.
    """
    def transform_fn(x, y, z=None):
        xs = np.asanyarray(x)
        ys = np.asanyarray(y)
        transformed = target_crs.transform_points(source_crs, xs, ys)
        # split the (n, 3) result back into x, y, z columns
        return transformed[:, 0], transformed[:, 1], transformed[:, 2]

    return transform_fn
def main():
    """Plot Solomon Islands coastlines referenced to two ellipses.

    The same Natural Earth coastline data is projected assuming a WGS84
    ellipse and a spherical globe, then both are drawn as if WGS84-referenced
    to visualise the positional error; an inset map shows the region.
    """
    # Define the two coordinate systems with different ellipses.
    wgs84 = ccrs.PlateCarree(globe=ccrs.Globe(datum='WGS84',
                                              ellipse='WGS84'))
    sphere = ccrs.PlateCarree(globe=ccrs.Globe(datum='WGS84',
                                               ellipse='sphere'))
    # Define the coordinate system of the data we have from Natural Earth and
    # acquire the 1:10m physical coastline shapefile.
    geodetic = ccrs.Geodetic(globe=ccrs.Globe(datum='WGS84'))
    dataset = cartopy.feature.NaturalEarthFeature(category='physical',
                                                  name='coastline',
                                                  scale='10m')
    # Create a MapQuest map tiler instance, and use its CRS for the GeoAxes.
    tiler = MapQuestOpenAerial()
    ax = plt.axes(projection=tiler.crs)
    plt.title('The effect of incorrectly referencing the Solomon Islands')
    # Pick the area of interest. In our case, roughly the Solomon Islands, and
    # get hold of the coastlines for that area.
    extent = (155, 163, -11.5, -6)
    ax.set_extent(extent, geodetic)
    geoms = list(dataset.intersecting_geometries(extent))
    # Add the MapQuest aerial imagery at zoom level 7.
    ax.add_image(tiler, 7)
    # Transform the geodetic coordinates of the coastlines into the two
    # projections of differing ellipses.
    wgs84_geoms = [geom_transform(transform_fn_factory(wgs84, geodetic),
                                  geom) for geom in geoms]
    sphere_geoms = [geom_transform(transform_fn_factory(sphere, geodetic),
                                   geom) for geom in geoms]
    # Using these differently referenced geometries, assume that they are
    # both referenced to WGS84.
    ax.add_geometries(wgs84_geoms, wgs84, edgecolor='white', color='none')
    ax.add_geometries(sphere_geoms, wgs84, edgecolor='gray', color='none')
    # Create a legend for the coastlines.
    legend_artists = [Line([0], [0], color=color, linewidth=3)
                      for color in ('white', 'gray')]
    legend_texts = ['Correct ellipse\n(WGS84)', 'Incorrect ellipse\n(sphere)']
    legend = plt.legend(legend_artists, legend_texts, fancybox=True,
                        loc='lower left', framealpha=0.75)
    legend.legendPatch.set_facecolor('wheat')
    # Create an inset GeoAxes showing the location of the Solomon Islands.
    sub_ax = plt.axes([0.7, 0.625, 0.2, 0.2], projection=ccrs.PlateCarree())
    sub_ax.set_extent([110, 180, -50, 10], geodetic)
    # Make a nice border around the inset axes.
    effect = Stroke(linewidth=4, foreground='wheat', alpha=0.5)
    sub_ax.outline_patch.set_path_effects([effect])
    # Add the land, coastlines and the extent of the Solomon Islands.
    sub_ax.add_feature(cartopy.feature.LAND)
    sub_ax.coastlines()
    extent_box = sgeom.box(extent[0], extent[2], extent[1], extent[3])
    sub_ax.add_geometries([extent_box], ccrs.PlateCarree(), color='none',
                          edgecolor='blue', linewidth=2)
    plt.show()
main()
| lgpl-3.0 |
Chuban/moose | modules/porous_flow/doc/tests/dirackernels.py | 10 | 7355 | #!/usr/bin/env python
import os
import sys
import numpy as np
from scipy.special import erf
import matplotlib.pyplot as plt
def bh02_expected(pressure):
    """Analytic Peaceman-borehole flow rate (kg/s) for the bh02 test.

    Fully-saturated flow with zero bottomhole pressure: rate is linear in the
    porepressure.
    """
    permeability = 1.0E-12
    element_size = 2
    well_radius = 0.1
    well_length = 1
    peaceman_re = 0.28
    # effective radius of the square element surrounding the well
    effective_r = peaceman_re * np.sqrt(element_size**2 + element_size**2) / 2.0
    well_constant = 2 * np.pi * np.sqrt(permeability**2) * well_length / np.log(effective_r / well_radius)
    fluid_density = 1000
    fluid_viscosity = 1.0E-3
    return well_constant * fluid_density * pressure / fluid_viscosity
def bh02():
    """Read the bh02 gold CSV and return (porepressure, flow rate, error) tuples.

    The flow rate is recovered as the fluid-mass change divided by the
    timestep between successive rows.
    """
    # "with" closes the file even on error (the original leaked on exception)
    with open("../../tests/dirackernels/gold/bh02.csv") as f:
        data = [line.strip().split(",") for line in f.readlines()[1:]]
    # list(map(...)): under Python 3 a bare map object is not indexable,
    # which would break data[i][4] below
    data = [list(map(float, line)) for line in data if len(line) > 5]
    pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
    return pfe
def bh03_expected(pressure):
    """Analytic Peaceman-borehole flow rate (kg/s) for the bh03 test.

    Identical to bh02 except the bottomhole pressure is 1E7 Pa, so the rate
    is proportional to (pressure - 1E7): negative means injection.
    """
    permeability = 1.0E-12
    element_size = 2
    well_radius = 0.1
    well_length = 1
    peaceman_re = 0.28
    effective_r = peaceman_re * np.sqrt(element_size**2 + element_size**2) / 2.0
    well_constant = 2 * np.pi * np.sqrt(permeability**2) * well_length / np.log(effective_r / well_radius)
    fluid_density = 1000
    fluid_viscosity = 1.0E-3
    return well_constant * fluid_density * (pressure - 1E7) / fluid_viscosity
def bh03():
    """Read the bh03 gold CSV and return (porepressure, flow rate, error) tuples."""
    # "with" closes the file even on error (the original leaked on exception)
    with open("../../tests/dirackernels/gold/bh03.csv") as f:
        data = [line.strip().split(",") for line in f.readlines()[1:]]
    # list(map(...)) keeps the rows indexable under Python 3
    data = [list(map(float, line)) for line in data if len(line) > 5]
    pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
    return pfe
def bh04_expected(pressure):
    """Analytic unsaturated borehole flow rate (kg/s) for the bh04 test.

    Van Genuchten saturation and a power-law relative permeability modulate
    the Peaceman rate; valid for negative (unsaturated) porepressures.
    """
    permeability = 1.0E-12
    element_size = 2
    well_radius = 0.1
    well_length = 1
    peaceman_re = 0.28
    effective_r = peaceman_re * np.sqrt(element_size**2 + element_size**2) / 2.0
    well_constant = 2 * np.pi * np.sqrt(permeability**2) * well_length / np.log(effective_r / well_radius)
    vg_alpha = 1.0E-5
    vg_m = 0.8
    relperm_n = 2.0
    bottom_p = -1.0E6
    fluid_bulk = 2.0E9
    density_0 = 1000
    fluid_viscosity = 1.0E-3
    # van Genuchten saturation (pressure must be <= 0 for a real result)
    saturation = (1.0 + (- vg_alpha * pressure)**(1.0 / (1.0 - vg_m)))**(- vg_m)
    relperm = (relperm_n + 1.0) * saturation**relperm_n - relperm_n * saturation**(relperm_n + 1.0)
    density = density_0 * np.exp(pressure / fluid_bulk)
    return well_constant * density * relperm * (pressure - bottom_p) / fluid_viscosity
def bh04():
    """Read the bh04 gold CSV and return (porepressure, flow rate, error) tuples."""
    # "with" closes the file even on error (the original leaked on exception)
    with open("../../tests/dirackernels/gold/bh04.csv") as f:
        data = [line.strip().split(",") for line in f.readlines()[1:]]
    # list(map(...)) keeps the rows indexable under Python 3
    data = [list(map(float, line)) for line in data if len(line) > 5]
    pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
    return pfe
def bh05_expected(pressure):
perm = 1.0E-12
ele_length = 2
radius = 0.1
bh_length = 1
re = 0.28
r0 = re * np.sqrt(ele_length**2 + ele_length**2) / 2.0
wc = 2 * np.pi * np.sqrt(perm**2) * bh_length / np.log(r0 / radius)
alpha = 1.0E-5
m = 0.8
n = 2.0
bottom_p = 0
bulk = 2.0E9
dens0 = 1000
viscosity = 1.0E-3
saturation = (1.0 + (- alpha * pressure)**(1.0 / (1.0 - m)))**(- m)
relperm = (n + 1.0) * saturation**n - n * saturation**(n + 1.0)
density = dens0 * np.exp(pressure / bulk)
return wc * density * relperm * (pressure - bottom_p) / viscosity
def bh05():
    """Read the bh05 gold CSV and return (porepressure, flow rate, error) tuples."""
    # "with" closes the file even on error (the original leaked on exception)
    with open("../../tests/dirackernels/gold/bh05.csv") as f:
        data = [line.strip().split(",") for line in f.readlines()[1:]]
    # list(map(...)) keeps the rows indexable under Python 3
    data = [list(map(float, line)) for line in data if len(line) > 5]
    pfe = [(data[i][4], data[i][1] / (data[i][0] - data[i - 1][0]), data[i][5]) for i in range(1, len(data))]
    return pfe
def bh07_expected(r):
    """Analytic steady-state porepressure at radius ``r`` for the bh07 test.

    Density varies logarithmically between the borehole wall (r = 1, P = 0)
    and the outer boundary (r = 300, P = 1E7); the pressure is recovered from
    the density via the fluid bulk modulus.
    """
    density_0 = 1000.0
    fluid_bulk = 2.0E9
    bh_pressure = 0
    bh_density = density_0 * np.exp(bh_pressure / fluid_bulk)
    boundary_pressure = 1.0E7
    boundary_density = density_0 * np.exp(boundary_pressure / fluid_bulk)
    bh_radius = 1.0
    boundary_radius = 300
    density = bh_density + (boundary_density - bh_density) * np.log(r / bh_radius) / np.log(boundary_radius / bh_radius)
    return fluid_bulk * np.log(density / density_0)
def bh07():
    """Read the MOOSE gold file for test bh07.

    Returns every 10th (radius, porepressure) pair from the CSV output.
    """
    # `with` guarantees the handle is closed even on error.
    with open("../../tests/dirackernels/gold/bh07_csv_pp_0003.csv") as f:
        data = [line.strip().split(",") for line in f.readlines()[1:]]
    # list(map(...)) keeps rows subscriptable under Python 3.
    data = [list(map(float, line)) for line in data if len(line) > 3]
    xp = [(data[i][2], data[i][1]) for i in range(0, len(data), 10)]
    return xp
# ---------------------------------------------------------------------------
# Driver section: regenerate the comparison plots for the borehole (bh*)
# Dirac-kernel tests.  Each section plots the analytic expectation
# (bhXX_expected) over the MOOSE results read from the gold CSV files.
# NOTE(review): bh02/bh03 and their *_expected helpers are defined earlier
# in this script (outside this chunk).  Each `bhXX = bhXX()` line rebinds
# the reader-function name to its result list, so the readers can only be
# called once.
# ---------------------------------------------------------------------------

# bh02: fully-saturated production well -- flow rate and mass-balance error.
ppoints = np.arange(0, 1.01E7, 1E6)
bh02 = bh02()
plt.figure()
plt.plot(ppoints/1E6, bh02_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E6 for x in bh02], [x[1] for x in bh02], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Fully-saturated production well: flow")
plt.savefig("bh02_flow.pdf")
plt.figure()
plt.plot([x[0]/1E6 for x in bh02], [x[2]*1E15 for x in bh02], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("Mass-balance error (units 1E-15)")
plt.title("Fully-saturated production well: mass-balance error")
plt.savefig("bh02_error.pdf")

# bh03: fully-saturated injection well.
ppoints = np.arange(0, 1.01E7, 1E6)
bh03 = bh03()
plt.figure()
plt.plot(ppoints/1E6, bh03_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E6 for x in bh03], [x[1] for x in bh03], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Fully-saturated injection well: flow")
plt.savefig("bh03_flow.pdf")
plt.figure()
plt.plot([x[0]/1E6 for x in bh03], [x[2]*1E15 for x in bh03], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (MPa)")
plt.ylabel("Mass-balance error (units 1E-15)")
plt.title("Fully-saturated injection well: mass-balance error")
plt.savefig("bh03_error.pdf")

# bh04: unsaturated production well (negative porepressures, kPa axes).
ppoints = np.arange(-2.0E5, 0, 1E3)
bh04 = bh04()
plt.figure()
plt.plot(ppoints/1E3, bh04_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E3 for x in bh04], [x[1] for x in bh04], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Unsaturated production well: flow")
plt.savefig("bh04_flow.pdf")
plt.figure()
plt.plot([x[0]/1E3 for x in bh04], [x[2]*1E13 for x in bh04], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("Mass-balance error (units 1E-13)")
plt.title("Unsaturated production well: mass-balance error")
plt.savefig("bh04_error.pdf")

# bh05: unsaturated injection well.
ppoints = np.arange(-2.0E5, 0, 1E3)
bh05 = bh05()
plt.figure()
plt.plot(ppoints/1E3, bh05_expected(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0]/1E3 for x in bh05], [x[1] for x in bh05], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("flow rate (kg/s)")
plt.title("Unsaturated injection well: flow")
plt.savefig("bh05_flow.pdf")
plt.figure()
plt.plot([x[0]/1E3 for x in bh05], [x[2]*1E10 for x in bh05], 'rs', markersize = 10.0, label = 'MOOSE')
plt.xlabel("Porepressure (kPa)")
plt.ylabel("Mass-balance error (units 1E-10)")
plt.title("Unsaturated injection well: mass-balance error")
plt.savefig("bh05_error.pdf")

# bh07: steady-state pressure profile vs radius around a production borehole.
rpoints = np.arange(1, 301, 3)
bh07 = bh07()
plt.figure()
plt.plot(rpoints, bh07_expected(rpoints)/1E6, 'k-', linewidth = 3.0, label = 'expected')
plt.plot([x[0] for x in bh07], [x[1]/1E6 for x in bh07], 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("radius (m)")
plt.ylabel("Porepressure (MPa)")
plt.title("Steadystate porepressure distribution due to production borehole")
plt.savefig("bh07.pdf")

sys.exit(0)
| lgpl-2.1 |
sriharshams/mlnd | customer_segments/visuals.py | 21 | 6047 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
# Silence matplotlib's UserWarnings so they don't clutter notebook output
# (necessary for newer matplotlib versions).
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
# NOTE(review): assumes this module is imported from a running IPython
# session; get_ipython() returns None in a plain interpreter.
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
def pca_results(good_data, pca):
    '''
    Create a DataFrame of the PCA results
    Includes dimension feature weights and explained variance
    Visualizes the PCA results

    good_data: pandas DataFrame of the input features
    pca: fitted PCA object exposing `components_` and
         `explained_variance_ratio_`
    Returns a DataFrame with one row per dimension: the explained variance
    followed by the per-feature weights.
    '''

    # Dimension indexing, one label per principal component.
    # (Fixed: the original had a duplicated `dimensions = dimensions = ...`
    # assignment -- harmless but a typo.)
    dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_) + 1)]

    # PCA components (rounded for readability).
    components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys())
    components.index = dimensions

    # PCA explained variance, one column.
    ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
    variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
    variance_ratios.index = dimensions

    # Create a bar plot visualization of the feature weights.
    fig, ax = plt.subplots(figsize = (14,8))

    # Plot the feature weights as a function of the components.
    components.plot(ax = ax, kind = 'bar');
    ax.set_ylabel("Feature Weights")
    ax.set_xticklabels(dimensions, rotation=0)

    # Annotate each dimension with its explained variance ratio.
    for i, ev in enumerate(pca.explained_variance_ratio_):
        ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))

    # Return a concatenated DataFrame: variance first, then weights.
    return pd.concat([variance_ratios, components], axis = 1)
def cluster_results(reduced_data, preds, centers, pca_samples):
    '''
    Visualizes the PCA-reduced cluster data in two dimensions
    Adds cues for cluster centers and student-selected sample data
    '''

    # Attach the predicted cluster label to each reduced data point.
    label_col = pd.DataFrame(preds, columns = ['Cluster'])
    frame = pd.concat([label_col, reduced_data], axis = 1)

    # Generate the cluster plot.
    fig, ax = plt.subplots(figsize = (14,8))

    # One color per cluster, spread across the rainbow colormap.
    cmap = cm.get_cmap('gist_rainbow')
    for label, members in frame.groupby('Cluster'):
        members.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
                     color = cmap((label)*1.0/(len(centers)-1)), label = 'Cluster %i'%(label), s=30);

    # Mark each centroid with a white circle carrying its cluster number.
    for idx, center in enumerate(centers):
        ax.scatter(x = center[0], y = center[1], color = 'white', edgecolors = 'black', \
                   alpha = 1, linewidth = 2, marker = 'o', s=200);
        ax.scatter(x = center[0], y = center[1], marker='$%d$'%(idx), alpha = 1, s=100);

    # Overlay the transformed sample points as black crosses.
    ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \
               s = 150, linewidth = 4, color = 'black', marker = 'x');

    # Set plot title.
    ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
def biplot(good_data, reduced_data, pca):
    '''
    Produce a biplot that shows a scatterplot of the reduced
    data and the projections of the original features.

    good_data: original data, before transformation.
               Needs to be a pandas dataframe with valid column names
    reduced_data: the reduced data (the first two dimensions are plotted)
    pca: pca object that contains the components_ attribute

    return: a matplotlib AxesSubplot object (for any additional customization)

    This procedure is inspired by the script:
    https://github.com/teddyroland/python-biplot
    '''

    fig, ax = plt.subplots(figsize = (14,8))

    # Scatterplot of the PCA-reduced data.
    ax.scatter(x=reduced_data.loc[:, 'Dimension 1'], y=reduced_data.loc[:, 'Dimension 2'],
               facecolors='b', edgecolors='b', s=70, alpha=0.5)

    # Each row of the transposed components matrix is one original feature's
    # loading on the two plotted dimensions.
    loadings = pca.components_.T

    # Scaling factors make the feature arrows and labels visible against
    # the data cloud.
    arrow_size = 7.0
    text_pos = 8.0

    # Draw one arrow (plus a label) per original feature.
    for col_idx, vec in enumerate(loadings):
        ax.arrow(0, 0, arrow_size*vec[0], arrow_size*vec[1],
                 head_width=0.2, head_length=0.2, linewidth=2, color='red')
        ax.text(vec[0]*text_pos, vec[1]*text_pos, good_data.columns[col_idx], color='black',
                ha='center', va='center', fontsize=18)

    ax.set_xlabel("Dimension 1", fontsize=14)
    ax.set_ylabel("Dimension 2", fontsize=14)
    ax.set_title("PC plane with original feature projections.", fontsize=16);
    return ax
def channel_results(reduced_data, outliers, pca_samples):
    '''
    Visualizes the PCA-reduced cluster data in two dimensions using the full dataset
    Data is labeled by "Channel" and cues added for student-selected sample data

    reduced_data: PCA-reduced DataFrame with 'Dimension 1'/'Dimension 2' columns
    outliers: row indices dropped as outliers during preprocessing
    pca_samples: PCA-transformed coordinates of the selected samples
    Returns False if "customers.csv" cannot be read; otherwise None.
    '''

    # Check that the dataset is loadable.
    # (Fixed: the original used a bare `except:` plus a Python 2 `print`
    # statement, a syntax error under Python 3; `print(...)` behaves the
    # same under both interpreters, and `except Exception` no longer
    # swallows KeyboardInterrupt/SystemExit.)
    try:
        full_data = pd.read_csv("customers.csv")
    except Exception:
        print("Dataset could not be loaded. Is the file missing?")
        return False

    # Create the Channel DataFrame, re-indexed without the outlier rows.
    channel = pd.DataFrame(full_data['Channel'], columns = ['Channel'])
    channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
    labeled = pd.concat([reduced_data, channel], axis = 1)

    # Generate the cluster plot.
    fig, ax = plt.subplots(figsize = (14,8))

    # Color map.
    cmap = cm.get_cmap('gist_rainbow')

    # Color the points based on assigned Channel (1 or 2 in the raw data).
    labels = ['Hotel/Restaurant/Cafe', 'Retailer']
    grouped = labeled.groupby('Channel')
    for i, segment in grouped:
        segment.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
                     color = cmap((i-1)*1.0/2), label = labels[i-1], s=30);

    # Plot transformed sample points as numbered open circles.
    for i, sample in enumerate(pca_samples):
        ax.scatter(x = sample[0], y = sample[1], \
                   s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none');
        ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125);

    # Set plot title.
    ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled");
rameez3333/skylab | doc/examples/weighted_sensitivity.py | 2 | 1818 | # -*-coding:utf8-*-
import data
import numpy as np
from skylab.ps_injector import PointSourceInjector
from skylab.utils import poisson_weight
import matplotlib
matplotlib.use("QT4Agg")
import matplotlib.pyplot as plt
if __name__=="__main__":
    # Example driver: compute a weighted sensitivity/discovery-potential
    # for a point source with the skylab likelihood machinery.
    # NOTE(review): `data` is a project-local helper module (not shown
    # here); assumes it provides init() and MC() factories -- confirm.

    # init likelihood class
    llh = data.init(1000, 1000, ncpu=4)
    mc = data.MC(100000)
    print(llh)
    # init a injector class sampling events at a point source
    # (spectral index 2, fixed seed for reproducibility)
    inj = PointSourceInjector(2., seed=0)
    # start calculation for dec = 0
    x = list()
    y = list()
    y2 = list()
    # ndec declinations (endpoints of the linspace are excluded below);
    # nmu is the number of injection-strength histograms to draw.
    ndec = 1
    nmu = 7
    for dec in np.linspace(-np.pi/2., np.pi/2., ndec + 2)[1:-1]:
        inj.fill(dec, mc)
        #llh.do_trials(dec, n_iter=10)
        #continue
        # Sensitivity (90% over TS-median) and discovery potential
        # (50% over 5-sigma): thresholds [0.5, 2.87e-7], fractions [0.9, 0.5].
        result = llh.weighted_sensitivity(dec, [0.5, 2.87e-7],
                                          [0.9, 0.5],
                                          inj,
                                          #fit="exp",
                                          n_bckg=1000,
                                          n_iter=250,
                                          eps=5.e-2)
        # Integer injection strengths spanning the fitted range.
        mu = np.unique(np.array(np.linspace(0., max(result["mu"]), nmu),
                                dtype=np.int))
        t = result["trials"]
        bins = np.linspace(*np.percentile(t["TS"], [5., 100.]), num=500)
        # Poisson-weight the background trials to emulate each mean signal mu.
        w = [poisson_weight(t["n_inj"], i) for i in mu]
        # Background (mu=0) as a survival curve; signal hypotheses cumulative.
        plt.hist(t["TS"], weights=w[0], bins=bins, histtype="step",
                 label=r"$\mu=0$", cumulative=-1, normed=True)
        plt.hist([t["TS"] for i in mu[1:]][::-1], weights=w[1:][::-1],
                 bins=bins, histtype="step",
                 label=[r"$\mu={0:d}$".format(i) for i in mu[1:]][::-1],
                 cumulative=1,
                 normed=True)
        plt.legend(loc="best")
        plt.show()
| gpl-3.0 |
baojianzhou/DLReadingGroup | keras/keras/callbacks.py | 1 | 39787 | from __future__ import absolute_import
from __future__ import print_function
import os
import csv
import six
import numpy as np
import time
import json
import warnings
from collections import deque
from collections import OrderedDict
from collections import Iterable
from .utils.generic_utils import Progbar
from . import backend as K
try:
import requests
except ImportError:
requests = None
if K.backend() == 'tensorflow':
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
class CallbackList(object):
    """Container abstracting a list of callbacks.

    # Arguments
        callbacks: List of `Callback` instances.
        queue_length: Queue length for keeping
            running statistics over callback execution time.
    """

    def __init__(self, callbacks=None, queue_length=10):
        # Copy the incoming list so later appends don't mutate the caller's.
        self.callbacks = list(callbacks) if callbacks else []
        self.queue_length = queue_length

    def append(self, callback):
        self.callbacks.append(callback)

    def set_params(self, params):
        for cb in self.callbacks:
            cb.set_params(params)

    def set_model(self, model):
        for cb in self.callbacks:
            cb.set_model(model)

    def on_epoch_begin(self, epoch, logs=None):
        """Called at the start of an epoch.

        # Arguments
            epoch: integer, index of epoch.
            logs: dictionary of logs.
        """
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_epoch_begin(epoch, logs)
        # Reset the per-epoch batch-timing statistics.
        self._delta_t_batch = 0.
        self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
        self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

    def on_epoch_end(self, epoch, logs=None):
        """Called at the end of an epoch.

        # Arguments
            epoch: integer, index of epoch.
            logs: dictionary of logs.
        """
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_epoch_end(epoch, logs)

    def on_batch_begin(self, batch, logs=None):
        """Called right before processing a batch.

        Warns when the callbacks themselves take a significant fraction
        of the batch time.

        # Arguments
            batch: integer, index of batch within the current epoch.
            logs: dictionary of logs.
        """
        logs = logs or {}
        hook_start = time.time()
        for cb in self.callbacks:
            cb.on_batch_begin(batch, logs)
        self._delta_ts_batch_begin.append(time.time() - hook_start)
        delta_t_median = np.median(self._delta_ts_batch_begin)
        if (self._delta_t_batch > 0. and
                delta_t_median > 0.95 * self._delta_t_batch and
                delta_t_median > 0.1):
            warnings.warn('Method on_batch_begin() is slow compared '
                          'to the batch update (%f). Check your callbacks.'
                          % delta_t_median)
        self._t_enter_batch = time.time()

    def on_batch_end(self, batch, logs=None):
        """Called at the end of a batch.

        # Arguments
            batch: integer, index of batch within the current epoch.
            logs: dictionary of logs.
        """
        logs = logs or {}
        # Guard against on_batch_end being called without on_batch_begin.
        if not hasattr(self, '_t_enter_batch'):
            self._t_enter_batch = time.time()
        self._delta_t_batch = time.time() - self._t_enter_batch
        hook_start = time.time()
        for cb in self.callbacks:
            cb.on_batch_end(batch, logs)
        self._delta_ts_batch_end.append(time.time() - hook_start)
        delta_t_median = np.median(self._delta_ts_batch_end)
        if (self._delta_t_batch > 0. and
                (delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
            warnings.warn('Method on_batch_end() is slow compared '
                          'to the batch update (%f). Check your callbacks.'
                          % delta_t_median)

    def on_train_begin(self, logs=None):
        """Called at the beginning of training.

        # Arguments
            logs: dictionary of logs.
        """
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_train_begin(logs)

    def on_train_end(self, logs=None):
        """Called at the end of training.

        # Arguments
            logs: dictionary of logs.
        """
        logs = logs or {}
        for cb in self.callbacks:
            cb.on_train_end(logs)

    def __iter__(self):
        return iter(self.callbacks)
class Callback(object):
    """Abstract base class used to build new callbacks.

    # Properties
        params: dict. Training parameters
            (eg. verbosity, batch size, number of epochs...).
        model: instance of `keras.models.Model`.
            Reference of the model being trained.

    The `logs` dictionary that callback methods
    take as argument will contain keys for quantities relevant to
    the current batch or epoch.

    Currently, the `.fit()` method of the `Sequential` model class
    will include the following quantities in the `logs` that
    it passes to its callbacks:

        on_epoch_end: logs include `acc` and `loss`, and
            optionally include `val_loss`
            (if validation is enabled in `fit`), and `val_acc`
            (if validation and accuracy monitoring are enabled).
        on_batch_begin: logs include `size`,
            the number of samples in the current batch.
        on_batch_end: logs include `loss`, and optionally `acc`
            (if accuracy monitoring is enabled).

    Subclasses override whichever `on_*` hooks they need; the defaults
    below are all no-ops.
    """

    def __init__(self):
        self.validation_data = None

    def set_params(self, params):
        self.params = params

    def set_model(self, model):
        self.model = model

    def on_epoch_begin(self, epoch, logs=None):
        pass

    def on_epoch_end(self, epoch, logs=None):
        pass

    def on_batch_begin(self, batch, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        pass

    def on_train_begin(self, logs=None):
        pass

    def on_train_end(self, logs=None):
        pass
class BaseLogger(Callback):
    """Callback that accumulates epoch averages of metrics.

    This callback is automatically applied to every Keras model.
    """

    def on_epoch_begin(self, epoch, logs=None):
        # Sample count and batch-size-weighted metric sums for this epoch.
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Accumulate every logged quantity, weighted by the batch size.
        for metric, value in logs.items():
            self.totals[metric] = self.totals.get(metric, 0) + value * batch_size

    def on_epoch_end(self, epoch, logs=None):
        if logs is not None:
            for metric in self.params['metrics']:
                if metric in self.totals:
                    # Make the epoch average available to next callbacks.
                    logs[metric] = self.totals[metric] / self.seen
class TerminateOnNaN(Callback):
    """Callback that terminates training when a NaN loss is encountered."""

    def __init__(self):
        super(TerminateOnNaN, self).__init__()

    def on_batch_end(self, batch, logs=None):
        loss = (logs or {}).get('loss')
        # Stop as soon as the loss is NaN or +/-inf (i.e. not finite).
        if loss is not None and not np.isfinite(loss):
            print('Batch %d: Invalid loss, terminating training' % (batch))
            self.model.stop_training = True
class ProgbarLogger(Callback):
    """Callback that prints metrics to stdout.

    # Arguments
        count_mode: One of "steps" or "samples".
            Whether the progress bar should
            count samples seens or steps (batches) seen.

    # Raises
        ValueError: In case of invalid `count_mode`.
    """

    def __init__(self, count_mode='samples'):
        super(ProgbarLogger, self).__init__()
        if count_mode not in ('samples', 'steps'):
            raise ValueError('Unknown `count_mode`: ' + str(count_mode))
        self.use_steps = (count_mode == 'steps')

    def on_train_begin(self, logs=None):
        self.verbose = self.params['verbose']
        self.epochs = self.params['epochs']

    def on_epoch_begin(self, epoch, logs=None):
        if self.verbose:
            print('Epoch %d/%d' % (epoch + 1, self.epochs))
        # Progress target: total batches or total samples per epoch.
        target = self.params['steps'] if self.use_steps else self.params['samples']
        self.target = target
        self.progbar = Progbar(target=self.target,
                               verbose=self.verbose)
        self.seen = 0

    def on_batch_begin(self, batch, logs=None):
        if self.seen < self.target:
            self.log_values = []

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.seen += 1 if self.use_steps else logs.get('size', 0)
        for metric in self.params['metrics']:
            if metric in logs:
                self.log_values.append((metric, logs[metric]))
        # Skip progbar update for the last batch;
        # will be handled by on_epoch_end.
        if self.verbose and self.seen < self.target:
            self.progbar.update(self.seen, self.log_values)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        for metric in self.params['metrics']:
            if metric in logs:
                self.log_values.append((metric, logs[metric]))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
    """Callback that records events into a `History` object.

    This callback is automatically applied to
    every Keras model. The `History` object
    gets returned by the `fit` method of models.
    """

    def on_train_begin(self, logs=None):
        # Fresh containers at the start of every training run.
        self.epoch = []
        self.history = {}

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epoch.append(epoch)
        # Append each logged quantity to its per-metric series.
        for metric, value in logs.items():
            self.history.setdefault(metric, []).append(value)
class ModelCheckpoint(Callback):
    """Save the model after every epoch.

    `filepath` can contain named formatting options,
    which will be filled the value of `epoch` and
    keys in `logs` (passed in `on_epoch_end`).

    For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
    then the model checkpoints will be saved with the epoch number and
    the validation loss in the filename.

    # Arguments
        filepath: string, path to save the model file.
        monitor: quantity to monitor.
        verbose: verbosity mode, 0 or 1.
        save_best_only: if `save_best_only=True`,
            the latest best model according to
            the quantity monitored will not be overwritten.
        mode: one of {auto, min, max}.
            If `save_best_only=True`, the decision
            to overwrite the current save file is made
            based on either the maximization or the
            minimization of the monitored quantity. For `val_acc`,
            this should be `max`, for `val_loss` this should
            be `min`, etc. In `auto` mode, the direction is
            automatically inferred from the name of the monitored quantity.
        save_weights_only: if True, then only the model's weights will be
            saved (`model.save_weights(filepath)`), else the full model
            is saved (`model.save(filepath)`).
        period: Interval (number of epochs) between checkpoints.
    """

    def __init__(self, filepath, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False,
                 mode='auto', period=1):
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_save = 0

        if mode not in ['auto', 'min', 'max']:
            warnings.warn('ModelCheckpoint mode %s is unknown, '
                          'fallback to auto mode.' % (mode),
                          RuntimeWarning)
            mode = 'auto'

        # Uses `np.inf` rather than the alias `np.Inf`, which was removed
        # in NumPy 2.0.
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # 'auto': infer direction from the monitored quantity's name.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.inf
            else:
                self.monitor_op = np.less
                self.best = np.inf

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn('Can save best model only with %s available, '
                                  'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                                  ' saving model to %s'
                                  % (epoch, self.monitor, self.best,
                                     current, filepath))
                        self.best = current
                        if self.save_weights_only:
                            self.model.save_weights(filepath, overwrite=True)
                        else:
                            self.model.save(filepath, overwrite=True)
                    else:
                        if self.verbose > 0:
                            print('Epoch %05d: %s did not improve' %
                                  (epoch, self.monitor))
            else:
                if self.verbose > 0:
                    print('Epoch %05d: saving model to %s' % (epoch, filepath))
                if self.save_weights_only:
                    self.model.save_weights(filepath, overwrite=True)
                else:
                    self.model.save(filepath, overwrite=True)
class EarlyStopping(Callback):
    """Stop training when a monitored quantity has stopped improving.

    # Arguments
        monitor: quantity to be monitored.
        min_delta: minimum change in the monitored quantity
            to qualify as an improvement, i.e. an absolute
            change of less than min_delta, will count as no
            improvement.
        patience: number of epochs with no improvement
            after which training will be stopped.
        verbose: verbosity mode.
        mode: one of {auto, min, max}. In `min` mode,
            training will stop when the quantity
            monitored has stopped decreasing; in `max`
            mode it will stop when the quantity
            monitored has stopped increasing; in `auto`
            mode, the direction is automatically inferred
            from the name of the monitored quantity.
    """

    def __init__(self, monitor='val_loss',
                 min_delta=0, patience=0, verbose=0, mode='auto'):
        super(EarlyStopping, self).__init__()

        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.min_delta = min_delta
        self.wait = 0
        self.stopped_epoch = 0

        if mode not in ['auto', 'min', 'max']:
            # Fixed: the original formatted `self.mode` here, but `mode` is
            # only a local -- referencing `self.mode` raised AttributeError
            # whenever this warning actually fired.
            warnings.warn('EarlyStopping mode %s is unknown, '
                          'fallback to auto mode.' % (mode),
                          RuntimeWarning)
            mode = 'auto'

        if mode == 'min':
            self.monitor_op = np.less
        elif mode == 'max':
            self.monitor_op = np.greater
        else:
            # 'auto': infer direction from the monitored quantity's name.
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
            else:
                self.monitor_op = np.less

        # Sign the tolerance so `current - min_delta` always shifts in the
        # improvement direction.
        if self.monitor_op == np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1

    def on_train_begin(self, logs=None):
        self.wait = 0  # Allow instances to be re-used
        # `np.inf` rather than `np.Inf` (alias removed in NumPy 2.0).
        self.best = np.inf if self.monitor_op == np.less else -np.inf

    def on_epoch_end(self, epoch, logs=None):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Early stopping requires %s available!' %
                          (self.monitor), RuntimeWarning)
            # Fixed: bail out here -- the original fell through and raised
            # TypeError on `None - self.min_delta`.
            return

        if self.monitor_op(current - self.min_delta, self.best):
            self.best = current
            self.wait = 0
        else:
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.model.stop_training = True
            self.wait += 1

    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0 and self.verbose > 0:
            print('Epoch %05d: early stopping' % (self.stopped_epoch))
class RemoteMonitor(Callback):
    """Callback used to stream events to a server.

    Requires the `requests` library.
    Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
    HTTP POST, with a `data` argument which is a
    JSON-encoded dictionary of event data.

    # Arguments
        root: String; root url of the target server.
        path: String; path relative to `root` to which the events will be sent.
        field: String; JSON field under which the data will be stored.
        headers: Dictionary; optional custom HTTP headers.
            Defaults to:
            `{'Accept': 'application/json', 'Content-Type': 'application/json'}`
    """

    def __init__(self,
                 root='http://localhost:9000',
                 path='/publish/epoch/end/',
                 field='data',
                 headers=None):
        super(RemoteMonitor, self).__init__()
        self.root = root
        self.path = path
        self.field = field
        if headers is None:
            headers = {'Accept': 'application/json',
                       'Content-Type': 'application/json'}
        self.headers = headers

    def on_epoch_end(self, epoch, logs=None):
        if requests is None:
            raise ImportError('RemoteMonitor requires '
                              'the `requests` library.')
        # Build the payload: epoch index first, then every logged metric
        # (a metric named 'epoch' would overwrite, as in the original).
        payload = {'epoch': epoch}
        payload.update(logs or {})
        try:
            requests.post(self.root + self.path,
                          {self.field: json.dumps(payload)},
                          headers=self.headers)
        except requests.exceptions.RequestException:
            # Network failures only warn -- monitoring must not kill training.
            warnings.warn('Warning: could not reach RemoteMonitor '
                          'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
    """Learning rate scheduler.

    # Arguments
        schedule: a function that takes an epoch index as input
            (integer, indexed from 0) and returns a new
            learning rate as output (float).
    """

    def __init__(self, schedule):
        super(LearningRateScheduler, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch, logs=None):
        # Guard clauses: the optimizer must expose `lr`, and the schedule
        # must produce a plain/NumPy float.
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        new_lr = self.schedule(epoch)
        if not isinstance(new_lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, new_lr)
class TensorBoard(Callback):
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
TensorBoard is a visualization tool provided with TensorFlow.
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
# Arguments
log_dir: the path of the directory where to save the log
files to be parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation
and weight histograms for the layers of the model. If set to 0,
histograms won't be computed. Validation data (or split) must be
specified for histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard.
The log file can become quite large when
write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network
for histograms computation.
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
embeddings_layer_names: a list of names of layers to keep eye on. If
None or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
"""
def __init__(self, log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
if K.backend() != 'tensorflow':
raise RuntimeError('TensorBoard callback only works '
'with the TensorFlow backend.')
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
self.batch_size = batch_size
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf.summary.histogram(weight.name, weight)
if self.write_grads:
grads = model.optimizer.get_gradients(model.total_loss,
weight)
tf.summary.histogram('{}_grad'.format(weight.name), grads)
if self.write_images:
w_img = tf.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = tf.transpose(w_img)
shape = K.int_shape(w_img)
w_img = tf.reshape(w_img, [1,
shape[0],
shape[1],
1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = tf.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = tf.reshape(w_img, [shape[0],
shape[1],
shape[2],
1])
elif len(shape) == 1: # bias case
w_img = tf.reshape(w_img, [1,
shape[0],
1,
1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf.summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf.summary.histogram('{}_out'.format(layer.name),
layer.output)
self.merged = tf.summary.merge_all()
if self.write_graph:
self.writer = tf.summary.FileWriter(self.log_dir,
self.sess.graph)
else:
self.writer = tf.summary.FileWriter(self.log_dir)
if self.embeddings_freq:
self.saver = tf.train.Saver()
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding']
embeddings = {layer.name: layer.weights[0]
for layer in self.model.layers
if layer.name in embeddings_layer_names}
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
embeddings_metadata = {layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()}
config = projector.ProjectorConfig()
self.embeddings_logs = []
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
self.embeddings_logs.append(os.path.join(self.log_dir,
layer_name + '.ckpt'))
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
val_data = self.validation_data
tensors = (self.model.inputs +
self.model.targets +
self.model.sample_weights)
if self.model.uses_learning_phase:
tensors += [K.learning_phase()]
assert len(val_data) == len(tensors)
val_size = val_data[0].shape[0]
i = 0
while i < val_size:
step = min(self.batch_size, val_size - i)
batch_val = []
batch_val.append(val_data[0][i:i + step])
batch_val.append(val_data[1][i:i + step])
batch_val.append(val_data[2][i:i + step])
if self.model.uses_learning_phase:
batch_val.append(val_data[3])
feed_dict = dict(zip(tensors, batch_val))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
i += self.batch_size
if self.embeddings_freq and self.embeddings_logs:
if epoch % self.embeddings_freq == 0:
for log in self.embeddings_logs:
self.saver.save(self.sess, log, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
    def on_train_end(self, _):
        """Close the TensorBoard summary writer when training finishes."""
        self.writer.close()
class ReduceLROnPlateau(Callback):
    """Reduce learning rate when a metric has stopped improving.

    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This callback monitors a
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    # Example
    ```python
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=5, min_lr=0.001)
    model.fit(X_train, Y_train, callbacks=[reduce_lr])
    ```

    # Arguments
        monitor: quantity to be monitored.
        factor: factor by which the learning rate will
            be reduced. new_lr = lr * factor
        patience: number of epochs with no improvement
            after which learning rate will be reduced.
        verbose: int. 0: quiet, 1: update messages.
        mode: one of {auto, min, max}. In `min` mode,
            lr will be reduced when the quantity
            monitored has stopped decreasing; in `max`
            mode it will be reduced when the quantity
            monitored has stopped increasing; in `auto`
            mode, the direction is automatically inferred
            from the name of the monitored quantity.
        epsilon: threshold for measuring the new optimum,
            to only focus on significant changes.
        cooldown: number of epochs to wait before resuming
            normal operation after lr has been reduced.
        min_lr: lower bound on the learning rate.
    """

    def __init__(self, monitor='val_loss', factor=0.1, patience=10,
                 verbose=0, mode='auto', epsilon=1e-4, cooldown=0, min_lr=0):
        super(ReduceLROnPlateau, self).__init__()

        self.monitor = monitor
        if factor >= 1.0:
            raise ValueError('ReduceLROnPlateau '
                             'does not support a factor >= 1.0.')
        self.factor = factor
        self.min_lr = min_lr
        self.epsilon = epsilon
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.wait = 0
        self.best = 0  # Placeholder; set properly by _reset() below.
        self.mode = mode
        self.monitor_op = None
        self._reset()

    def _reset(self):
        """Resets wait counter, cooldown counter and the best-seen value."""
        if self.mode not in ['auto', 'min', 'max']:
            warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
                          'fallback to auto mode.' % (self.mode),
                          RuntimeWarning)
            self.mode = 'auto'
        # 'auto' treats accuracy-like metrics as maximized, everything else
        # (e.g. losses) as minimized.
        if (self.mode == 'min' or
                (self.mode == 'auto' and 'acc' not in self.monitor)):
            self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
            # BUGFIX: use np.inf -- the np.Inf alias was removed in NumPy 2.0
            # and raises AttributeError there.
            self.best = np.inf
        else:
            self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
            self.best = -np.inf
        self.cooldown_counter = 0
        self.wait = 0
        # Tolerance used when comparing the current lr against min_lr.
        self.lr_epsilon = self.min_lr * 1e-4

    def on_train_begin(self, logs=None):
        self._reset()

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Expose the current learning rate to downstream callbacks/loggers.
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0
            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1

    def in_cooldown(self):
        return self.cooldown_counter > 0
class CSVLogger(Callback):
    """Stream per-epoch results to a CSV file.

    Any value that can be represented as a string is supported,
    including 1D iterables such as np.ndarray.

    # Example
    ```python
    csv_logger = CSVLogger('training.log')
    model.fit(X_train, Y_train, callbacks=[csv_logger])
    ```

    # Arguments
        filename: filename of the csv file, e.g. 'run/log.csv'.
        separator: string used to separate elements in the csv file.
        append: True: append if file exists (useful for continuing
            training). False: overwrite existing file,
    """

    def __init__(self, filename, separator=',', append=False):
        self.sep = separator
        self.filename = filename
        self.append = append
        self.writer = None
        self.keys = None
        self.append_header = True
        # The csv module on Windows under Python 2 requires binary file mode.
        self.file_flags = 'b' if six.PY2 and os.name == 'nt' else ''
        super(CSVLogger, self).__init__()

    def on_train_begin(self, logs=None):
        mode = 'w'
        if self.append:
            mode = 'a'
            if os.path.exists(self.filename):
                # Only write the header again if the existing file is empty.
                with open(self.filename, 'r' + self.file_flags) as f:
                    self.append_header = not bool(len(f.readline()))
        self.csv_file = open(self.filename, mode + self.file_flags)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        def _format(value):
            # Strings pass through; 1D iterables are rendered as "[a, b, ...]";
            # 0-d ndarrays and plain scalars are written as-is.
            if isinstance(value, six.string_types):
                return value
            zero_dim = isinstance(value, np.ndarray) and value.ndim == 0
            if isinstance(value, Iterable) and not zero_dim:
                return '"[%s]"' % (', '.join(map(str, value)))
            return value

        if self.writer is None:
            # Lazily create the writer so the column set matches the first
            # epoch's log keys.
            self.keys = sorted(logs.keys())

            class CustomDialect(csv.excel):
                delimiter = self.sep

            self.writer = csv.DictWriter(
                self.csv_file,
                fieldnames=['epoch'] + self.keys,
                dialect=CustomDialect)
            if self.append_header:
                self.writer.writeheader()

        row = OrderedDict({'epoch': epoch})
        row.update((key, _format(logs[key])) for key in self.keys)
        self.writer.writerow(row)
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        self.csv_file.close()
        self.writer = None
class LambdaCallback(Callback):
    """Callback for creating simple, custom callbacks on-the-fly.

    Each hook is supplied as an anonymous function that is called with
    positional arguments:

    - `on_epoch_begin` / `on_epoch_end`: `epoch`, `logs`
    - `on_batch_begin` / `on_batch_end`: `batch`, `logs`
    - `on_train_begin` / `on_train_end`: `logs`

    Hooks that are not supplied default to a no-op.

    # Arguments
        on_epoch_begin: called at the beginning of every epoch.
        on_epoch_end: called at the end of every epoch.
        on_batch_begin: called at the beginning of every batch.
        on_batch_end: called at the end of every batch.
        on_train_begin: called at the beginning of model training.
        on_train_end: called at the end of model training.

    # Example
    ```python
    # Print the batch number at the beginning of every batch.
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch, logs: print(batch))

    # Terminate some processes after having finished model training.
    processes = ...
    cleanup_callback = LambdaCallback(
        on_train_end=lambda logs: [
            p.terminate() for p in processes if p.is_alive()])

    model.fit(..., callbacks=[batch_print_callback, cleanup_callback])
    ```
    """

    def __init__(self,
                 on_epoch_begin=None,
                 on_epoch_end=None,
                 on_batch_begin=None,
                 on_batch_end=None,
                 on_train_begin=None,
                 on_train_end=None,
                 **kwargs):
        super(LambdaCallback, self).__init__()
        self.__dict__.update(kwargs)

        def _noop_two(first, logs):
            return None

        def _noop_one(logs):
            return None

        # Fall back to a no-op for every hook the caller did not provide.
        self.on_epoch_begin = on_epoch_begin if on_epoch_begin is not None else _noop_two
        self.on_epoch_end = on_epoch_end if on_epoch_end is not None else _noop_two
        self.on_batch_begin = on_batch_begin if on_batch_begin is not None else _noop_two
        self.on_batch_end = on_batch_end if on_batch_end is not None else _noop_two
        self.on_train_begin = on_train_begin if on_train_begin is not None else _noop_one
        self.on_train_end = on_train_end if on_train_end is not None else _noop_one
| apache-2.0 |
jblackburne/scikit-learn | sklearn/tests/test_base.py | 2 | 11255 | # Author: Gael Varoquaux
# License: BSD 3 clause
import sys
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.utils import deprecated
from sklearn.base import TransformerMixin
from sklearn.utils.mocking import MockDataFrame
import pickle
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    """Fixture estimator with a scalar and an arbitrary-object parameter."""
    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    """Fixture estimator used as a nested sub-estimator of ``T``."""
    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    """Fixture estimator whose parameters may themselves be estimators."""
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class ModifyInitParams(BaseEstimator):
    """Deprecated behavior: ``__init__`` copies its parameter.

    The stored attribute compares equal to the argument but is a different
    object (``self.a is not a``), which violates the sklearn convention
    that ``__init__`` stores parameters unmodified.
    """
    def __init__(self, a=np.array([0])):
        # The mutable default and the copy are intentional: the fixture
        # exists to trigger clone's deprecation warning about modified params.
        self.a = a.copy()
class DeprecatedAttributeEstimator(BaseEstimator):
    """Fixture with parameter ``b`` deprecated and renamed to ``a``."""
    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # NOTE(review): this builds a DeprecationWarning instance but
            # never warns/raises it -- presumably deliberate for the test;
            # confirm before "fixing".
            DeprecationWarning("b is deprecated and renamed 'a'")
            self.a = b

    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "
    def __init__(self, a=None):
        # Ignores the constructor argument on purpose, so the
        # get_params()/clone() round-trip cannot reproduce the instance.
        self.a = 1
class NoEstimator(object):
    """Minimal fit/predict object that does NOT inherit BaseEstimator.

    Because it lacks ``get_params``, ``clone`` is expected to reject it.
    """

    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        # Follow the scikit-learn convention of returning the estimator.
        return self

    def predict(self, X=None):
        return None
class VargEstimator(BaseEstimator):
    """scikit-learn estimators shouldn't have vargs."""
    def __init__(self, *vargs):
        # *vargs makes the signature un-introspectable, so clone must fail.
        pass
#############################################################################
# The tests
def test_clone():
    """clone() yields a distinct estimator with equal parameters."""
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.
    from sklearn.feature_selection import SelectFpr, f_classif
    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
    assert_equal(selector.get_params(), new_selector.get_params())
    # Array-valued parameters must also survive cloning.
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
def test_clone_2():
    """clone() must drop attributes that were set outside __init__."""
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.feature_selection import SelectFpr, f_classif
    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
    """clone() raises on estimators that break the get_params contract."""
    # Check that clone raises an error on buggy estimators.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)
    # Objects without get_params are rejected with a TypeError.
    no_estimator = NoEstimator()
    assert_raises(TypeError, clone, no_estimator)
    # *vargs constructors cannot be introspected.
    varg_est = VargEstimator()
    assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
    """clone() must handle empty dense and sparse array parameters."""
    # Regression test for cloning estimators with empty arrays
    clf = MyEstimator(empty=np.array([]))
    clf2 = clone(clf)
    assert_array_equal(clf.empty, clf2.empty)
    clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    clf2 = clone(clf)
    assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
    """A np.nan parameter must be shared, not copied, by clone()."""
    # Regression test for cloning estimators with default parameter as np.nan
    clf = MyEstimator(empty=np.nan)
    clf2 = clone(clf)
    assert_true(clf.empty is clf2.empty)
def test_clone_copy_init_params():
    """clone() warns when __init__ copies or casts its parameters."""
    # test for deprecation warning when copying or casting an init parameter
    est = ModifyInitParams()
    message = ("Estimator ModifyInitParams modifies parameters in __init__. "
               "This behavior is deprecated as of 0.18 and support "
               "for this behavior will be removed in 0.20.")
    assert_warns_message(DeprecationWarning, message, clone, est)
def test_clone_sparse_matrices():
    """clone() must deep-copy every scipy sparse matrix type."""
    # Collect every *_matrix class exposed by scipy.sparse.
    sparse_matrix_classes = [
        getattr(sp, name)
        for name in dir(sp) if name.endswith('_matrix')]
    PY26 = sys.version_info[:2] == (2, 6)
    if PY26:
        # sp.dok_matrix can not be deepcopied in Python 2.6
        sparse_matrix_classes.remove(sp.dok_matrix)
    for cls in sparse_matrix_classes:
        sparse_matrix = cls(np.eye(5))
        clf = MyEstimator(empty=sparse_matrix)
        clf_cloned = clone(clf)
        # The clone must preserve both the sparse format and the values.
        assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
        assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
    """repr() of estimators shows nested params and truncates long ones."""
    # Smoke test the repr of the base estimator.
    my_estimator = MyEstimator()
    repr(my_estimator)
    test = T(K(), K())
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )
    # Very long parameter values are ellipsized to a fixed-length repr.
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    """str() of a bare estimator must not raise."""
    # Smoke test the str of the base estimator
    my_estimator = MyEstimator()
    str(my_estimator)
def test_get_params():
    """get_params/set_params must traverse nested estimators via '__'."""
    test = T(K(), K())
    # deep=True exposes sub-estimator params as 'a__d'; deep=False does not.
    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))
    test.set_params(a__d=2)
    assert_true(test.a.d == 2)
    # Unknown nested parameter names must be rejected.
    assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
    """Deprecated attributes must not appear in get_params output."""
    # deprecated attribute should not show up as params
    est = DeprecatedAttributeEstimator(a=1)
    assert_true('a' in est.get_params())
    assert_true('a' in est.get_params(deep=True))
    assert_true('a' in est.get_params(deep=False))
    assert_true('b' not in est.get_params())
    assert_true('b' not in est.get_params(deep=True))
    assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
    """is_classifier must see through meta-estimators and pipelines."""
    svc = SVC()
    assert_true(is_classifier(svc))
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    assert_true(is_classifier(Pipeline(
        [('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
    """set_params must validate nested parameter names."""
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])
    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    """Weighted and unweighted scores must differ for both mixin types."""
    rng = np.random.RandomState(0)
    # test both ClassifierMixin and RegressorMixin
    estimators = [DecisionTreeClassifier(max_depth=2),
                  DecisionTreeRegressor(max_depth=2)]
    sets = [datasets.load_iris(),
            datasets.load_boston()]
    for est, ds in zip(estimators, sets):
        est.fit(ds.data, ds.target)
        # generate random sample weights
        sample_weight = rng.randint(1, 10, size=len(ds.target))
        # check that the score with and without sample weights are different
        assert_not_equal(est.score(ds.data, ds.target),
                         est.score(ds.data, ds.target,
                                   sample_weight=sample_weight),
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
def test_clone_pandas_dataframe():
    """clone() must handle a (mock) pandas DataFrame init parameter."""

    class DummyEstimator(BaseEstimator, TransformerMixin):
        """This is a dummy class for generating numerical features
        This feature extractor extracts numerical features from pandas data
        frame.
        Parameters
        ----------
        df: pandas data frame
            The pandas data frame parameter.
        Notes
        -----
        """
        def __init__(self, df=None, scalar_param=1):
            self.df = df
            self.scalar_param = scalar_param

        def fit(self, X, y=None):
            pass

        def transform(self, X, y=None):
            pass

    # build and clone estimator
    d = np.arange(10)
    df = MockDataFrame(d)
    e = DummyEstimator(df, scalar_param=1)
    cloned_e = clone(e)
    # the test: frame contents and scalar param must survive the clone
    assert_true((e.df == cloned_e.df).values.all())
    assert_equal(e.scalar_param, cloned_e.scalar_param)
class TreeNoVersion(DecisionTreeClassifier):
    """Tree whose pickles omit the sklearn version (emulates pre-0.18)."""
    def __getstate__(self):
        # Bypass BaseEstimator.__getstate__, which would add the version key.
        return self.__dict__
def test_pickle_version_warning():
    """Unpickling across sklearn versions warns; external classes do not."""
    # check that warnings are raised when unpickling in a different version
    # first, check no warning when in the same version:
    iris = datasets.load_iris()
    tree = DecisionTreeClassifier().fit(iris.data, iris.target)
    tree_pickle = pickle.dumps(tree)
    assert_true(b"version" in tree_pickle)
    assert_no_warnings(pickle.loads, tree_pickle)
    # check that warning is raised on different version
    tree_pickle_other = tree_pickle.replace(sklearn.__version__.encode(),
                                            b"something")
    message = ("Trying to unpickle estimator DecisionTreeClassifier from "
               "version {0} when using version {1}. This might lead to "
               "breaking code or invalid results. "
               "Use at your own risk.".format("something",
                                              sklearn.__version__))
    assert_warns_message(UserWarning, message, pickle.loads, tree_pickle_other)
    # check that not including any version also works:
    # TreeNoVersion has no getstate, like pre-0.18
    tree = TreeNoVersion().fit(iris.data, iris.target)
    tree_pickle_noversion = pickle.dumps(tree)
    assert_false(b"version" in tree_pickle_noversion)
    message = message.replace("something", "pre-0.18")
    message = message.replace("DecisionTreeClassifier", "TreeNoVersion")
    # check we got the warning about using pre-0.18 pickle
    assert_warns_message(UserWarning, message, pickle.loads,
                         tree_pickle_noversion)
    # check that no warning is raised for external estimators
    TreeNoVersion.__module__ = "notsklearn"
    assert_no_warnings(pickle.loads, tree_pickle_noversion)
| bsd-3-clause |
ehogan/iris | docs/iris/example_code/Meteorology/wind_speed.py | 11 | 2361 | """
Plotting wind direction using quiver
===========================================================
This example demonstrates using quiver to plot wind speed contours and wind
direction arrows from wind vector component input data. The vector components
are co-located in space in this case.
For the second plot, the data used for the arrows is normalised to produce
arrows with a uniform size on the plot.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coord_categorisation
import iris.quickplot as qplt
import cartopy
import cartopy.feature as cfeat
import cartopy.crs as ccrs
def main():
    """Plot wind speed contours and wind direction arrows over Lake Victoria."""
    # Load the u and v components of wind from a pp file
    infile = iris.sample_data_path('wind_speed_lake_victoria.pp')
    uwind = iris.load_cube(infile, 'x_wind')
    vwind = iris.load_cube(infile, 'y_wind')
    ulon = uwind.coord('longitude')
    vlon = vwind.coord('longitude')
    # The longitude points go from 180 to 540, so subtract 360 from them
    ulon.points = ulon.points - 360.0
    vlon.points = vlon.points - 360.0
    # Create a cube containing the wind speed (vector magnitude)
    windspeed = (uwind ** 2 + vwind ** 2) ** 0.5
    windspeed.rename('windspeed')
    x = ulon.points
    y = uwind.coord('latitude').points
    u = uwind.data
    v = vwind.data
    # Set up axes to show the lake
    lakes = cfeat.NaturalEarthFeature('physical', 'lakes', '50m',
                                      facecolor='none')
    plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.add_feature(lakes)
    # Get the coordinate reference system used by the data
    transform = ulon.coord_system.as_cartopy_projection()
    # Plot the wind speed as a contour plot
    qplt.contourf(windspeed, 20)
    # Add arrows to show the wind vectors
    plt.quiver(x, y, u, v, pivot='middle', transform=transform)
    plt.title("Wind speed over Lake Victoria")
    qplt.show()
    # Second figure: normalise the data so all arrows have uniform length
    u_norm = u / np.sqrt(u ** 2.0 + v ** 2.0)
    v_norm = v / np.sqrt(u ** 2.0 + v ** 2.0)
    plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.add_feature(lakes)
    qplt.contourf(windspeed, 20)
    plt.quiver(x, y, u_norm, v_norm, pivot='middle', transform=transform)
    plt.title("Wind speed over Lake Victoria")
    qplt.show()
# Run the example when executed as a script.
if __name__ == '__main__':
    main()
| lgpl-3.0 |
WalkingMachine/sara_commun | wm_ork/capture/sandbox/orb_template_gen.py | 1 | 2597 | #!/usr/bin/env python
import ecto
from ecto_opencv import cv_bp
from ecto_opencv.highgui import imread, MatWriter
from ecto_opencv.features2d import ORB, ORBstats, DescriptorAccumulator, KeypointsToMat
from ecto_opencv.imgproc import cvtColor, Conversion
from ecto_opencv.calib import PointsTo3d
from ecto.opts import scheduler_options, run_plasm, cell_options
import os
import numpy as np
import matplotlib.pyplot as plt
import shutil
def parse_args():
    """Parse command-line options for the ORB template generator.

    Returns the argparse namespace with ``niter`` forced to 1 and the
    ORB cell factory attached as ``orb_factory``.
    """
    import argparse
    arg_parser = argparse.ArgumentParser(description='Test orb on images.')
    arg_parser.add_argument('-i,--input', dest='input',
                            help='The input dir. %(default)s', default='./images')
    arg_parser.add_argument('-o,--output', dest='output', type=str,
                            help='The output directory for this template. Default: %(default)s', default='./')
    orb_factory = cell_options(arg_parser, ORB, 'ORB')
    scheduler_options(arg_parser.add_argument_group('Scheduler'), default_niter=1)
    parsed = arg_parser.parse_args()
    # A template is generated from a single pass over the image.
    parsed.niter = 1
    parsed.orb_factory = orb_factory
    return parsed
# NOTE: this script uses Python 2 print statements; run it with Python 2.
options = parse_args()
image = imread(image_file=options.input)
# Keep a copy of the training image alongside the generated template data.
shutil.copy(options.input, os.path.join(options.output, 'train.png'))
orb_m = options.orb_factory(options)
# NOTE(review): rgb2gray is constructed but never connected to the plasm
# below -- confirm it is intentionally unused.
rgb2gray = cvtColor (flag=Conversion.RGB2GRAY)
kpts2mat = KeypointsToMat()
ptsTo3d = PointsTo3d(scale=0.0254 / 100) #100 dpi ~> 25.4 mm/ 100 px
plasm = ecto.Plasm()
# Wire the pipeline: image -> ORB keypoints -> 2D points -> 3D points.
plasm.connect(image['image'] >> orb_m['image'],
              orb_m['keypoints'] >> kpts2mat['keypoints'],
              kpts2mat['points'] >> ptsTo3d['points']
              )
if not os.path.exists(options.output):
    print 'making ', options.output
    os.makedirs(options.output)
#training
points3d_writer = MatWriter(filename=os.path.join(options.output, 'points3d.yaml'))
points_writer = MatWriter(filename=os.path.join(options.output, 'points.yaml'))
descriptor_writer = MatWriter(filename=os.path.join(options.output, 'descriptors.yaml'))
R_writer = MatWriter(filename=os.path.join(options.output, 'R.yaml'))
T_writer = MatWriter(filename=os.path.join(options.output, 'T.yaml'))
# Identity rotation and zero translation: the template pose is the origin.
m = cv_bp.Mat()
m.fromarray(np.eye(3, 3, dtype=np.float64))
R_writer.inputs.mat = m
m = cv_bp.Mat()
m.fromarray(np.zeros((3, 1), dtype=np.float64))
T_writer.inputs.mat = m
# Route each pipeline output into its YAML writer.
for y, x in (
    (orb_m['descriptors'], descriptor_writer),
    (kpts2mat['points'], points_writer),
    (ptsTo3d['points3d'], points3d_writer)
    ):
    plasm.connect(y >> x['mat'],
                  )
T_writer.process()
R_writer.process()
run_plasm(options, plasm, locals=vars())
| apache-2.0 |
ZENGXH/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
                                    target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
    u'This is a language detection test.',
    u'Ceci est un test de d\xe9tection de la langue.',
    u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
    print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
lifei96/Medium-crawler-with-data-analyzer | User_Crawler/xgb_pr.py | 2 | 1512 | # -*- coding: utf-8 -*-
import pandas as pd
import xgboost as xgb
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
import numpy as np
def f1(preds, dtrain):
    """Custom xgboost eval metric: negated weighted F1 score.

    The score is negated so that a better F1 produces a smaller metric
    value (xgboost's early stopping treats lower feval results as better
    by default -- confirm against the xgboost version in use).
    """
    return 'f1-score', -f1_score(dtrain.get_label(), preds, average='weighted')
def xgb_pr():
    """Train a 2-class xgboost softmax model on dataset_1 with F1 early stopping."""
    train_set = pd.read_csv('./data/prediction/dataset_1_train.csv')
    test_set = pd.read_csv('./data/prediction/dataset_1_test.csv')
    # 'class_1' is the label column; everything else is a feature.
    y_train = np.array(train_set['class_1'].values.tolist())
    y_test = np.array(test_set['class_1'].values.tolist())
    train_set = train_set.drop('class_1', axis=1)
    test_set = test_set.drop('class_1', axis=1)
    X_train = np.array(train_set.values.tolist())
    X_test = np.array(test_set.values.tolist())
    dtrain = xgb.DMatrix(X_train, label=y_train)
    dtest = xgb.DMatrix(X_test, label=y_test)
    param = {'learning_rate': 0.1,
             'n_estimators': 100,
             'max_depth': 6,
             'min_child_weight': 1,
             'gamma': 0,
             'subsample': 0.8,
             'colsample_bytree': 0.8,
             'reg_alpha': 0,
             'objective': 'multi:softmax',
             'num_class': 2,
             'seed': 7,
             'silent': 1}
    # NOTE(review): early stopping is evaluated on the test set, which leaks
    # test information into model selection -- consider a validation split.
    evallist = [(dtest, 'eval')]
    bst = xgb.train(param, dtrain, num_boost_round=300, evals=evallist, feval=f1, early_stopping_rounds=50)
    print (param)
    preds = bst.predict(dtest)
    #print (classification_report(y_test, preds, digits=6))
# Run the training when executed as a script.
if __name__ == '__main__':
    xgb_pr()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.