repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
Winand/pandas | pandas/errors/__init__.py | 6 | 1697 | # flake8: noqa
"""
Expose public exceptions & warnings
"""
from pandas._libs.tslib import OutOfBoundsDatetime
class PerformanceWarning(Warning):
    """
    Warning raised when there is a possible performance impact.
    """
class UnsupportedFunctionCall(ValueError):
    """
    Exception raised when attempting to call a numpy function
    on a pandas object, but that function is not supported by
    the object, e.g. ``np.cumsum(groupby_object)``.
    """
class UnsortedIndexError(KeyError):
    """
    Error raised when attempting to get a slice of a MultiIndex,
    and the index has not been lexsorted. Subclass of `KeyError`.

    .. versionadded:: 0.20.0
    """
class ParserError(ValueError):
    """
    Exception that is raised when an error is encountered while
    parsing in `pd.read_csv`.
    """
class DtypeWarning(Warning):
    """
    Warning that is raised for a dtype incompatibility. This
    can happen whenever `pd.read_csv` encounters non-
    uniform dtypes in a column(s) of a given CSV file.
    """
class EmptyDataError(ValueError):
    """
    Exception that is raised in `pd.read_csv` (by both the C and
    Python engines) when empty data or header is encountered.
    """
class ParserWarning(Warning):
    """
    Warning that is raised in `pd.read_csv` whenever it is necessary
    to change parsers (generally from 'c' to 'python') contrary to the
    one specified by the user due to lack of support or functionality for
    parsing particular attributes of a CSV file with the requested engine.
    """
class MergeError(ValueError):
    """
    Error raised when problems arise during merging due to problems
    with input data. Subclass of `ValueError`.
    """
| bsd-3-clause |
dpressel/baseline | scripts/compare_calibrations.py | 1 | 6096 | """Plot and compare the metrics from various calibrated models.
This script creates the following:
* A csv file with columns for the Model Type (the label), and the various calibration metrics
* A grid of graphs, the first row is confidence histograms for each model, the second row is
the reliability diagram for that model.
* If the problem was binary it creates calibration curves for each model all plotted on the same graph.
Matplotlib is required to use this script. The `tabulate` package is recommended but not required.
The input of this script is pickle files created by `$MEAD-BASELINE/api-examples/analyze_calibration.py`
"""
import csv
import pickle
import argparse
from collections import Counter
from eight_mile.calibration import (
expected_calibration_error,
maximum_calibration_error,
reliability_diagram,
reliability_curve,
confidence_histogram,
Bins,
)
import matplotlib.pyplot as plt
# Command-line interface. Each entry in --stats is paired positionally with
# the label at the same position in --labels.
parser = argparse.ArgumentParser(description="Compare calibrated models by grouping visualizations and creating a table.")
parser.add_argument("--stats", nargs="+", default=[], required=True, help="A list of pickles created by the analyze_calibration.py script to compare.")
parser.add_argument("--labels", nargs="+", default=[], required=True, help="A list of labels to assign to each pickle, should have the same number of arguments as --stats")
parser.add_argument("--metrics-output", "--metrics_output", default="table.csv", help="Filename to save the resulting metrics into as a csv")
parser.add_argument("--curve-output", "--curve_output", default="curve.png", help="Filename to save the reliability curves graph to.")
parser.add_argument("--diagram-output", "--diagram_output", default="diagram.png", help="Filename to save the reliability diagrams and confidence histograms too.")
parser.add_argument("--figsize", default=10, type=int, help="The size of the figure, controls how tall the figure is.")
args = parser.parse_args()
# Make sure the labels and stats are aligned
if len(args.stats) != len(args.labels):
    raise ValueError(f"You need a label for each calibration stat you load. Got {len(args.stats)} stats and {len(args.labels)} labels")

# Make sure the labels are unique (they are used as row/column identifiers)
counts = Counter(args.labels)
if any(v != 1 for v in counts.values()):
    raise ValueError(f"All labels must be unique, found duplicates of {[k for k, v in counts.items() if v != 1]}")

# Load the calibration stats: each pickle holds one dict per model as
# produced by analyze_calibration.py.
stats = []
for file_name in args.stats:
    with open(file_name, "rb") as f:
        stats.append(pickle.load(f))
# Make sure there is the same number of bins for each model. Only fields
# that hold `Bins` in the first stats dict are compared; entries that are
# None in another model (e.g. a missing binary calibration) are skipped.
for field in stats[0]:
    if not isinstance(stats[0][field], Bins):
        continue
    lengths = []
    for stat in stats:
        if stat[field] is None:
            continue
        lengths.append(len(stat[field].accs))
    if len(set(lengths)) != 1:
        raise ValueError(f"It is meaningless to compare calibrations with different numbers of bins: Mismatch was found for {field}")
def get_metrics(data, model_type):
    """Summarize calibration quality for a single model as one table row.

    :param data: A bins object exposing `accs`, `confs`, and `counts`.
    :param model_type: The display label for this model.
    :returns: dict with the label plus ECE and MCE as percentages.
    """
    row = {"Model Type": model_type}
    for metric_name, metric_fn in (
        ("ECE", expected_calibration_error),
        ("MCE", maximum_calibration_error),
    ):
        row[metric_name] = metric_fn(data.accs, data.confs, data.counts) * 100
    return row
# Calculate the metrics based on the multiclass calibration bins
metrics = [get_metrics(stat['multiclass'], label) for stat, label in zip(stats, args.labels)]

# Print the metrics
try:
    # If you have tabulate installed it prints a nice postgres style table
    from tabulate import tabulate
    print(tabulate(metrics, headers="keys", floatfmt=".3f", tablefmt="psql"))
except ImportError:
    # Fallback: plain key/value dump when `tabulate` is not installed.
    for metric in metrics:
        for k, v in metric.items():
            if isinstance(v, float):
                print(f"{k}: {v:.3f}")
            else:
                print(f"{k}: {v}")

# Write the metrics to a csv to look at later
with open(args.metrics_output, "w", newline="") as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=list(metrics[0].keys()), quoting=csv.QUOTE_MINIMAL, delimiter=",", dialect="unix")
    writer.writeheader()
    writer.writerows(metrics)
# Plot the histograms and graphs for each model: one column per model,
# row 0 is the confidence histogram, row 1 the reliability diagram.
f, ax = plt.subplots(2, len(metrics), figsize=(args.figsize * len(metrics) // 2, args.figsize), sharey=True, sharex=True)
for i, (stat, label) in enumerate(zip(stats, args.labels)):
    # Only the first column keeps its default y labels; every other column
    # passes y_label=None so the shared axes are not relabeled. (The two
    # branches of the original if/else were otherwise identical.)
    extra = {} if i == 0 else {"y_label": None}
    confidence_histogram(
        stat['histogram'].edges,
        stat['histogram'].counts,
        acc=stat['acc'],
        avg_conf=stat['conf'],
        title=f"{label}\nConfidence Distribution",
        x_label=None,
        ax=ax[0][i],
        **extra,
    )
    reliability_diagram(
        stat['multiclass'].accs,
        stat['multiclass'].confs,
        stat['multiclass'].edges,
        num_classes=stat['num_classes'],
        ax=ax[1][i],
        **extra,
    )
f.savefig(args.diagram_output)
plt.show()
# Plot reliability curves for binary classification models, all on a single
# shared axis so the models can be compared directly.
if stats[0]['num_classes'] == 2:
    f, ax = plt.subplots(1, 1, figsize=(args.figsize, args.figsize))
    # One color per model, drawn from matplotlib's default color cycle.
    for stat, label, color in zip(stats, args.labels, plt.rcParams['axes.prop_cycle'].by_key()['color']):
        reliability_curve(
            stat['binary'].accs,
            stat['binary'].confs,
            color=color,
            label=label,
            ax=ax
        )
    f.savefig(args.curve_output)
    plt.show()
| apache-2.0 |
WebMole/crawler-benchmark | tests/test_home.py | 1 | 2095 | import matplotlib
matplotlib.use('Agg')
import os
import tempfile
import pytest
import project
@pytest.fixture
def client():
    # Per-test fixture: point the app at a throwaway temp-file database,
    # enable TESTING mode, and hand out a test client. init_db() is run
    # inside an app context (presumably to create the schema — confirm in
    # the project module).
    db_fd, project.app.config['DATABASE'] = tempfile.mkstemp()
    project.app.config['TESTING'] = True
    client = project.app.test_client()
    with project.app.app_context():
        project.init_db()
    yield client
    # Teardown after the test: close and remove the temporary database file.
    os.close(db_fd)
    os.unlink(project.app.config['DATABASE'])
# Smoke tests: each route should respond with the expected HTTP status.
# These use the response's public ``status_code`` attribute rather than
# the private ``_status_code`` that was read before.
def test_home(client):
    """The index page renders successfully."""
    response = client.get('/')
    assert response.status_code == 200


def test_blog(client):
    response = client.get('/modes/blog/')
    assert response.status_code == 200


def test_forum(client):
    response = client.get('/modes/forum/')
    assert response.status_code == 200


def test_newsfeed(client):
    response = client.get('/modes/newsfeed/')
    assert response.status_code == 200


def test_forms(client):
    response = client.get('/modes/forms/')
    assert response.status_code == 200


def test_catalog(client):
    response = client.get('/modes/catalog/')
    assert response.status_code == 200


def test_errors(client):
    response = client.get('/trap/errors/')
    assert response.status_code == 200


def test_random(client):
    response = client.get('/trap/random/')
    assert response.status_code == 200


def test_outgoing(client):
    response = client.get('/trap/outgoing/')
    assert response.status_code == 200


def test_login(client):
    response = client.get('/trap/login/')
    assert response.status_code == 200


def test_cookies(client):
    response = client.get('/trap/cookies/')
    assert response.status_code == 200


def test_recaptcha_without_key(client):
    """Without a configured recaptcha key the view is expected to error."""
    response = client.get('/trap/recaptcha/')
    assert response.status_code == 500


def test_depth(client):
    response = client.get('/trap/depth/')
    assert response.status_code == 200


def test_calendar(client):
    response = client.get('/trap/calendar/')
    assert response.status_code == 200


def test_registration(client):
    response = client.get('/trap/registration/')
    assert response.status_code == 200
| gpl-2.0 |
kennethdecker/MagnePlane | paper/images/trade_scripts/pressure_zoom_writer.py | 2 | 7103 | import numpy as np
import matplotlib.pylab as plt
from openmdao.api import Group, Problem, IndepVarComp
from hyperloop.Python import tube_and_pod
# def create_problem(component):
# root = Group()
# prob = Problem(root)
# prob.root.add('comp', component)
# return prob
# class PressureTradeStudy(object):
# def test_case1_vs_npss(self):
# component = tube_and_pod.TubeAndPod()
# prob = create_problem(component)
if __name__ == '__main__':
    prob = Problem()
    root = prob.root = Group()
    root.add('TubeAndPod', tube_and_pod.TubeAndPod())

    # Independent design variables fed into the TubeAndPod model.
    # BUG FIX: 'h_lev' previously used the metadata key 'unit' instead of
    # 'units' (the key every other entry here uses), so its units metadata
    # was not applied.
    params = (('tube_pressure', 850.0, {'units' : 'Pa'}),
              ('pressure_initial', 760.2, {'units' : 'torr'}),
              ('num_pods', 18.),
              ('pwr', 18.5, {'units' : 'kW'}),
              ('speed', 163333.3, {'units' : 'L/min'}),
              ('time_down', 1440.0, {'units' : 'min'}),
              ('gamma', .8, {'units' : 'unitless'}),
              ('pump_weight', 715.0, {'units' : 'kg'}),
              ('electricity_price', 0.13, {'units' : 'USD/(kW*h)'}),
              ('tube_thickness', .0415014, {'units' : 'm'}),
              ('tube_length', 480000., {'units' : 'm'}),
              ('vf', 286.85, {'units' : 'm/s'}),
              ('v0', 286.85-15.0, {'units' : 'm/s'}),
              ('time_thrust', 1.5, {'units' : 's'}),
              ('pod_mach', .8, {'units': 'unitless'}),
              ('comp_inlet_area', 2.3884, {'units': 'm**2'}),
              ('comp_PR', 6.0, {'units': 'unitless'}),
              ('PsE', 0.05588, {'units': 'psi'}),
              ('des_time', 1.0),
              ('time_of_flight', 1.0),
              ('motor_max_current', 800.0),
              ('motor_LD_ratio', 0.83),
              ('motor_oversize_factor', 1.0),
              ('inverter_efficiency', 1.0),
              ('battery_cross_section_area', 15000.0, {'units': 'cm**2'}),
              ('n_passengers', 28.),
              ('A_payload', 2.3248, {'units' : 'm**2'}),
              ('r_pylon', 0.232, {'units' : 'm'}),
              ('h', 10.0, {'units' : 'm'}),
              ('vel_b', 23.0, {'units': 'm/s'}),
              ('h_lev', 0.01, {'units': 'm'}),
              ('vel', 286.86, {'units': 'm/s'}),
              ('pod_period', 120.0, {'units' : 's'}),
              ('ib', .04),
              ('bm', 20.0, {'units' : 'yr'}),
              ('track_length', 600.0, {'units' : 'km'}),
              ('avg_speed', 286.86, {'units' : 'm/s'}),
              ('depth', 10.0, {'units' : 'm'}),
              ('land_length', 600.0e3, {'units' : 'm'}),
              ('water_length', 0.0e3, {'units' : 'm'}),
              ('W', 1.0, {'units' : 'kg/s'}),
              ('operating_time', 16.0*3600.0, {'units' : 's'})
              )

    prob.root.add('des_vars', IndepVarComp(params))
prob.root.connect('des_vars.tube_pressure', 'TubeAndPod.tube_pressure')
prob.root.connect('des_vars.pressure_initial', 'TubeAndPod.pressure_initial')
prob.root.connect('des_vars.num_pods', 'TubeAndPod.num_pods')
prob.root.connect('des_vars.pwr','TubeAndPod.pwr')
prob.root.connect('des_vars.speed', 'TubeAndPod.speed')
prob.root.connect('des_vars.time_down', 'TubeAndPod.time_down')
prob.root.connect('des_vars.gamma','TubeAndPod.gamma')
prob.root.connect('des_vars.pump_weight','TubeAndPod.pump_weight')
prob.root.connect('des_vars.electricity_price','TubeAndPod.electricity_price')
prob.root.connect('des_vars.tube_thickness', 'TubeAndPod.tube_thickness')
prob.root.connect('des_vars.tube_length', 'TubeAndPod.tube_length')
prob.root.connect('des_vars.h', 'TubeAndPod.h')
prob.root.connect('des_vars.r_pylon', 'TubeAndPod.r_pylon')
prob.root.connect('des_vars.vf', 'TubeAndPod.vf')
prob.root.connect('des_vars.v0', 'TubeAndPod.v0')
prob.root.connect('des_vars.time_thrust', 'TubeAndPod.time_thrust')
prob.root.connect('des_vars.pod_mach', 'TubeAndPod.pod_mach')
prob.root.connect('des_vars.comp_inlet_area', 'TubeAndPod.comp_inlet_area')
prob.root.connect('des_vars.comp_PR', 'TubeAndPod.comp.map.PRdes')
prob.root.connect('des_vars.PsE', 'TubeAndPod.nozzle.Ps_exhaust')
prob.root.connect('des_vars.des_time', 'TubeAndPod.des_time')
prob.root.connect('des_vars.time_of_flight', 'TubeAndPod.time_of_flight')
prob.root.connect('des_vars.motor_max_current', 'TubeAndPod.motor_max_current')
prob.root.connect('des_vars.motor_LD_ratio', 'TubeAndPod.motor_LD_ratio')
prob.root.connect('des_vars.motor_oversize_factor', 'TubeAndPod.motor_oversize_factor')
prob.root.connect('des_vars.inverter_efficiency', 'TubeAndPod.inverter_efficiency')
prob.root.connect('des_vars.battery_cross_section_area', 'TubeAndPod.battery_cross_section_area')
prob.root.connect('des_vars.n_passengers', 'TubeAndPod.n_passengers')
prob.root.connect('des_vars.A_payload', 'TubeAndPod.A_payload')
prob.root.connect('des_vars.vel_b', 'TubeAndPod.vel_b')
prob.root.connect('des_vars.h_lev', 'TubeAndPod.h_lev')
prob.root.connect('des_vars.vel', 'TubeAndPod.vel')
prob.root.connect('des_vars.pod_period', 'TubeAndPod.cost.pod_period')
prob.root.connect('des_vars.ib', 'TubeAndPod.cost.ib')
prob.root.connect('des_vars.bm', 'TubeAndPod.cost.bm')
prob.root.connect('des_vars.track_length', 'TubeAndPod.track_length')
prob.root.connect('des_vars.avg_speed', 'TubeAndPod.cost.avg_speed')
prob.root.connect('des_vars.land_length', 'TubeAndPod.land_length')
prob.root.connect('des_vars.water_length', 'TubeAndPod.water_length')
prob.root.connect('des_vars.operating_time', 'TubeAndPod.operating_time')
prob.root.connect('des_vars.W', 'TubeAndPod.fl_start.W')
prob.setup()
p_tunnel = np.linspace(40.0, 500.0, num = 50, endpoint = False)
A_tube = np.zeros((1, len(p_tunnel)))
Re = np.zeros((1, len(p_tunnel)))
T_tunnel = np.zeros((1, len(p_tunnel)))
L_pod = np.zeros((1, len(p_tunnel)))
Drag = np.zeros((1, len(p_tunnel)))
power = np.zeros((1, len(p_tunnel)))
steady_vac = np.zeros((1,len(p_tunnel)))
total_energy = np.zeros((1, len(p_tunnel)))
thrust = np.zeros((1, len(p_tunnel)))
for i in range(len(p_tunnel)):
prob['des_vars.tube_pressure'] = p_tunnel[i]
prob.run()
A_tube[0,i] = prob['TubeAndPod.pod.A_tube']
Re[0,i] = prob['TubeAndPod.pod.pod_mach.Re']
T_tunnel[0,i] = prob['TubeAndPod.tube.temp_boundary']
L_pod[0,i] = prob['TubeAndPod.L_pod']
power[0,i] = -1.0*prob['TubeAndPod.pod.cycle.comp.power']
steady_vac[0,i] = -1.0*prob['TubeAndPod.tube.comp.power']
total_energy[0,i] = prob['TubeAndPod.cost.total_energy_cost']
print(i)
np.savetxt('../../../paper/images/data_files/pressure_zoom/p_tunnel.txt', p_tunnel, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/Re.txt', Re, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/A_tube.txt', A_tube, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/T_tunnel.txt', T_tunnel, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/L_pod.txt', L_pod, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/comp_power.txt', power, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/vac_power.txt', steady_vac, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/total_energy.txt', total_energy, fmt = '%f', delimiter = '\t', newline = '\r\n')
| apache-2.0 |
poppingtonic/BayesDB | bayesdb/tests/experiments/fills_in_the_blanks.py | 2 | 6247 | #
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Lead Developers: Jay Baxter and Dan Lovell
# Authors: Jay Baxter, Dan Lovell, Baxter Eaves, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib
matplotlib.use('Agg')
from bayesdb.client import Client
import experiment_utils as eu
import random
import numpy
import pylab
import time
import os
def run_experiment(argin):
    """Measure imputation error for three BayesDB model configurations.

    For each proportion of missing data, a CSV with values removed is
    generated and analyzed under the CrossCat ('cc'), CRP mixture ('crp'),
    and naive Bayes ('nb') configurations; the removed entries are then
    imputed with INFER and scored against the original table by MSE.

    ``argin`` is a dict of experiment settings (see ``gen_parser``).
    Returns a dict with one MSE array per configuration plus the missing
    proportions and the config used.
    """
    num_iters = argin["num_iters"]
    num_chains = argin["num_chains"]
    num_rows = argin["num_rows"]
    num_cols = argin["num_cols"]
    num_views = argin["num_views"]
    num_clusters = argin["num_clusters"]
    prop_missing = argin["prop_missing"]
    impute_samples = argin["impute_samples"]
    separation = argin["separation"]
    ct_kernel = argin["ct_kernel"]

    seed = argin["seed"]
    if seed > 0 :
        random.seed(seed)

    filename = "exp_fills_in_ofile.csv"
    table_name = 'exp_fills_in'

    argin['cctypes'] = ['continuous']*num_cols
    argin['separation'] = [argin['separation']]*num_views

    eu.gen_data(filename, argin, save_csv=True)

    # generate a new csv for each missing-data proportion
    all_filenames = []
    all_indices = []
    for p in prop_missing:
        data_filename, indices, col_names, extra = eu.gen_missing_data_csv(filename,
                                                                           p, [], True)
        all_indices.append(indices)
        all_filenames.append(data_filename)

    # get the starting table so we can calculate errors
    # (extra comes from the last loop iteration; the filled array is the
    # same complete table for every proportion)
    T_array = extra['array_filled']
    num_rows, num_cols = T_array.shape

    # create a client
    client = Client()

    # set up a dict for the different config data
    result = dict()
    result['cc'] = numpy.zeros(len(prop_missing))
    result['crp'] = numpy.zeros(len(prop_missing))
    result['nb'] = numpy.zeros(len(prop_missing))

    # do analyses
    for p in range(len(prop_missing)):
        this_indices = all_indices[p]
        this_filename = all_filenames[p]
        for config in ['cc', 'crp', 'nb']:
            config_string = eu.config_map[config]
            table = table_name + '-' + config

            # drop old btable, create a new one with the new data and init models
            client('DROP BTABLE %s;' % table, yes=True)
            client('CREATE BTABLE %s FROM %s;' % (table, this_filename))
            client('INITIALIZE %i MODELS FOR %s %s;' % (num_chains, table, config_string))

            if ct_kernel == 1:
                # BUG FIX: the kernel keyword was misspelled "KENEL".
                client('ANALYZE %s FOR %i ITERATIONS WITH MH KERNEL WAIT;' % (table, num_iters) )
            else:
                client('ANALYZE %s FOR %i ITERATIONS WAIT;' % (table, num_iters) )

            MSE = 0.0
            count = 0.0

            # impute each index in indices and calculate the squared error
            for col in range(0,num_cols):
                col_name = col_names[col]
                # confidence is set to zero so that a value is always returned
                out = client('INFER %s from %s WITH CONFIDENCE %f WITH %i SAMPLES;' % (col_name, table, 0, impute_samples), pretty=False, pandas_output=False )
                data = out[0]['data']

                # calculate MSE over the entries removed from this column
                for row, tcol in zip(this_indices[0], this_indices[1]):
                    if tcol == col:
                        MSE += ( T_array[row,col] - data[row][1] )**2.0
                        count += 1.0

            result[config][p] = MSE/count
            # parenthesized print works under both Python 2 and 3
            print("error = %f" % result[config][p])

    retval = dict()
    retval['MSE_naive_bayes_indexer'] = result['nb']
    retval['MSE_crp_mixture_indexer'] = result['crp']
    retval['MSE_crosscat_indexer'] = result['cc']
    retval['prop_missing'] = prop_missing
    retval['config'] = argin

    return retval
def gen_parser():
    """Build the command-line parser for this experiment.

    ``argparse`` is imported locally so the function also works when the
    module is imported (the file only imports argparse inside its
    ``__main__`` block, which would not have run yet).
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_iters', default=300, type=int)
    parser.add_argument('--num_chains', default=4, type=int)
    parser.add_argument('--num_rows', default=300, type=int)
    parser.add_argument('--num_cols', default=8, type=int)
    parser.add_argument('--num_clusters', default=4, type=int)
    parser.add_argument('--impute_samples', default=100, type=int)  # samples for IMPUTE
    parser.add_argument('--num_views', default=2, type=int)
    parser.add_argument('--separation', default=.9, type=float)
    parser.add_argument('--prop_missing', nargs='+', type=float, default=[.1, .25, .5, .75, .9])  # list of missing proportions
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--ct_kernel', default=0, type=int)  # 0 for gibbs, 1 for MH
    parser.add_argument('--no_plots', action='store_true')
    return parser
if __name__ == "__main__":
import argparse
import experiment_runner.experiment_utils as eru
from experiment_runner.ExperimentRunner import ExperimentRunner, propagate_to_s3
parser = gen_parser()
args = parser.parse_args()
argsdict = eu.parser_args_to_dict(args)
generate_plots = not argsdict['no_plots']
results_filename = 'fills_in_the_blanks_results'
dirname_prefix = 'fills_in_the_blanks_break'
er = ExperimentRunner(run_experiment, dirname_prefix=dirname_prefix, bucket_str='experiment_runner', storage_type='fs')
er.do_experiments([argsdict])
if generate_plots:
for id in er.frame.index:
result = er._get_result(id)
this_dirname = eru._generate_dirname(dirname_prefix, 10, result['config'])
filename_img = os.path.join(dirname_prefix, this_dirname, results_filename+'.png')
eu.plot_fills_in_the_blanks(result, filename=filename_img)
pass
pass
| apache-2.0 |
rvraghav93/scikit-learn | sklearn/tests/test_isotonic.py | 24 | 14350 | import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    """Fit must not depend on sample order (regression test for a missing
    sort of the sample weights)."""
    model = IsotonicRegression()
    xs = [1, 2, 3, 4, 5, 6, 7]
    ys = [1, 41, 51, 1, 2, 5, 24]
    weights = [1, 2, 3, 4, 5, 6, 7]
    xs_shuf, ys_shuf, weights_shuf = shuffle(xs, ys, weights, random_state=0)
    fitted = model.fit_transform(xs, ys, sample_weight=weights)
    fitted_shuf = model.fit(xs_shuf, ys_shuf, sample_weight=weights_shuf).transform(xs)
    assert_array_equal(fitted, fitted_shuf)
def test_check_increasing_small_number_of_samples():
    # Check that we get increasing=True and no warnings even with very few
    # samples (presumably too few for the CI check — see check_increasing).
    x = [0, 1, 2]
    y = [1, 1.1, 1.05]

    is_increasing = assert_no_warnings(check_increasing, x, y)

    assert_true(is_increasing)


def test_check_increasing_up():
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1.5, 2.77, 8.99, 8.99, 50]

    # Check that we got increasing=True and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)

    assert_true(is_increasing)


def test_check_increasing_up_extreme():
    # Perfectly linear increasing relationship.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1, 2, 3, 4, 5]

    # Check that we got increasing=True and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)

    assert_true(is_increasing)


def test_check_increasing_down():
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1.5, -2.77, -8.99, -8.99, -50]

    # Check that we got increasing=False and no warnings
    is_increasing = assert_no_warnings(check_increasing, x, y)

    assert_false(is_increasing)


def test_check_increasing_down_extreme():
    # Perfectly linear decreasing relationship.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, -2, -3, -4, -5]

    # Check that we got increasing=False and no warnings
    is_increasing = assert_false(is_increasing) if False else assert_no_warnings(check_increasing, x, y)

    assert_false(is_increasing)


def test_check_ci_warn():
    # Alternating signs: no clear monotone trend.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, 2, -3, 4, -5]

    # Check that we got increasing=False and CI interval warning
    is_increasing = assert_warns_message(UserWarning, "interval",
                                         check_increasing,
                                         x, y)

    assert_false(is_increasing)
def test_isotonic_regression():
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    y_ = np.array([3, 6, 6, 8, 8, 8, 10])  # expected pooled (PAVA) fit
    assert_array_equal(y_, isotonic_regression(y))

    # A globally decreasing input pools to a single mean value.
    y = np.array([10, 0, 2])
    y_ = np.array([4, 4, 4])
    assert_array_equal(y_, isotonic_regression(y))

    x = np.arange(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    ir.fit(x, y)
    # fit().transform() and fit_transform() must agree; transform == predict
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(ir.transform(x), ir.predict(x))

    # check that it is immune to permutation
    perm = np.random.permutation(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    assert_array_equal(ir.fit_transform(x[perm], y[perm]),
                       ir.fit_transform(x, y)[perm])
    assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])

    # check we don't crash when all x are equal:
    ir = IsotonicRegression()
    assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
    # Setup examples with ties on minimum
    x = [1, 1, 2, 3, 4, 5]
    y = [1, 2, 3, 4, 5, 6]
    y_true = [1.5, 1.5, 3, 4, 5, 6]  # the two tied x=1 targets are averaged

    # Check that we get identical results for fit/transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(y_true, ir.fit_transform(x, y))


def test_isotonic_regression_ties_max():
    # Setup examples with ties on maximum
    x = [1, 2, 3, 4, 5, 5]
    y = [1, 2, 3, 4, 5, 6]
    y_true = [1, 2, 3, 4, 5.5, 5.5]  # the two tied x=5 targets are averaged

    # Check that we get identical results for fit/transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
    """
    Test isotonic regression fit, transform and fit_transform
    against the "secondary" ties method and "pituitary" data from R
    "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
    Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
    (PAVA) and Active Set Methods

    Set values based on pituitary example and
    the following R command detailed in the paper above:
    > library("isotone")
    > data("pituitary")
    > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
    > res1$x

    `isotone` version: 1.0-2, 2014-09-07
    R version: R version 3.1.1 (2014-07-10)
    """
    x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
    y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
    y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
              22.22222, 22.22222, 22.22222, 24.25, 24.25]

    # Check fit, transform and fit_transform. Compared to 4 decimal places
    # because the reference values above are rounded.
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_almost_equal(ir.transform(x), y_true, 4)
    assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    y_ = IsotonicRegression(increasing=False).fit_transform(
        np.arange(len(y)), y)
    # All consecutive differences must be >= 0, i.e. the fitted values
    # are non-increasing left to right.
    assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))


def test_isotonic_regression_auto_decreasing():
    # Set y and x for decreasing
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    x = np.arange(len(y))

    # Create model and fit_transform
    ir = IsotonicRegression(increasing='auto')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        y_ = ir.fit_transform(x, y)
        # work-around for pearson divide warnings in scipy <= 0.17.0
        assert_true(all(["invalid value encountered in "
                         in str(warn.message) for warn in w]))

    # Check that relationship decreases
    is_increasing = y_[0] < y_[-1]
    assert_false(is_increasing)


def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
    y = np.array([5, 6.1, 6, 7, 10, 9, 10])
    x = np.arange(len(y))

    # Create model and fit_transform
    ir = IsotonicRegression(increasing='auto')
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        y_ = ir.fit_transform(x, y)
        # work-around for pearson divide warnings in scipy <= 0.17.0
        assert_true(all(["invalid value encountered in "
                         in str(warn.message) for warn in w]))

    # Check that relationship increases
    is_increasing = y_[0] < y_[-1]
    assert_true(is_increasing)
def test_assert_raises_exceptions():
    # Mismatched lengths and 2-D inputs must raise ValueError.
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)

    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
    assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
    assert_raises(ValueError, ir.transform, rng.randn(3, 10))


def test_isotonic_sample_weight_parameter_default_value():
    # check if default value of sample_weight parameter is one
    ir = IsotonicRegression()
    # random test data
    rng = np.random.RandomState(42)
    n = 100
    x = np.arange(n)
    y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
    # explicit unit weights must match the sample_weight default
    weights = np.ones(n)
    y_set_value = ir.fit_transform(x, y, sample_weight=weights)
    y_default_value = ir.fit_transform(x, y)

    assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # check that y_min/y_max bounds are applied
    ir = IsotonicRegression(y_min=2, y_max=4)
    n = 6
    x = np.arange(n)
    y = np.arange(n)
    y_test = [2, 2, 2, 3, 4, 4]  # identity fit clipped into [2, 4]
    y_result = np.round(ir.fit_transform(x, y))
    assert_array_equal(y_result, y_test)


def test_isotonic_sample_weight():
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]  # weighted PAVA solution
    received_y = ir.fit_transform(x, y, sample_weight=sample_weight)

    assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))

    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)

    # Check that an exception is thrown for values outside the training range
    assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])


def test_isotonic_regression_oob_clip():
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))

    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)

    # Predict from training and test x and check that min/max match:
    # out-of-range inputs are clipped to the boundary predictions.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    y2 = ir.predict(x)

    assert_equal(max(y1), max(y2))
    assert_equal(min(y1), min(y2))


def test_isotonic_regression_oob_nan():
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))

    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
    ir.fit(x, y)

    # Predict from training and test x and check that we have two NaNs
    # for the two out-of-range inputs.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    assert_equal(sum(np.isnan(y1)), 2)


def test_isotonic_regression_oob_bad():
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))

    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")

    # Make sure that we throw an error for bad out_of_bounds value
    assert_raises(ValueError, ir.fit, x, y)


def test_isotonic_regression_oob_bad_after():
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))

    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")

    # Make sure that we throw an error for bad out_of_bounds value in transform
    # even when it is changed after a successful fit.
    ir.fit(x, y)
    ir.out_of_bounds = "xyz"
    assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))

    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)

    # A pickle round-trip must preserve predictions.
    ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
    ir2 = pickle.loads(ir_ser)
    np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))


def test_isotonic_duplicate_min_entry():
    # A duplicated minimum x must not yield NaN/inf predictions.
    x = [0, 0, 1]
    y = [0, 0, 1]

    ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
    ir.fit(x, y)
    all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
    assert_true(all_predictions_finite)
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
    # Regression test for @ogrisel's issue:
    # https://github.com/scikit-learn/scikit-learn/issues/4297
    rng = np.random.RandomState(42)  # deterministic RNG
    model = IsotonicRegression()
    n_samples = 50
    x = np.linspace(-3, 3, n_samples)
    y = x + rng.uniform(size=n_samples)
    # Random weights with a contiguous run zeroed out.
    weights = rng.uniform(size=n_samples)
    weights[5:8] = 0
    model.fit(x, y, sample_weight=weights)
    # Refitting with the same zero weights hangs in the failure case.
    model.fit(x, y, sample_weight=weights)
def test_fast_predict():
    # The faster prediction change must not affect out-of-sample predictions:
    # https://github.com/scikit-learn/scikit-learn/pull/6206
    rng = np.random.RandomState(123)
    n_samples = 10 ** 3
    # Training inputs over the [-10, 10) range with noisy sigmoid labels.
    # NOTE: the rng call order below must match between both models' data.
    X_train = 20.0 * rng.rand(n_samples) - 10
    y_train = np.less(
        rng.rand(n_samples),
        1.0 / (1.0 + np.exp(-X_train))
    ).astype('int64')
    weights = rng.rand(n_samples)
    # Everything should still work when some weights are exactly zero.
    weights[rng.rand(n_samples) < 0.1] = 0
    reference = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
    candidate = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
    # Build the reference interpolation from ALL input data, not just the
    # non-redundant subset, mimicking .fit() with trim_duplicates disabled.
    X_fit, y_fit = reference._build_y(X_train, y_train,
                                      sample_weight=weights,
                                      trim_duplicates=False)
    reference._build_f(X_fit, y_fit)
    # Fit the candidate on just the necessary (trimmed) data.
    candidate.fit(X_train, y_train, sample_weight=weights)
    X_test = 20.0 * rng.rand(n_samples) - 10
    assert_array_equal(reference.predict(X_test), candidate.predict(X_test))
def test_isotonic_copy_before_fit():
    # Copying an unfitted estimator must not raise:
    # https://github.com/scikit-learn/scikit-learn/issues/6628
    copy.copy(IsotonicRegression())
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/neighbors/_base.py | 4 | 45497 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from functools import partial
import warnings
from abc import ABCMeta, abstractmethod
import numbers
import numpy as np
from scipy.sparse import csr_matrix, issparse
import joblib
from joblib import Parallel, effective_n_jobs
from ._ball_tree import BallTree
from ._kd_tree import KDTree
from ..base import BaseEstimator, MultiOutputMixin
from ..base import is_classifier
from ..metrics import pairwise_distances_chunked
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import (
check_array,
gen_even_slices,
_to_object_array,
)
from ..utils.deprecation import deprecated
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..utils.validation import check_non_negative
from ..utils.fixes import delayed
from ..utils.fixes import parse_version
from ..exceptions import DataConversionWarning, EfficiencyWarning
# Metric names accepted by each neighbor-search algorithm for dense input.
# The tree algorithms expose their supported metrics directly.
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
                     kd_tree=KDTree.valid_metrics,
                     # The following list comes from the
                     # sklearn.metrics.pairwise doc string
                     brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
                            ['braycurtis', 'canberra', 'chebyshev',
                             'correlation', 'cosine', 'dice', 'hamming',
                             'jaccard', 'kulsinski', 'mahalanobis',
                             'matching', 'minkowski', 'rogerstanimoto',
                             'russellrao', 'seuclidean', 'sokalmichener',
                             'sokalsneath', 'sqeuclidean',
                             'yule', 'wminkowski']))

# Metric names accepted for sparse input: the tree algorithms support none,
# and brute force excludes the metrics that are undefined on sparse data.
VALID_METRICS_SPARSE = dict(ball_tree=[],
                            kd_tree=[],
                            brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() -
                                   {'haversine', 'nan_euclidean'}))
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
----------
dist : ndarray
The input distances.
weights : {'uniform', 'distance' or a callable}
The kind of weighting used.
Returns
-------
weights_arr : array of the same shape as ``dist``
If ``weights == 'uniform'``, then returns None.
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _is_sorted_by_data(graph):
"""Returns whether the graph's non-zero entries are sorted by data
The non-zero entries are stored in graph.data and graph.indices.
For each row (or sample), the non-zero entries can be either:
- sorted by indices, as after graph.sort_indices();
- sorted by data, as after _check_precomputed(graph);
- not sorted.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
Returns
-------
res : bool
Whether input graph is sorted by data.
"""
assert graph.format == 'csr'
out_of_order = graph.data[:-1] > graph.data[1:]
line_change = np.unique(graph.indptr[1:-1] - 1)
line_change = line_change[line_change < out_of_order.shape[0]]
return (out_of_order.sum() == out_of_order[line_change].sum())
def _check_precomputed(X):
    """Check precomputed distance matrix

    If the precomputed distance matrix is sparse, it checks that the non-zero
    entries are sorted by distances. If not, the matrix is copied and sorted.

    Parameters
    ----------
    X : {sparse matrix, array-like}, (n_samples, n_samples)
        Distance matrix to other samples. X may be a sparse matrix, in which
        case only non-zero elements may be considered neighbors.

    Returns
    -------
    X : {sparse matrix, array-like}, (n_samples, n_samples)
        Distance matrix to other samples. X may be a sparse matrix, in which
        case only non-zero elements may be considered neighbors.
    """
    if not issparse(X):
        # Dense input: just validate and check non-negativity.
        X = check_array(X)
        check_non_negative(X, whom="precomputed distance matrix.")
        return X
    else:
        graph = X

    if graph.format not in ('csr', 'csc', 'coo', 'lil'):
        raise TypeError('Sparse matrix in {!r} format is not supported due to '
                        'its handling of explicit zeros'.format(graph.format))
    # Conversion to CSR below copies the data for non-CSR inputs; remember
    # this so we avoid a second, redundant copy before the in-place sort.
    copied = graph.format != 'csr'
    graph = check_array(graph, accept_sparse='csr')
    check_non_negative(graph, whom="precomputed distance matrix.")

    if not _is_sorted_by_data(graph):
        warnings.warn('Precomputed sparse input was not sorted by data.',
                      EfficiencyWarning)
        if not copied:
            # Do not mutate the caller's matrix.
            graph = graph.copy()

        # if each sample has the same number of provided neighbors
        row_nnz = np.diff(graph.indptr)
        if row_nnz.max() == row_nnz.min():
            # Constant row width: sort all rows at once via a 2D argsort
            # (mergesort keeps ties stable), then flatten back.
            n_samples = graph.shape[0]
            distances = graph.data.reshape(n_samples, -1)

            order = np.argsort(distances, kind='mergesort')
            # Offset each row's order by its starting position in the flat
            # data array.
            order += np.arange(n_samples)[:, None] * row_nnz[0]
            order = order.ravel()
            graph.data = graph.data[order]
            graph.indices = graph.indices[order]

        else:
            # Ragged rows: sort each row's slice independently, in place.
            for start, stop in zip(graph.indptr, graph.indptr[1:]):
                order = np.argsort(graph.data[start:stop], kind='mergesort')
                graph.data[start:stop] = graph.data[start:stop][order]
                graph.indices[start:stop] = graph.indices[start:stop][order]

    return graph
def _kneighbors_from_graph(graph, n_neighbors, return_distance):
"""Decompose a nearest neighbors sparse graph into distances and indices
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
n_neighbors : int
Number of neighbors required for each sample.
return_distance : bool
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_samples, n_neighbors)
Distances to nearest neighbors. Only present if `return_distance=True`.
neigh_ind : ndarray of shape (n_samples, n_neighbors)
Indices of nearest neighbors.
"""
n_samples = graph.shape[0]
assert graph.format == 'csr'
# number of neighbors by samples
row_nnz = np.diff(graph.indptr)
row_nnz_min = row_nnz.min()
if n_neighbors is not None and row_nnz_min < n_neighbors:
raise ValueError(
'%d neighbors per samples are required, but some samples have only'
' %d neighbors in precomputed graph matrix. Decrease number of '
'neighbors used or recompute the graph with more neighbors.'
% (n_neighbors, row_nnz_min))
def extract(a):
# if each sample has the same number of provided neighbors
if row_nnz.max() == row_nnz_min:
return a.reshape(n_samples, -1)[:, :n_neighbors]
else:
idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
idx += graph.indptr[:-1, None]
return a.take(idx, mode='clip').reshape(n_samples, n_neighbors)
if return_distance:
return extract(graph.data), extract(graph.indices)
else:
return extract(graph.indices)
def _radius_neighbors_from_graph(graph, radius, return_distance):
    """Decompose a nearest neighbors sparse graph into distances and indices.

    Parameters
    ----------
    graph : sparse matrix of shape (n_samples, n_samples)
        Neighbors graph as given by `kneighbors_graph` or
        `radius_neighbors_graph`. Matrix should be of format CSR format.

    radius : float
        Radius of neighborhoods which should be strictly positive.

    return_distance : bool
        Whether or not to return the distances.

    Returns
    -------
    neigh_dist : ndarray of shape (n_samples,) of arrays
        Distances to nearest neighbors. Only present if `return_distance=True`.

    neigh_ind : ndarray of shape (n_samples,) of arrays
        Indices of nearest neighbors.
    """
    assert graph.format == 'csr'
    # When every stored distance already lies within the radius, the graph
    # buffers can be reused directly instead of being filtered.
    keep_all = bool(graph.data.max() <= radius)
    if keep_all:
        data, indices, indptr = graph.data, graph.indices, graph.indptr
    else:
        within = graph.data <= radius
        if return_distance:
            data = np.compress(within, graph.data)
        indices = np.compress(within, graph.indices)
        indptr = np.concatenate(([0], np.cumsum(within)))[graph.indptr]

    # Copy only when the index buffer is shared with the input graph.
    indices = indices.astype(np.intp, copy=keep_all)

    if return_distance:
        neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))
    neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))

    if return_distance:
        return neigh_dist, neigh_ind
    return neigh_ind
class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for nearest neighbors estimators."""

    @abstractmethod
    def __init__(self, n_neighbors=None, radius=None,
                 algorithm='auto', leaf_size=30, metric='minkowski',
                 p=2, metric_params=None, n_jobs=None):
        # Hyper-parameters are stored as given (scikit-learn convention);
        # metric/algorithm consistency is checked immediately below, the rest
        # is validated in _fit.
        self.n_neighbors = n_neighbors
        self.radius = radius
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.metric = metric
        self.metric_params = metric_params
        self.p = p
        self.n_jobs = n_jobs
        self._check_algorithm_metric()

    def _check_algorithm_metric(self):
        """Validate that `algorithm` and `metric` are a supported combination.

        Raises ValueError for an unknown algorithm, a metric not supported by
        the (effective) algorithm, a callable metric with kd_tree, or a
        minkowski-family metric with p < 1.
        """
        if self.algorithm not in ['auto', 'brute',
                                  'kd_tree', 'ball_tree']:
            raise ValueError("unrecognized algorithm: '%s'" % self.algorithm)

        if self.algorithm == 'auto':
            # With 'auto' we cannot know yet which algorithm _fit will pick,
            # so validate against the most permissive plausible choice.
            if self.metric == 'precomputed':
                alg_check = 'brute'
            elif (callable(self.metric) or
                  self.metric in VALID_METRICS['ball_tree']):
                alg_check = 'ball_tree'
            else:
                alg_check = 'brute'
        else:
            alg_check = self.algorithm

        if callable(self.metric):
            if self.algorithm == 'kd_tree':
                # callable metric is only valid for brute force and ball_tree
                raise ValueError(
                    "kd_tree does not support callable metric '%s'"
                    "Function call overhead will result"
                    "in very poor performance."
                    % self.metric)
        elif self.metric not in VALID_METRICS[alg_check]:
            raise ValueError("Metric '%s' not valid. Use "
                             "sorted(sklearn.neighbors.VALID_METRICS['%s']) "
                             "to get valid options. "
                             "Metric can also be a callable function."
                             % (self.metric, alg_check))

        if self.metric_params is not None and 'p' in self.metric_params:
            # 'p' passed in metric_params takes precedence over self.p.
            if self.p is not None:
                warnings.warn("Parameter p is found in metric_params. "
                              "The corresponding parameter from __init__ "
                              "is ignored.", SyntaxWarning, stacklevel=3)
            effective_p = self.metric_params['p']
        else:
            effective_p = self.p

        if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
            raise ValueError("p must be greater than one for minkowski metric")

    def _fit(self, X, y=None):
        """Validate inputs, select the search algorithm and build the index.

        X may be a raw data matrix, a precomputed (possibly sparse) distance
        matrix, a prefitted KDTree/BallTree, or another fitted NeighborsBase
        estimator whose index is reused. Returns self.
        """
        if self._get_tags()["requires_y"]:
            # Supervised estimator: validate X and y together (unless X is a
            # prebuilt tree/estimator, which carries no targets to validate).
            if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
                X, y = self._validate_data(X, y, accept_sparse="csr",
                                           multi_output=True)

            if is_classifier(self):
                # Classification targets require a specific format
                if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
                    if y.ndim != 1:
                        warnings.warn("A column-vector y was passed when a "
                                      "1d array was expected. Please change "
                                      "the shape of y to (n_samples,), for "
                                      "example using ravel().",
                                      DataConversionWarning, stacklevel=2)

                    self.outputs_2d_ = False
                    y = y.reshape((-1, 1))
                else:
                    self.outputs_2d_ = True

                check_classification_targets(y)
                # Encode each output column's labels as integer indices into
                # the corresponding classes_ array.
                self.classes_ = []
                self._y = np.empty(y.shape, dtype=int)
                for k in range(self._y.shape[1]):
                    classes, self._y[:, k] = np.unique(
                        y[:, k], return_inverse=True)
                    self.classes_.append(classes)

                if not self.outputs_2d_:
                    self.classes_ = self.classes_[0]
                    self._y = self._y.ravel()
            else:
                self._y = y
        else:
            if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
                X = self._validate_data(X, accept_sparse='csr')

        self._check_algorithm_metric()
        if self.metric_params is None:
            self.effective_metric_params_ = {}
        else:
            self.effective_metric_params_ = self.metric_params.copy()

        effective_p = self.effective_metric_params_.get('p', self.p)
        if self.metric in ['wminkowski', 'minkowski']:
            self.effective_metric_params_['p'] = effective_p

        self.effective_metric_ = self.metric
        # For minkowski distance, use more efficient methods where available
        if self.metric == 'minkowski':
            p = self.effective_metric_params_.pop('p', 2)
            if p < 1:
                raise ValueError("p must be greater than one "
                                 "for minkowski metric")
            elif p == 1:
                self.effective_metric_ = 'manhattan'
            elif p == 2:
                self.effective_metric_ = 'euclidean'
            elif p == np.inf:
                self.effective_metric_ = 'chebyshev'
            else:
                self.effective_metric_params_['p'] = p

        # Reuse an already-built index when one was passed in as X.
        if isinstance(X, NeighborsBase):
            self._fit_X = X._fit_X
            self._tree = X._tree
            self._fit_method = X._fit_method
            self.n_samples_fit_ = X.n_samples_fit_
            return self

        elif isinstance(X, BallTree):
            self._fit_X = X.data
            self._tree = X
            self._fit_method = 'ball_tree'
            self.n_samples_fit_ = X.data.shape[0]
            return self

        elif isinstance(X, KDTree):
            self._fit_X = X.data
            self._tree = X
            self._fit_method = 'kd_tree'
            self.n_samples_fit_ = X.data.shape[0]
            return self

        if self.effective_metric_ == 'precomputed':
            X = _check_precomputed(X)
            self.n_features_in_ = X.shape[1]

        n_samples = X.shape[0]
        if n_samples == 0:
            raise ValueError("n_samples must be greater than 0")

        # Precomputed matrix X must be squared
        if self.metric == 'precomputed' and X.shape[0] != X.shape[1]:
            raise ValueError("Precomputed matrix must be a square matrix."
                             " Input is a {}x{} matrix."
                             .format(X.shape[0], X.shape[1]))

        if issparse(X):
            # Sparse input can only be handled by brute force.
            if self.algorithm not in ('auto', 'brute'):
                warnings.warn("cannot use tree with sparse input: "
                              "using brute force")
            if self.effective_metric_ not in VALID_METRICS_SPARSE['brute'] \
                    and not callable(self.effective_metric_):
                raise ValueError("Metric '%s' not valid for sparse input. "
                                 "Use sorted(sklearn.neighbors."
                                 "VALID_METRICS_SPARSE['brute']) "
                                 "to get valid options. "
                                 "Metric can also be a callable function."
                                 % (self.effective_metric_))
            self._fit_X = X.copy()
            self._tree = None
            self._fit_method = 'brute'
            self.n_samples_fit_ = X.shape[0]
            return self

        self._fit_method = self.algorithm
        self._fit_X = X
        self.n_samples_fit_ = X.shape[0]

        if self._fit_method == 'auto':
            # A tree approach is better for small number of neighbors or small
            # number of features, with KDTree generally faster when available
            if (self.metric == 'precomputed' or self._fit_X.shape[1] > 15 or
                    (self.n_neighbors is not None and
                     self.n_neighbors >= self._fit_X.shape[0] // 2)):
                self._fit_method = 'brute'
            else:
                if self.effective_metric_ in VALID_METRICS['kd_tree']:
                    self._fit_method = 'kd_tree'
                elif (callable(self.effective_metric_) or
                      self.effective_metric_ in VALID_METRICS['ball_tree']):
                    self._fit_method = 'ball_tree'
                else:
                    self._fit_method = 'brute'

        if self._fit_method == 'ball_tree':
            self._tree = BallTree(X, self.leaf_size,
                                  metric=self.effective_metric_,
                                  **self.effective_metric_params_)
        elif self._fit_method == 'kd_tree':
            self._tree = KDTree(X, self.leaf_size,
                                metric=self.effective_metric_,
                                **self.effective_metric_params_)
        elif self._fit_method == 'brute':
            self._tree = None
        else:
            raise ValueError("algorithm = '%s' not recognized"
                             % self.algorithm)

        if self.n_neighbors is not None:
            if self.n_neighbors <= 0:
                raise ValueError(
                    "Expected n_neighbors > 0. Got %d" %
                    self.n_neighbors
                )
            else:
                if not isinstance(self.n_neighbors, numbers.Integral):
                    raise TypeError(
                        "n_neighbors does not take %s value, "
                        "enter integer value" %
                        type(self.n_neighbors))

        return self

    def _more_tags(self):
        # For cross-validation routines to split data correctly
        return {'pairwise': self.metric == 'precomputed'}

    # TODO: Remove in 1.1
    # mypy error: Decorated property not supported
    @deprecated("Attribute _pairwise was deprecated in "  # type: ignore
                "version 0.24 and will be removed in 1.1 (renaming of 0.26).")
    @property
    def _pairwise(self):
        # For cross-validation routines to split data correctly
        return self.metric == 'precomputed'
def _tree_query_parallel_helper(tree, *args, **kwargs):
"""Helper for the Parallel calls in KNeighborsMixin.kneighbors
The Cython method tree.query is not directly picklable by cloudpickle
under PyPy.
"""
return tree.query(*args, **kwargs)
class KNeighborsMixin:
    """Mixin for k-neighbors searches"""

    def _kneighbors_reduce_func(self, dist, start,
                                n_neighbors, return_distance):
        """Reduce a chunk of distances to the nearest neighbors

        Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`

        Parameters
        ----------
        dist : ndarray of shape (n_samples_chunk, n_samples)
            The distance matrix.

        start : int
            The index in X which the first row of dist corresponds to.

        n_neighbors : int
            Number of neighbors required for each sample.

        return_distance : bool
            Whether or not to return the distances.

        Returns
        -------
        dist : array of shape (n_samples_chunk, n_neighbors)
            Returned only if `return_distance=True`.

        neigh : array of shape (n_samples_chunk, n_neighbors)
            The neighbors indices.
        """
        sample_range = np.arange(dist.shape[0])[:, None]
        # argpartition is O(n) per row: only the n_neighbors smallest entries
        # are moved to the front, in arbitrary order.
        neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
        neigh_ind = neigh_ind[:, :n_neighbors]
        # argpartition doesn't guarantee sorted order, so we sort again
        neigh_ind = neigh_ind[
            sample_range, np.argsort(dist[sample_range, neigh_ind])]
        if return_distance:
            if self.effective_metric_ == 'euclidean':
                # Brute-force euclidean works on squared distances; undo here.
                result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
            else:
                result = dist[sample_range, neigh_ind], neigh_ind
        else:
            result = neigh_ind
        return result

    def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
        """Finds the K-neighbors of a point.

        Returns indices of and distances to the neighbors of each point.

        Parameters
        ----------
        X : array-like, shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed', \
                default=None
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.

        n_neighbors : int, default=None
            Number of neighbors required for each sample. The default is the
            value passed to the constructor.

        return_distance : bool, default=True
            Whether or not to return the distances.

        Returns
        -------
        neigh_dist : ndarray of shape (n_queries, n_neighbors)
            Array representing the lengths to points, only present if
            return_distance=True

        neigh_ind : ndarray of shape (n_queries, n_neighbors)
            Indices of the nearest points in the population matrix.

        Examples
        --------
        In the following example, we construct a NearestNeighbors
        class from an array representing our data set and ask who's
        the closest point to [1,1,1]

        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(n_neighbors=1)
        >>> neigh.fit(samples)
        NearestNeighbors(n_neighbors=1)
        >>> print(neigh.kneighbors([[1., 1., 1.]]))
        (array([[0.5]]), array([[2]]))

        As you can see, it returns [[0.5]], and [[2]], which means that the
        element is at distance 0.5 and is the third element of samples
        (indexes start at 0). You can also query for multiple points:

        >>> X = [[0., 1., 0.], [1., 0., 1.]]
        >>> neigh.kneighbors(X, return_distance=False)
        array([[1],
               [2]]...)
        """
        check_is_fitted(self)

        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        elif n_neighbors <= 0:
            raise ValueError(
                "Expected n_neighbors > 0. Got %d" %
                n_neighbors
            )
        else:
            if not isinstance(n_neighbors, numbers.Integral):
                raise TypeError(
                    "n_neighbors does not take %s value, "
                    "enter integer value" %
                    type(n_neighbors))

        if X is not None:
            query_is_train = False
            if self.effective_metric_ == 'precomputed':
                X = _check_precomputed(X)
            else:
                X = check_array(X, accept_sparse='csr')
        else:
            query_is_train = True
            X = self._fit_X
            # Include an extra neighbor to account for the sample itself being
            # returned, which is removed later
            n_neighbors += 1

        n_samples_fit = self.n_samples_fit_
        if n_neighbors > n_samples_fit:
            raise ValueError(
                "Expected n_neighbors <= n_samples, "
                " but n_samples = %d, n_neighbors = %d" %
                (n_samples_fit, n_neighbors)
            )

        n_jobs = effective_n_jobs(self.n_jobs)
        chunked_results = None
        # Three dispatch paths: sparse precomputed graph, brute force, trees.
        if (self._fit_method == 'brute' and
                self.effective_metric_ == 'precomputed' and issparse(X)):
            results = _kneighbors_from_graph(
                X, n_neighbors=n_neighbors,
                return_distance=return_distance)

        elif self._fit_method == 'brute':
            reduce_func = partial(self._kneighbors_reduce_func,
                                  n_neighbors=n_neighbors,
                                  return_distance=return_distance)

            # for efficiency, use squared euclidean distances
            if self.effective_metric_ == 'euclidean':
                kwds = {'squared': True}
            else:
                kwds = self.effective_metric_params_

            chunked_results = list(pairwise_distances_chunked(
                X, self._fit_X, reduce_func=reduce_func,
                metric=self.effective_metric_, n_jobs=n_jobs,
                **kwds))

        elif self._fit_method in ['ball_tree', 'kd_tree']:
            if issparse(X):
                raise ValueError(
                    "%s does not work with sparse matrices. Densify the data, "
                    "or set algorithm='brute'" % self._fit_method)
            old_joblib = (
                parse_version(joblib.__version__) < parse_version('0.12'))
            if old_joblib:
                # Deal with change of API in joblib
                parallel_kwargs = {"backend": "threading"}
            else:
                parallel_kwargs = {"prefer": "threads"}
            chunked_results = Parallel(n_jobs, **parallel_kwargs)(
                delayed(_tree_query_parallel_helper)(
                    self._tree, X[s], n_neighbors, return_distance)
                for s in gen_even_slices(X.shape[0], n_jobs)
            )
        else:
            raise ValueError("internal: _fit_method not recognized")

        if chunked_results is not None:
            # Stitch the per-chunk results back into full arrays.
            if return_distance:
                neigh_dist, neigh_ind = zip(*chunked_results)
                results = np.vstack(neigh_dist), np.vstack(neigh_ind)
            else:
                results = np.vstack(chunked_results)

        if not query_is_train:
            return results
        else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e
            # the sample itself.
            if return_distance:
                neigh_dist, neigh_ind = results
            else:
                neigh_ind = results

            n_queries, _ = X.shape
            sample_range = np.arange(n_queries)[:, None]
            sample_mask = neigh_ind != sample_range

            # Corner case: When the number of duplicates are more
            # than the number of neighbors, the first NN will not
            # be the sample, but a duplicate.
            # In that case mask the first duplicate.
            dup_gr_nbrs = np.all(sample_mask, axis=1)
            sample_mask[:, 0][dup_gr_nbrs] = False

            neigh_ind = np.reshape(
                neigh_ind[sample_mask], (n_queries, n_neighbors - 1))

            if return_distance:
                neigh_dist = np.reshape(
                    neigh_dist[sample_mask], (n_queries, n_neighbors - 1))
                return neigh_dist, neigh_ind
            return neigh_ind

    def kneighbors_graph(self, X=None, n_neighbors=None,
                         mode='connectivity'):
        """Computes the (weighted) graph of k-Neighbors for points in X

        Parameters
        ----------
        X : array-like of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed', \
                default=None
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
            For ``metric='precomputed'`` the shape should be
            (n_queries, n_indexed). Otherwise the shape should be
            (n_queries, n_features).

        n_neighbors : int, default=None
            Number of neighbors for each sample. The default is the value
            passed to the constructor.

        mode : {'connectivity', 'distance'}, default='connectivity'
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are Euclidean distance between points.

        Returns
        -------
        A : sparse-matrix of shape (n_queries, n_samples_fit)
            `n_samples_fit` is the number of samples in the fitted data
            `A[i, j]` is assigned the weight of edge that connects `i` to `j`.
            The matrix is of CSR format.

        Examples
        --------
        >>> X = [[0], [3], [1]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(n_neighbors=2)
        >>> neigh.fit(X)
        NearestNeighbors(n_neighbors=2)
        >>> A = neigh.kneighbors_graph(X)
        >>> A.toarray()
        array([[1., 0., 1.],
               [0., 1., 1.],
               [1., 0., 1.]])

        See Also
        --------
        NearestNeighbors.radius_neighbors_graph
        """
        check_is_fitted(self)
        if n_neighbors is None:
            n_neighbors = self.n_neighbors

        # check the input only in self.kneighbors

        # construct CSR matrix representation of the k-NN graph
        if mode == 'connectivity':
            A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
            n_queries = A_ind.shape[0]
            A_data = np.ones(n_queries * n_neighbors)

        elif mode == 'distance':
            A_data, A_ind = self.kneighbors(
                X, n_neighbors, return_distance=True)
            A_data = np.ravel(A_data)

        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity" '
                'or "distance" but got "%s" instead' % mode)

        n_queries = A_ind.shape[0]
        n_samples_fit = self.n_samples_fit_
        n_nonzero = n_queries * n_neighbors
        # Every row stores exactly n_neighbors entries.
        A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)

        kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
                                      shape=(n_queries, n_samples_fit))

        return kneighbors_graph
def _tree_query_radius_parallel_helper(tree, *args, **kwargs):
"""Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors
The Cython method tree.query_radius is not directly picklable by
cloudpickle under PyPy.
"""
return tree.query_radius(*args, **kwargs)
class RadiusNeighborsMixin:
"""Mixin for radius-based neighbors searches"""
def _radius_neighbors_reduce_func(self, dist, start,
radius, return_distance):
"""Reduce a chunk of distances to the nearest neighbors
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : ndarray of shape (n_samples_chunk, n_samples)
The distance matrix.
start : int
The index in X which the first row of dist corresponds to.
radius : float
The radius considered when making the nearest neighbors search.
return_distance : bool
Whether or not to return the distances.
Returns
-------
dist : list of ndarray of shape (n_samples_chunk,)
Returned only if `return_distance=True`.
neigh : list of ndarray of shape (n_samples_chunk,)
The neighbors indices.
"""
neigh_ind = [np.where(d <= radius)[0] for d in dist]
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
results = dist, neigh_ind
else:
results = neigh_ind
return results
    def radius_neighbors(self, X=None, radius=None, return_distance=True,
                         sort_results=False):
        """Finds the neighbors within a given radius of a point or points.

        Return the indices and distances of each point from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.

        The result points are *not* necessarily sorted by distance to their
        query point.

        Parameters
        ----------
        X : array-like of (n_samples, n_features), default=None
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.

        radius : float, default=None
            Limiting distance of neighbors to return. The default is the value
            passed to the constructor.

        return_distance : bool, default=True
            Whether or not to return the distances.

        sort_results : bool, default=False
            If True, the distances and indices will be sorted by increasing
            distances before being returned. If False, the results may not
            be sorted. If `return_distance=False`, setting `sort_results=True`
            will result in an error.

            .. versionadded:: 0.22

        Returns
        -------
        neigh_dist : ndarray of shape (n_samples,) of arrays
            Array representing the distances to each point, only present if
            `return_distance=True`. The distance values are computed according
            to the ``metric`` constructor parameter.

        neigh_ind : ndarray of shape (n_samples,) of arrays
            An array of arrays of indices of the approximate nearest points
            from the population matrix that lie within a ball of size
            ``radius`` around the query points.

        Examples
        --------
        In the following example, we construct a NeighborsClassifier
        class from an array representing our data set and ask who's
        the closest point to [1, 1, 1]:

        >>> import numpy as np
        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(radius=1.6)
        >>> neigh.fit(samples)
        NearestNeighbors(radius=1.6)
        >>> rng = neigh.radius_neighbors([[1., 1., 1.]])
        >>> print(np.asarray(rng[0][0]))
        [1.5 0.5]
        >>> print(np.asarray(rng[1][0]))
        [1 2]

        The first array returned contains the distances to all points which
        are closer than 1.6, while the second array returned contains their
        indices.  In general, multiple points can be queried at the same time.

        Notes
        -----
        Because the number of neighbors of each point is not necessarily
        equal, the results for multiple query points cannot be fit in a
        standard data array.
        For efficiency, `radius_neighbors` returns arrays of objects, where
        each object is a 1D array of indices or distances.
        """
        check_is_fitted(self)

        if X is not None:
            query_is_train = False
            if self.effective_metric_ == 'precomputed':
                X = _check_precomputed(X)
            else:
                X = check_array(X, accept_sparse='csr')
        else:
            query_is_train = True
            X = self._fit_X

        if radius is None:
            radius = self.radius

        # Three dispatch paths: sparse precomputed graph, brute force, trees.
        if (self._fit_method == 'brute' and
                self.effective_metric_ == 'precomputed' and issparse(X)):
            results = _radius_neighbors_from_graph(
                X, radius=radius, return_distance=return_distance)

        elif self._fit_method == 'brute':
            # for efficiency, use squared euclidean distances
            if self.effective_metric_ == 'euclidean':
                # The threshold must be squared to match squared distances.
                radius *= radius
                kwds = {'squared': True}
            else:
                kwds = self.effective_metric_params_

            reduce_func = partial(self._radius_neighbors_reduce_func,
                                  radius=radius,
                                  return_distance=return_distance)

            chunked_results = pairwise_distances_chunked(
                X, self._fit_X, reduce_func=reduce_func,
                metric=self.effective_metric_, n_jobs=self.n_jobs,
                **kwds)
            if return_distance:
                # Flatten the per-chunk lists into object arrays (rows are
                # ragged: each query can have a different neighbor count).
                neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)
                neigh_dist_list = sum(neigh_dist_chunks, [])
                neigh_ind_list = sum(neigh_ind_chunks, [])
                neigh_dist = _to_object_array(neigh_dist_list)
                neigh_ind = _to_object_array(neigh_ind_list)
                results = neigh_dist, neigh_ind
            else:
                neigh_ind_list = sum(chunked_results, [])
                results = _to_object_array(neigh_ind_list)

            if sort_results:
                if not return_distance:
                    raise ValueError("return_distance must be True "
                                     "if sort_results is True.")
                for ii in range(len(neigh_dist)):
                    order = np.argsort(neigh_dist[ii], kind='mergesort')
                    neigh_ind[ii] = neigh_ind[ii][order]
                    neigh_dist[ii] = neigh_dist[ii][order]
                results = neigh_dist, neigh_ind

        elif self._fit_method in ['ball_tree', 'kd_tree']:
            if issparse(X):
                raise ValueError(
                    "%s does not work with sparse matrices. Densify the data, "
                    "or set algorithm='brute'" % self._fit_method)

            n_jobs = effective_n_jobs(self.n_jobs)
            delayed_query = delayed(_tree_query_radius_parallel_helper)
            if parse_version(joblib.__version__) < parse_version('0.12'):
                # Deal with change of API in joblib
                parallel_kwargs = {"backend": "threading"}
            else:
                parallel_kwargs = {"prefer": "threads"}
            chunked_results = Parallel(n_jobs, **parallel_kwargs)(
                delayed_query(self._tree, X[s], radius, return_distance,
                              sort_results=sort_results)
                for s in gen_even_slices(X.shape[0], n_jobs)
            )
            if return_distance:
                # NOTE: the tree helpers yield (indices, distances) pairs,
                # in that order.
                neigh_ind, neigh_dist = tuple(zip(*chunked_results))
                results = np.hstack(neigh_dist), np.hstack(neigh_ind)
            else:
                results = np.hstack(chunked_results)
        else:
            raise ValueError("internal: _fit_method not recognized")

        if not query_is_train:
            return results
        else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e
            # the sample itself.
            if return_distance:
                neigh_dist, neigh_ind = results
            else:
                neigh_ind = results

            for ind, ind_neighbor in enumerate(neigh_ind):
                mask = ind_neighbor != ind

                neigh_ind[ind] = ind_neighbor[mask]
                if return_distance:
                    neigh_dist[ind] = neigh_dist[ind][mask]

            if return_distance:
                return neigh_dist, neigh_ind
            return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity',
sort_results=False):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Radius of neighborhoods. The default is the value passed to the
constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
sort_results : bool, default=False
If True, in each row of the result, the non-zero entries will be
sorted by increasing distances. If False, the non-zero entries may
not be sorted. Only used with mode='distance'.
.. versionadded:: 0.22
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data
`A[i, j]` is assigned the weight of edge that connects `i` to `j`.
The matrix if of format CSR.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X)
NearestNeighbors(radius=1.5)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
See Also
--------
kneighbors_graph
"""
check_is_fitted(self)
# check the input only in self.radius_neighbors
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True,
sort_results=sort_results)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_queries, n_samples_fit))
| bsd-3-clause |
doylew/detectionsc | format_py/n_gram_svm_with_cv.py | 1 | 24856 | ##################################################
######scikit_learn to do the classifications######
##################################################
##################################################
from time import sleep
from sklearn import svm
from sklearn import cross_validation
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
##################################################
#####Hard coded (currently) where the datasets####
#################are located######################
##################################################
# Output file: one line per validated instance with the predicted class,
# appended by validateClass().
n_compress = "n_compress.txt"
# Output file: the matching ground-truth labels, appended by
# makeValidationInstance().
ground_truth = "groundtruth.txt"
# The ten attack traces ("a" files) used in the 10-fold cross validation.
file_a0 = "a0.txt"
file_a1 = "a1.txt"
file_a2 = "a2.txt"
file_a3 = "a3.txt"
file_a4 = "a4.txt"
file_a5 = "a5.txt"
file_a6 = "a6.txt"
file_a7 = "a7.txt"
file_a8 = "a8.txt"
file_a9 = "a9.txt"
format_a = [file_a0,file_a1,file_a2,file_a3,file_a4,file_a5,file_a6,file_a7,file_a8,file_a9]
# The ten normal traces ("n" files).
file_n0 = "n0.txt"
file_n1 = "n1.txt"
file_n2 = "n2.txt"
file_n3 = "n3.txt"
file_n4 = "n4.txt"
file_n5 = "n5.txt"
file_n6 = "n6.txt"
file_n7 = "n7.txt"
file_n8 = "n8.txt"
file_n9 = "n9.txt"
format_n = [file_n0,file_n1,file_n2,file_n3,file_n4,file_n5,file_n6,file_n7,file_n8,file_n9]
# The ten "v" traces.
# NOTE(review): format_v is built below but never used elsewhere in this
# script -- presumably a leftover; confirm before removing.
file_v0 = "v0.txt"
file_v1 = "v1.txt"
file_v2 = "v2.txt"
file_v3 = "v3.txt"
file_v4 = "v4.txt"
file_v5 = "v5.txt"
file_v6 = "v6.txt"
file_v7 = "v7.txt"
file_v8 = "v8.txt"
file_v9 = "v9.txt"
# Running sum of the per-fold accuracies; averaged after the last fold.
total_percent = 0
format_v = [file_v0,file_v1,file_v2,file_v3,file_v4,file_v5,file_v6,file_v7,file_v8,file_v9]
# Index into format_array used by the validation helpers.
# NOTE(review): it is never advanced at module level, so slot 0 is always
# the one consulted -- confirm this is intended.
format_index = 0
# Per-fold binary ground-truth labels for the ROC curve.
binary_array = list()
# Per-fold list of normal-vote ratios (the ROC scores).
cur_scores = list()
# Large expected-label buffer, pre-filled with '0' characters below;
# makeValidationInstance() flips entries to '1' for attack data.
format_array = []
not_compress = open(str(n_compress),"a")
for i in range(0,4000000):
    format_array.extend('0')
def roc_config(fileName, type_):
    """Build the per-instance ROC ground-truth labels for one trace file.

    Every "new" marker in *fileName* starts one instance, so the file
    contributes one copy of *type_* (1 = attack, 0 = normal) per marker.

    Parameters
    ----------
    fileName : str
        Path of the trace file to scan.
    type_ : int
        Binary class label to emit for every instance found.

    Returns
    -------
    list or None
        ``[type_] * number_of_instances``; ``None`` when *fileName* is not
        a string (mirrors the original silent fall-through).
    """
    if not isinstance(fileName, str):
        return None
    # "with" guarantees the handle is closed even if reading fails; the
    # previous version leaked the handle on error.
    with open(str(fileName), "r+") as my_file:
        words = my_file.read().split("\n")
    words.remove('')  # drop the empty string left by the trailing newline
    # BUG FIX: the old loop also wrote type_ into words[0] on every match
    # (its index variable was never advanced); that write was dead code
    # and has been removed -- only the returned label list matters.
    return [type_ for word in words if word == 'new']
##################################################
####Create the instances for validation testing###
#################################################
##################################################
def makeValidationInstance(fileName,f_index):
    """Read one held-out trace file and split it into per-instance lists
    of token subsequences for validation.

    Side effects on module globals:
      * ``format_array[f_index]`` is set to '1' for attack-labelled
        entries.  NOTE(review): ``f_index`` is a local copy, so the
        increments below never propagate back to the caller's
        ``format_index`` -- confirm whether that is intended.
      * One ground-truth line per instance ("1" or "0") is appended to
        the module-level file handle ``gtruth``.

    Returns the list of instances (each a list of token lists), or -1
    when *fileName* is not a string.
    """
    if isinstance(fileName,str):
        my_file = open(str(fileName),"r+")
        words = my_file.read().split("\n")
        my_file.close()
        words.remove('')
        flag = 0
        # Every "new" marker starts a fresh instance.
        num_instances = words.count("new")
        print("Number of Instances to Validate: " + str(num_instances))
        instance = []
        data = []
        for line in words:
            if line == "new":
                my_data = [data]
                instance += (my_data)
                data = []
            # The marker line itself is also appended (as ['new']); it is
            # filtered out later by calClass/removeNew.
            data.extend([line.split()])
        # Strip the class label from each entry and remember whether any
        # attack label was seen.  NOTE(review): this assumes '1'/'0'
        # appear only as labels; remove() deletes the FIRST matching
        # token, which could be a feature value -- verify.
        for i in instance:
            for entry in i:
                if '1' in entry:
                    flag = 1
                    entry.remove('1')
                    format_array[f_index] = '1'
                    f_index += 1
                if '0' in entry:
                    entry.remove('0')
                    f_index += 1
        # The whole file shares one label: attack if any entry was flagged.
        if flag == 1:
            for i in range(0,num_instances):
                gtruth.write("1\n")
        else:
            for i in range(0,num_instances):
                gtruth.write("0\n")
        return instance
    else:
        return -1
##################################################
#####Create the instances for training############
##################################################
##################################################
def makeFitInstance(fileName):
    """Parse one training trace file into feature rows plus class labels.

    Each line of the file holds whitespace-separated feature tokens
    followed by a trailing class label: '1' (attack) or '0' (normal).
    Lines without such a trailing label (e.g. the "new" markers) are kept
    unchanged and receive no class entry.

    Returns
    -------
    dict
        ``{0: list of token lists with the label stripped,
           1: parallel list of class characters ('a' attack, 'n' normal)}``
        or -1 when *fileName* is not a string.
    """
    if not isinstance(fileName, str):
        return -1
    with open(str(fileName), "r+") as my_file:
        words = my_file.read().split("\n")
    words.remove('')  # empty string produced by the trailing newline
    data = [line.split() for line in words]
    classi = []
    for entry in data:
        if entry[-1] == '1':
            classi.extend('a')
            # BUG FIX: strip the trailing label itself.  The previous
            # entry.remove('1') deleted the FIRST token equal to '1',
            # which could be a feature value (e.g. the line "1 2 1"
            # became ['2', '1'] instead of ['1', '2']).
            del entry[-1]
        elif entry[-1] == '0':
            classi.extend('n')
            del entry[-1]
    instance = {}
    instance[0] = data
    instance[1] = classi
    return instance
##################################################
#######Calculates the class of the subsequences###
########as a ratio################################
##################################################
def calClass(svm, data, cur_scores):
    """Classify one instance from the votes of its subsequences.

    Every subsequence in *data* is pushed through the trained *svm*; the
    fraction voting 'n' (normal) is appended to *cur_scores* and the
    instance is called normal ('0') when that fraction exceeds 0.9,
    attack ('1') otherwise.

    Returns a dict ``{0: '0' or '1', 1: cur_scores}``, or ``None`` if the
    classifier ever emits an unknown label.
    """
    ret_ = dict()
    # Drop a single leading ['new'] marker, if present.
    if ['new'] in data:
        data.remove(['new'])
    total = 0
    votes_normal = 0
    votes_attack = 0
    for subseq in data:
        total += 1
        predicted = svm.predict(subseq)
        if predicted == ['a']:
            votes_attack += 1
        elif predicted == ['n']:
            votes_normal += 1
        else:
            print("OOPS")
            return
    nratio = float(votes_normal) / float(total)
    cur_scores.insert(len(cur_scores), nratio)
    print(str(cur_scores))
    # Kept for parity with the accumulation above (currently unused).
    aratio = float(votes_attack) / float(total)
    ret_[1] = cur_scores
    ret_[0] = '0' if nratio > 0.9 else '1'
    return ret_
##################################################
######Removes the instances of new for fitting####
##################################################
##################################################
def removeNew(t_array):
    """Return the entries of *t_array* with every ['new'] marker removed.

    BUG FIX: the previous version removed markers from the list while
    iterating over that same list, so consecutive ['new'] entries were
    skipped; it then printed "WHOOPS!" and returned 1 when a marker
    survived.  Filtering into a fresh list removes every marker reliably
    and always returns a list, as callers (``clf.fit``) expect.
    """
    return [entry for entry in t_array if entry != ['new']]
##################################################
#########Percentage validation####################
###########of the validation data#################
##################################################
def validateClass(svm, validation_array, f_index):
    """Validate every held-out instance against the trained *svm*.

    Uses module globals: ``cur_scores`` collects the per-instance normal
    ratios, ``format_array[f_index]`` supplies the expected label
    (NOTE(review): ``f_index`` never advances inside the loop, so every
    instance is compared against the same slot -- confirm that is
    intended), and each prediction is appended to ``not_compress``.

    Returns ``{0: fraction of correctly classified instances,
               1: the score list produced by calClass}``.
    """
    ret_ = dict()
    validate = 0.0
    num = 0.0
    print("length: " + str(len(validation_array)))
    for data in validation_array:
        num += 1
        cal_ = calClass(svm, data, cur_scores)
        if cal_[0] == format_array[f_index]:
            validate += 1
        print("NUM: " + str(int(num)) + " CLASSIFIED AS: " + str(cal_[0]))
        # BUG FIX: calClass returns the label as a *string* ('1'/'0');
        # the old comparisons against the integers 1 and 0 never matched,
        # so nothing was ever written to n_compress.txt.
        if cal_[0] == '1':
            not_compress.write("1\n")
        elif cal_[0] == '0':
            not_compress.write("0\n")
    ret_[0] = float(validate) / float(num)
    ret_[1] = cal_[1]
    return ret_
##################################################
##############Creates the ground truth############
################for each fold####################
#################################################
##################################################
################Main##############################
##################################################
##################################################
print("Creating the training data...")
##################################################
#############Create the attack and################
#################normal data and combine them#####
##################################################
# Parse all twenty trace files once; each instance_* is a dict
# {0: feature rows, 1: class labels} produced by makeFitInstance().
instance_a0 = makeFitInstance(file_a0)
instance_a1 = makeFitInstance(file_a1)
instance_a2 = makeFitInstance(file_a2)
instance_a3 = makeFitInstance(file_a3)
instance_a4 = makeFitInstance(file_a4)
instance_a5 = makeFitInstance(file_a5)
instance_a6 = makeFitInstance(file_a6)
instance_a7 = makeFitInstance(file_a7)
instance_a8 = makeFitInstance(file_a8)
instance_a9 = makeFitInstance(file_a9)
instance_n0 = makeFitInstance(file_n0)
instance_n1 = makeFitInstance(file_n1)
instance_n2 = makeFitInstance(file_n2)
instance_n3 = makeFitInstance(file_n3)
instance_n4 = makeFitInstance(file_n4)
instance_n5 = makeFitInstance(file_n5)
instance_n6 = makeFitInstance(file_n6)
instance_n7 = makeFitInstance(file_n7)
instance_n8 = makeFitInstance(file_n8)
instance_n9 = makeFitInstance(file_n9)
# Ground-truth output handle shared by makeValidationInstance().
gtruth = open(str(ground_truth),"a")
# One SVC reused (re-fitted) across all folds.
clf = svm.SVC()
print("Starting cross validation with 10 folds...")
# Accumulators for the mean ROC curve over the ten folds.
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
###Cross-validation folds#########################
# The ten folds below were originally ten near-identical copy-pasted
# blocks; they are now driven by a single loop.  Fold k holds out trace
# pair (k+8) mod 10: fold 1 validates on (a9, n9), fold 2 on (a0, n0),
# fold 3 on (a1, n1), ..., fold 10 on (a8, n8) -- exactly the rotation
# used by the original code.
instances_a = [instance_a0, instance_a1, instance_a2, instance_a3,
               instance_a4, instance_a5, instance_a6, instance_a7,
               instance_a8, instance_a9]
instances_n = [instance_n0, instance_n1, instance_n2, instance_n3,
               instance_n4, instance_n5, instance_n6, instance_n7,
               instance_n8, instance_n9]
held_out_order = [9, 0, 1, 2, 3, 4, 5, 6, 7, 8]

for fold, held in enumerate(held_out_order, start=1):
    # Fresh score list for this fold; calClass/validateClass read the
    # module-level name, so the rebinding must happen at module scope.
    cur_scores = list()

    # Training set: the nine attack and nine normal traces that are not
    # held out (attack data first, matching the original concatenation).
    fit_data = []
    fit_classes = []
    for inst_list in (instances_a, instances_n):
        for i in range(10):
            if i != held:
                fit_data += inst_list[i][0]
                fit_classes += inst_list[i][1]

    # Validation set: the held-out attack/normal pair (this also appends
    # the fold's ground-truth lines to gtruth).
    vali = (makeValidationInstance(format_a[held], format_index) +
            makeValidationInstance(format_n[held], format_index))

    print("Fold {0}...".format(fold))
    clf.fit(removeNew(fit_data), removeNew(fit_classes))
    print("Validating the classes...")
    per = validateClass(clf, vali, format_index)
    print("% correct: " + str(per[0]))
    total_percent += per[0]

    # ROC bookkeeping: binary ground truth versus the normal-vote ratios.
    scores_ = per[1]
    binary_array = list()
    binary_array.extend(roc_config(format_a[held], 1))
    binary_array.extend(roc_config(format_n[held], 0))
    print("scores_: " + str(scores_))
    print("bin_array: " + str(binary_array))
    fpr, tpr, thresholds = metrics.roc_curve(binary_array, scores_,
                                             pos_label=0)
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic Fold {0}'.format(fold))
    plt.legend(loc="lower right")

# Average the interpolated TPR over the ten folds and plot the mean ROC.
mean_tpr /= 10
mean_tpr[-1] = 1.0
mean_auc = metrics.auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic uncompressed')
plt.legend(loc="lower right", fontsize='small')
savefig('meanroc.png')
gtruth.close()
not_compress.close()
print("Total cross validation percentage: " + str(float(total_percent) / float(10.0)))
print("Done...saved ROC curves for each fold and mean..")
| mit |
cdiazbas/enhance | enhance.py | 1 | 7212 | import warnings
# To deactivate future warnings:
# warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore")
import numpy as np
import platform
import os
import time
import argparse
from astropy.io import fits
import tensorflow as tf
import keras as krs
import keras.backend.tensorflow_backend as ktf
import models as nn_model
# Silence TensorFlow's C++-level logging before anything else runs.
# To deactivate warnings: https://github.com/tensorflow/tensorflow/issues/7778
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
tf.logging.set_verbosity(tf.logging.ERROR)
# Using TensorFlow backend
os.environ["KERAS_BACKEND"] = "tensorflow"
# Report the library versions actually loaded -- useful when debugging
# weight-file compatibility issues.
print('tensorflow version:',tf.__version__)
print('keras version:',krs.__version__)
class enhance(object):
    """Super-resolve a single FITS image with a trained CNN.

    Workflow: ``__init__`` loads and normalises the image,
    ``define_network`` builds the Keras model and loads its weights, and
    ``predict`` runs the model, rescales the WCS header accordingly and
    writes the enhanced FITS file.
    """
    def __init__(self, inputFile, depth, model, activation, ntype, output):
        # inputFile : path of the FITS file to enhance
        # depth     : network depth passed to the model builder
        # model     : architecture name (only 'keepsize' is wired up below)
        # activation: activation name (stored but not used in this class)
        # ntype     : 'intensity' or 'blos'; selects weights + normalisation
        # output    : path of the FITS file to write
        self.hdu = fits.open(inputFile)
        #Autofix broken header files according to fits standard
        self.hdu.verify('silentfix')
        # Some FITS files keep the image in extension 1 instead of 0.
        index = 0
        if np.all(self.hdu[index].data == None): index = 1
        self.image = np.nan_to_num(self.hdu[index].data[:,:])
        self.header = self.hdu[index].header
        print('Size image: ',self.image.shape)
        self.input = inputFile
        self.depth = depth
        self.network_type = model
        self.activation = activation
        self.ntype = ntype
        self.output = output
        # Images larger than this on either axis are predicted in 4 tiles.
        self.big_image = 2048
        self.split = False
        # Normalisation factor: peak value for intensity images, 1e3
        # (kilogauss) for line-of-sight magnetograms.
        self.norm = 1.0
        if self.ntype == 'intensity':
            self.norm = np.max(self.image)
        if self.ntype == 'blos':
            self.norm = 1e3
        self.image = self.image/self.norm
    def define_network(self): #, image):
        """Build the Keras model for the current image size, load weights."""
        print("Setting up network...")
        #self.image = image
        self.nx = self.image.shape[1]
        self.ny = self.image.shape[0]
        # Very large images are processed as four half-size tiles, so the
        # network is built for the half size.
        if self.nx > self.big_image or self.ny > self.big_image:
            self.split = True
            self.nx = int(self.image.shape[1]/2)
            self.ny = int(self.image.shape[0]/2)
        if (self.network_type == 'keepsize'):
            self.model = nn_model.keepsize(self.ny, self.nx, 0.0, self.depth,n_filters=64, l2_reg=1e-7)
        # NOTE(review): any other network_type leaves self.model unset and
        # the next line raises AttributeError -- confirm 'keepsize' is the
        # only supported option.
        print("Loading weights...")
        self.model.load_weights("network/{0}_weights.hdf5".format(self.ntype))
    def predict_image(self,inputdata):
        """Run the network on a (1, ny, nx, 1) array.

        Returns the prediction, which downstream code treats as the 2x
        upsampled image (1, 2*ny, 2*nx, 1) -- TODO confirm against the
        model definition in `models`.
        """
        # Patch for big images in keras
        if self.split is True:
            # Predict each of the four half-size tiles independently and
            # stitch the upsampled results back together.
            M = inputdata.shape[1]//2
            N = inputdata.shape[2]//2
            out = np.empty((1,inputdata.shape[1]*2,inputdata.shape[2]*2, 1))
            for x in range(0,inputdata.shape[1],M):
                for y in range(0,inputdata.shape[2],N):
                    out[:,x*2:x*2+M*2,y*2:y*2+N*2,:] = self.model.predict(inputdata[:,x:x+M,y:y+N,:])
            self.nx = inputdata.shape[2]
            self.ny = inputdata.shape[1]
        else:
            out = self.model.predict(inputdata)
            # NOTE(review): this second predict() call exists only to print
            # the output shape but doubles the runtime of this branch.
            print(self.model.predict(inputdata).shape)
        return out
    def predict(self,plot_option=False,sunpy_map=False):
        """Run the full enhancement and write the output FITS file.

        Optionally saves before/after comparison figures
        (``plot_option``) or sunpy map plots (``sunpy_map``).
        """
        print("Predicting data...")
        input_validation = np.zeros((1,self.image.shape[0],self.image.shape[1],1), dtype='float32')
        input_validation[0,:,:,0] = self.image
        start = time.time()
        out = self.predict_image(input_validation)
        end = time.time()
        print("Prediction took {0:3.2} seconds...".format(end-start))
        print("Updating header ...")
        #Calculate scale factor (currently should be 0.5 because of 2 factor upscale)
        # Undo the normalisation applied in __init__.
        new_data = out[0,:,:,0]
        new_data = new_data*self.norm
        new_dim = new_data.shape
        scale_factor_x = float(self.nx / new_dim[1])
        scale_factor_y = float(self.ny / new_dim[0])
        #fix map scale after upsampling
        if 'cdelt1' in self.header:
            self.header['cdelt1'] *= scale_factor_x
            self.header['cdelt2'] *= scale_factor_y
        #WCS rotation keywords used by IRAF and HST
        if 'CD1_1' in self.header:
            self.header['CD1_1'] *= scale_factor_x
            self.header['CD2_1'] *= scale_factor_x
            self.header['CD1_2'] *= scale_factor_y
            self.header['CD2_2'] *= scale_factor_y
        #Patch center with respect of lower left corner
        if 'crpix1' in self.header:
            self.header['crpix1'] /= scale_factor_x
            self.header['crpix2'] /= scale_factor_y
        #Number of pixel per axis
        if 'naxis1' in self.header:
            self.header['naxis1'] = new_dim[1]
            self.header['naxis2'] = new_dim[0]
        print("Saving data...")
        hdu = fits.PrimaryHDU(new_data, self.header)
        import os.path
        if os.path.exists(self.output):
            os.system('rm {0}'.format(self.output))
            print('Overwriting...')
        hdu.writeto('{0}'.format(self.output), output_verify="ignore")
        if plot_option is True:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.subplot(121)
            plt.imshow(self.image,cmap='gray',origin='lower',vmin=self.image.min(),vmax=self.image.max())
            plt.subplot(122)
            plt.imshow(out[0,:,:,0],cmap='gray',origin='lower',vmin=self.image.min(),vmax=self.image.max())
            plt.tight_layout()
            plt.savefig('hmi_test.pdf', bbox_inches='tight')
        if sunpy_map is True:
            # NOTE(review): `plt` is only imported in the plot_option
            # branch above, so sunpy_map=True with plot_option=False
            # raises NameError here -- confirm and fix upstream.
            import sunpy.map
            sdomap0 =sunpy.map.Map(self.input)
            sdomap1 =sunpy.map.Map(self.output)
            plt.figure()
            plt.subplot(121)
            sdomap0.plot()
            plt.subplot(122)
            sdomap1.plot()
            plt.tight_layout()
            plt.savefig('hmi_test2.pdf', bbox_inches='tight')
if (__name__ == '__main__'):
    """
    Using Enhance for prediction:
    =============================
    python enhance.py -i samples/hmi.fits -t intensity -o output/hmi_enhanced.fits
    python enhance.py -i samples/blos.fits -t blos -o output/blos_enhanced.fits
    """
    # Command-line front end: parse the options, build the network and
    # run a single prediction.
    parser = argparse.ArgumentParser(description='Prediction')
    parser.add_argument('-i','--input', help='input')  # input FITS file path
    parser.add_argument('-o','--out', help='out')      # output FITS file path
    parser.add_argument('-d','--depth', help='depth', default=5)
    parser.add_argument('-m','--model', help='model', choices=['encdec', 'encdec_reflect', 'keepsize_zero', 'keepsize'], default='keepsize')
    parser.add_argument('-c','--activation', help='Activation', choices=['relu', 'elu'], default='relu')
    parser.add_argument('-t','--type', help='type', choices=['intensity', 'blos'], default='intensity')
    parsed = vars(parser.parse_args())
    #f = fits.open(parsed['input'])
    #imgs = f[0].data
    #hdr = f[0].header
    print('Model : {0}'.format(parsed['type']))
    out = enhance('{0}'.format(parsed['input']), depth=int(parsed['depth']), model=parsed['model'], activation=parsed['activation'],ntype=parsed['type'], output=parsed['out'])
    #out.define_network(image=imgs)
    out.define_network()
    out.predict(plot_option=False)
    # To avoid the TF_DeleteStatus message:
    # https://github.com/tensorflow/tensorflow/issues/3388
    ktf.clear_session()
| mit |
physycom/metnum | python/bernoulli2D.py | 1 | 1068 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 22:30:58 2018
@author: NICO
"""
import numpy as np # numerical library
import matplotlib.pylab as plt # plot library
import matplotlib.animation as animation # animation plot
#%% Bernoulli2D
bernoulli2D = lambda x : np.mod(2*x, 1) # bernoulli formula
# initial condition
N = 100 # number of points
x = np.linspace(0, 1, N) # x steps
y = np.linspace(0, 1, N) # y steps
meanx, stdx = np.mean(x), np.std(x)
meany, stdy = np.mean(y), np.std(y)
x, y = np.meshgrid(x, y)
G = np.exp( - ( .5*(x-meanx)**2 / stdx**2 + .5*(y-meany)**2 / stdy**2 ) )
fig = plt.figure(figsize=(8,8))
time = 100 # number of iterations
ims = np.empty(time - 1, dtype=np.object)
ev0 = G
for i in range(1, time):
ev = bernoulli2D(ev0)
ims[i-1] = [plt.imshow( ev, animated=True )]
ev0 = ev
movie = animation.ArtistAnimation(fig,
ims,
interval=50,
blit=True,
repeat_delay=100
) | bsd-2-clause |
keialk/TRAP | TimeSeries.py | 1 | 9396 |
"""
TRAP - Time-series RNA-seq Analysis Package
Created by Kyuri Jo on 2014-02-05.
Copyright (c) 2014 Kyuri Jo. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import random
import copy
import numpy as np
import networkx as nx
import scipy.stats as stats
import matplotlib.pyplot as plt
import TRAP
def new_hypergeom_sf(k, *args, **kwds):
    """Survival function of the hypergeometric distribution, robust to
    out-of-support values of ``k``.

    ``args`` starts with ``(M, n, N)``: population size, number of success
    states in the population, and sample size.  Some scipy versions raise
    an ``IndexError`` from ``stats.hypergeom.sf`` when ``k >= n``; in that
    case the survival probability is exactly 0.
    """
    (M, n, N) = args[0:3]
    try:
        return stats.hypergeom.sf(k, *args, **kwds)
    except Exception as inst:
        # isinstance instead of exact type comparison (idiomatic, and also
        # catches IndexError subclasses)
        if k >= n and isinstance(inst, IndexError):
            return 0  ## or conversely 1 - hypergeom.cdf(k, *args, **kwds)
        else:
            # bare re-raise preserves the original traceback
            raise
def calPF(g, gene, redic, PFdic, t, a, recur) :
    """Recursively compute the perturbation factor (PF) of gene *g* at time *t*.

    PF(g, t) = a * (weighted sum of associated genes' PF at t-1)
             + (1 - a) * (weighted sum of associated genes' PF at t)
             + gene[g]  (the gene's own value, presumably its fold change)

    Arguments (as used by the code):
      g     -- gene id whose PF is computed
      gene  -- dict: gene id -> value at the current time point
      redic -- dict: gene id -> list of association tuples; each tuple is
               indexed as (other_gene, divisor, weight) below
      PFdic -- list indexed by time of dicts gene -> PF; filled IN PLACE
      t     -- current time point index
      a     -- weight of the previous time point's contribution (time lag)
      recur -- set of genes already visited on this recursion path,
               used to cut cycles in the association graph
    """
    if (g in redic) :
        PFsum_pre = 0
        PFsum_curr = 0
        # contribution from the previous time point (only when one exists)
        if (t>0) :
            for asc in redic[g] :
                PFsum_pre = PFsum_pre + asc[2]*(PFdic[t-1][asc[0]]/asc[1])
        for asc in redic[g] :
            # ensure the associated gene's PF at time t is available first
            if (asc[0] not in PFdic[t]) :
                if (asc[0] in recur) :
                    # cycle detected: fall back to the gene's raw value
                    PFdic[t][asc[0]]=gene[asc[0]]
                else :
                    recur.add(g)
                    calPF(asc[0], gene, redic, PFdic, t, a, recur)
            PFsum_curr = PFsum_curr + asc[2]*(PFdic[t][asc[0]]/asc[1])
        PFdic[t][g] = a*PFsum_pre + (1-a)*PFsum_curr + gene[g]
    else :
        # no associations recorded: PF is just the gene's own value
        PFdic[t][g] = gene[g]
def pathwayAnalysis(outPath, wgene, wredic, DEG, idDic, pnameDic, timeLag, timeLen, ind, fcList) :
    """Run the TRAP time-series pathway analysis and write result files.

    NOTE: this module targets Python 2 (``iteritems``, list-returning
    ``filter``).

    For every pathway i it computes:
      * pPERT -- permutation p-value of the total accumulated perturbation
        (tA), using ``calPF`` and a null distribution from resampled values
      * pORA  -- hypergeometric over-representation p-value of the DEG counts
      * pG    -- combination of pORA and pPERT (Fisher-like: c - c*log(c))
      * FDR-corrected versions of the above (via TRAP.cal_FDR)

    Side effects: writes <outPath>_DEG.txt, <outPath>_pathway.txt,
    <outPath>_pathway.png, <outPath>_node.txt and <outPath>_edge.txt.
    Returns None.
    """
    fileN = len(ind)
    # per-pathway accumulators, indexed by the pathway index in `ind`
    tA = []
    status = []
    pORA = []
    pOFDR = []
    pPERT = []
    pG = []
    pFDR = []
    pMIX = []
    # totWgene[t] collects, over all genes, the value at time t (used as the
    # resampling pool for the null distribution)
    totWgene = []
    for t in range(timeLen) :
        totWgene.append([])
        for g,exp in fcList.iteritems():
            totWgene[t].append(exp[t])
    for i in range(0, fileN) :
        tA.append(0)
        status.append([])
        pORA.append(0)
        pOFDR.append(0)
        pPERT.append(0)
        pG.append(0)
        pFDR.append(0)
        pMIX.append(0)
        # pathways without association data get no perturbation analysis
        if wredic[i]=={} :
            continue
        # pPERT
        # Calculation of PF
        tempPF = []
        currtA = 0
        recur = set()
        for t in range(0, timeLen) :
            tempPF.append({})
            for gene in wgene[t][i] :
                calPF(gene, wgene[t][i], wredic[i], tempPF, t, timeLag, recur)
            # accumulated perturbation = PF minus the raw values
            currtA = currtA + sum(tempPF[t].values())-sum(wgene[t][i].values())
            status[i].append(sum(tempPF[t].values()))
        tA[i] = currtA
        # Calculation of tA (Null dist)
        nulltA = []
        repeat = 2000
        for j in range(0, repeat) :
            nullTemp = 0
            randPF = []
            tempFC = []
            recur = set()
            for t in range(0, timeLen) :
                tempFCt = copy.copy(wgene[t][i])
                # sh = tempFCt.values()
                # random.shuffle(sh)
                for key, value in tempFCt.iteritems() :
                    # tempFCt[key]=sh[random.randint(0, len(tempFCt)-1)]
                    tempFCt[key]=totWgene[t][random.randint(0, len(totWgene[t])-1)]
                tempFC.append(tempFCt)
            for t in range(0, timeLen) :
                randPF.append({})
                for g in tempFCt :
                    calPF(g, tempFC[t], wredic[i], randPF, t, timeLag, recur)
                nullTemp = nullTemp + sum(randPF[t].values())-sum(tempFC[t].values())
            nulltA.append(nullTemp)
        # two-sided-style empirical p-value: count null draws at least as
        # extreme as the observed tA (rounded to avoid float noise)
        def above(x):
            return round(x, 5)>=round(currtA, 5)
        def below(x):
            return round(x, 5)<=round(currtA, 5)
        avgtA = np.median(nulltA)
        if (currtA >=avgtA) :
            pPERT[i]=float(len(filter(above, nulltA)))/float(repeat)
        else :
            pPERT[i]=float(len(filter(below, nulltA)))/float(repeat)
        # translate the per-time-point PF sums into activity labels
        for t in range(timeLen) :
            if status[i][t] >=0 :
                status[i][t]="Activated"
            else :
                status[i][t]="Inhibited"
    # pORA
    genesum = {}
    DEGset = []
    DEGsum = 0
    for i in range(0, fileN) :
        genesum.update(wgene[0][i])
    # DEGset[t] = union of DEGs over all pathways at time t
    for t in range(0, timeLen) :
        DEGset.append(set())
        for i in range(0, fileN) :
            DEGset[t] = DEGset[t].union(DEG[t][i])
        DEGsum = DEGsum + len(DEGset[t])
    totG = len(genesum)*timeLen
    totD = DEGsum
    geneNum = []
    DEGnum = []
    for i in range(0, fileN) :
        geneNum.append(0)
        DEGnum.append(0)
        geneNum[i]=len(wgene[0][i])*timeLen
        for t in range(0, timeLen) :
            DEGnum[i] = DEGnum[i] + len(DEG[t][i])
    # hypergeometric over-representation test per pathway
    for i in range(0, fileN):
        pORA[i]=new_hypergeom_sf(DEGnum[i], totG, totD, geneNum[i], loc=0)
    # pG
    for i in range(0, fileN) :
        c = pORA[i]*pPERT[i]
        if (c<=0) :
            # NOTE(review): '==' is a comparison, not an assignment; harmless
            # only because pG[i] is already 0 -- probably meant 'pG[i] = 0'.
            pG[i]==0
        else :
            pG[i] = c-c*math.log(c)
    pFDR = TRAP.cal_FDR(pG)
    pOFDR = TRAP.cal_FDR(pORA)
    # pMIX: use the ORA-based FDR when no association data exists
    for i in range(0, fileN) :
        if (wredic[i]=={}) :
            pMIX[i]=pOFDR[i]
        else :
            pMIX[i]=pFDR[i]
    # Text result
    # <outPath>_DEG.txt: one row per DEG, 'O'/'X' per time point membership
    outDEG = open(outPath+"_DEG.txt", "w")
    tempDEG = set()
    outDEG.write("GeneID\t")
    for t in range(0, timeLen) :
        outDEG.write(str(t)+"\t")
        tempDEG = tempDEG.union(DEGset[t])
    outDEG.write("\n")
    for gene in tempDEG :
        if (gene in idDic) :
            outDEG.write(idDic[gene][0]+"\t")
        else :
            outDEG.write(gene+"\t")
        for t in range(0, timeLen):
            if (gene in DEGset[t]) :
                outDEG.write("O\t")
            else :
                outDEG.write("X\t")
        outDEG.write("\n")
    outDEG.close()
    # <outPath>_pathway.txt: one row per pathway, sorted by pMIX
    outPathway = open(outPath+"_pathway.txt", "w")
    sthead = []
    for t in range(timeLen) :
        sthead.append('Status'+str(t+1))
    outPathway.write("PathwayID\tPathwayName \tGeneNum\tDEGNum\tpORA\tpORAfdr\ttA\tpPERT\tpG\tpG_fdr\t"+'\t'.join(sthead)+"\n")
    sortedkey = sorted(ind, key = lambda x : pMIX[ind[x]])
    for sk in sortedkey :
        i = ind[sk]
        ststr = []
        for t in range(timeLen) :
            ststr.append('.')
        # pad or truncate the pathway name to a fixed display width
        pathwayName = ""
        if (sk in pnameDic) :
            pathwayName = pnameDic[sk]
        nameLen = len(pathwayName)
        if (nameLen<15) :
            pathwayName = pathwayName+TRAP.addStr(18-nameLen)
        else :
            pathwayName = pathwayName[0:15]+"..."
        if (wredic[i]=={}) :
            outPathway.write(sk+"\t"+pathwayName+"\t"+str(geneNum[i])+"\t"+str(DEGnum[i])+"\t"+str(round(pORA[i],3))+"\t"+str(round(pOFDR[i], 3))+"\t.\t.\t.\t.\t"+'\t'.join(ststr)+"\n")
        else :
            outPathway.write(sk+"\t"+pathwayName+"\t"+str(geneNum[i])+"\t"+str(DEGnum[i])+"\t"+str(round(pORA[i],3))+"\t"+str(round(pOFDR[i],3))+"\t"+str(round(tA[i],3))+"\t"+str(round(pPERT[i],3))+"\t"+str(round(pG[i],3))+"\t"+str(round(pFDR[i],3))+"\t"+'\t'.join(status[i])+"\n")
    outPathway.close()
    # Graph result
    # Build a pathway graph: nodes are significant pathways (p < 0.1), node
    # color encodes direction/significance, size encodes gene count, edges
    # are labelled with the number of shared genes.
    G = nx.Graph()
    for f,i in ind.iteritems() :
        if (wredic[i]=={}) :
            pval=pOFDR[i]
            if (pval<=0.01) :
                color='#B2FFB2'
            elif (pval<=0.05) :
                color='#4CFF4C'
            else :
                color='#FFFFFF'
        else :
            pval=pFDR[i]
            # NOTE(review): status[i] is a *list* of per-time-point labels,
            # so this comparison with a single string is always False and
            # the 'Inhibited' color branch is taken -- confirm intent.
            if (status[i]=="Activated") :
                if (pval<=0.01) :
                    color='#FFB2B2'
                elif (pval<=0.05) :
                    color='#FF4C4C'
                else :
                    color='#FFFFFF'
            else :
                if (pval<=0.01) :
                    color='#B2B2FF'
                elif (pval<=0.05) :
                    color='#4C4CFF'
                else :
                    color='#FFFFFF'
        if (len(wgene[0][i])>=300) :
            size = 4500
        elif (len(wgene[0][i])<=50) :
            size = 750
        else :
            size = len(wgene[0][i])*15
        if pval>=0.1 :
            continue
        G.add_node(f[:8], color=color, size=size, pval=pval)
    edgelist = []
    plist = nx.get_node_attributes(G, 'pval')
    for f1,i1 in ind.iteritems() :
        for f2,i2 in ind.iteritems() :
            # each unordered pair is processed once via the edgelist check
            if (f1!=f2 and f1[:8] in plist and f2[:8] in plist and (set([f1,f2]) not in edgelist)):
                edgelist.append(set([f1,f2]))
                inter = dict.fromkeys(x for x in wgene[0][i1] if x in wgene[0][i2])
                intsize = len(inter)
                if (intsize!=0) :
                    G.add_edge(f1, f2, label=intsize)
    nodeN = G.number_of_nodes()
    if (nodeN>0) :
        graph_pos=nx.fruchterman_reingold_layout(G, dim=2, pos=None, fixed=None, iterations=50, weight='label', scale=1)
        nodes,ncolors = zip(*nx.get_node_attributes(G,'color').items())
        nodes,sizes = zip(*nx.get_node_attributes(G, 'size').items())
        colorDict = dict(zip(nodes, ncolors))
        sizeDict = dict(zip(nodes, sizes))
        if (G.number_of_edges()>0) :
            edges,labels = zip(*nx.get_edge_attributes(G, 'label').items())
            labeldic = dict(zip(edges, labels))
            plt.figure(figsize=(100,100))
            nx.draw(G, graph_pos, nodelist=nodes, edgelist=edges, node_color=ncolors, node_size=sizes)
            nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=labeldic)
            nx.draw_networkx_labels(G, graph_pos)
        else :
            plt.figure(figsize=(20,20))
            nx.draw(G, graph_pos, nodelist=nodes, node_color=ncolors, node_size=sizes)
            nx.draw_networkx_labels(G, graph_pos)
        plt.savefig(outPath+"_pathway.png")
        GfileN = open(outPath+"_node.txt", 'w')
        GfileE = open(outPath+"_edge.txt", 'w')
        GfileN.write('Node\tColor\tSize\n')
        for n in G.nodes() :
            GfileN.write(n+'\t'+colorDict[n]+'\t'+str(sizeDict[n])+'\n')
        GfileE.write('Node1\tNode2\tCommonGenes\n')
        # NOTE(review): when the graph has nodes but no edges, 'labeldic' is
        # never defined; the loop body below would raise NameError if G ever
        # reported edges here -- it is safe only because G.edges() is empty.
        for (a, b) in G.edges() :
            GfileE.write(a+'\t'+b+'\t'+str(labeldic[(a,b)])+'\n')
        GfileN.close()
        GfileE.close()
| gpl-3.0 |
rgommers/statsmodels | statsmodels/sandbox/distributions/otherdist.py | 33 | 10145 | '''Parametric Mixture Distributions
Created on Sat Jun 04 2011
Author: Josef Perktold
Notes:
Compound Poisson has mass point at zero
http://en.wikipedia.org/wiki/Compound_Poisson_distribution
and would need special treatment
need a distribution that has discrete mass points and contiuous range, e.g.
compound Poisson, Tweedie (for some parameter range),
pdf of Tobit model (?) - truncation with clipping
Question: Metaclasses and class factories for generating new distributions from
existing distributions by transformation, mixing, compounding
'''
from __future__ import print_function
import numpy as np
from scipy import stats
class ParametricMixtureD(object):
    '''mixtures with a discrete distribution

    The mixing distribution is a discrete frozen distribution like
    scipy.stats.poisson.  All distributions in the mixture are of the same
    continuous type and are parameterized by the outcome of the mixing
    distribution (e.g. a Poisson mixture of normals).

    Assumes vectorized shape, loc and scale as in scipy.stats.distributions
    and that ``mixing_dist`` is frozen.
    '''

    def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
                 cutoff=1e-3):
        '''create a mixture distribution

        Parameters
        ----------
        mixing_dist : discrete frozen distribution
            mixing distribution
        base_dist : continuous distribution
            parameterized distributions in the mixture
        bd_args_func : callable
            maps the mixing support values to the args tuple for base_dist
            (empty tuple or tuple of arrays)
        bd_kwds_func : callable
            maps the mixing support values to the kwds dict for base_dist
            (empty dict or dict with arrays as values)
        cutoff : float
            If the mixing distribution has infinite support, it is truncated
            with approximately (subject to integer conversion) the cutoff
            probability in the missing tail.  Random draws outside the
            truncated range are clipped to the nearest supported value.
        '''
        self.mixing_dist = mixing_dist
        self.base_dist = base_dist
        # determine the (possibly truncated) support of the mixing dist;
        # BUG FIX: the tail probability was hardcoded to 1e-4, ignoring the
        # documented `cutoff` parameter
        if not np.isneginf(mixing_dist.dist.a):
            lower = mixing_dist.dist.a
        else:
            lower = mixing_dist.ppf(cutoff)
        if not np.isposinf(mixing_dist.dist.b):
            upper = mixing_dist.dist.b
        else:
            upper = mixing_dist.isf(cutoff)
        self.ma = lower
        self.mb = upper
        mixing_support = np.arange(lower, upper + 1)
        self.mixing_probs = mixing_dist.pmf(mixing_support)

        self.bd_args = bd_args_func(mixing_support)
        self.bd_kwds = bd_kwds_func(mixing_support)

    def rvs(self, size=1):
        """Draw random variates.  Returns (rvs, mixing_support_indices)."""
        mrvs = self.mixing_dist.rvs(size)
        # TODO: check strange cases ? this assumes continuous integers;
        # draws outside the truncated support are clipped to its ends
        mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)

        # select the base-distribution parameters matching each mixing draw
        bd_args = tuple(md[mrvs_idx] for md in self.bd_args)
        bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)
        kwds = {'size': size}
        kwds.update(bd_kwds)
        # BUG FIX: previously passed self.bd_args (parameters for the whole
        # support) instead of the per-draw selection computed above
        rvs = self.base_dist.rvs(*bd_args, **kwds)
        return rvs, mrvs_idx

    def pdf(self, x):
        """Mixture pdf at x.  Returns (mixture_pdf, per-component pdfs)."""
        x = np.asarray(x)
        if np.size(x) > 1:
            # broadcast x against the mixing support along the last axis
            x = x[..., None]  #[None, ...]
        bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)
        prob = (bd_probs * self.mixing_probs).sum(-1)
        return prob, bd_probs

    def cdf(self, x):
        """Mixture cdf at x.  Returns (mixture_cdf, per-component cdfs)."""
        x = np.asarray(x)
        if np.size(x) > 1:
            x = x[..., None]  #[None, ...]
        bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)
        prob = (bd_probs * self.mixing_probs).sum(-1)
        return prob, bd_probs
#try:
class ClippedContinuous(object):
    '''clipped continuous distribution with a masspoint at clip_lower

    Notes
    -----
    The clip_lower point in the pdf has to be interpreted as a mass point,
    i.e. it needs different treatment in integration and expect function,
    which means none of the generic rv_continuous methods can be used.

    Maybe this would be better designed as a mixture between a degenerate
    (discrete) and a continuous distribution.

    Warning: uses floating-point equality to detect clip_lower values in
    function arguments; the comparison can fail if values are not exactly
    equal.
    '''

    def __init__(self, base_dist, clip_lower):
        self.base_dist = base_dist
        self.clip_lower = clip_lower

    def _get_clip_lower(self, kwds):
        '''helper method to get clip_lower from kwds or instance attribute'''
        if not 'clip_lower' in kwds:
            clip_lower = self.clip_lower
        else:
            # allow clip_lower to be a per-call parameter
            clip_lower = kwds.pop('clip_lower')
        return clip_lower, kwds

    def rvs(self, *args, **kwds):
        """Draw from base_dist and clip draws below clip_lower in place."""
        clip_lower, kwds = self._get_clip_lower(kwds)
        rvs_ = self.base_dist.rvs(*args, **kwds)
        # same as numpy.clip ?
        rvs_[rvs_ < clip_lower] = clip_lower
        return rvs_

    def pdf(self, x, *args, **kwds):
        """pdf of the clipped distribution; the value at clip_lower is the
        point mass P(base <= clip_lower), not a density."""
        x = np.atleast_1d(x)
        clip_lower, kwds = self._get_clip_lower(kwds)
        pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))
        # BUG FIX: the mask previously used self.clip_lower, ignoring a
        # clip_lower passed per call via kwds
        clip_mask = (x == clip_lower)
        if np.any(clip_mask):
            clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
            pdf_raw[clip_mask] = clip_prob

        # the following would be handled by sub-classing rv_continuous
        pdf_raw[x < clip_lower] = 0

        return pdf_raw

    def cdf(self, x, *args, **kwds):
        """cdf of the clipped distribution (0 below clip_lower)."""
        clip_lower, kwds = self._get_clip_lower(kwds)
        # atleast_1d so boolean-mask assignment also works for scalar x
        # (previously crashed on scalars)
        x = np.atleast_1d(x)
        cdf_raw = np.atleast_1d(self.base_dist.cdf(x, *args, **kwds))

        # the following would be handled by sub-classing rv_continuous
        # if self.a is defined
        cdf_raw[x < clip_lower] = 0

        return cdf_raw

    def sf(self, x, *args, **kwds):
        """survival function; equals 1 at and below the mass point."""
        clip_lower, kwds = self._get_clip_lower(kwds)
        x = np.atleast_1d(x)
        sf_raw = np.atleast_1d(self.base_dist.sf(x, *args, **kwds))
        sf_raw[x <= clip_lower] = 1

        return sf_raw

    def ppf(self, x, *args, **kwds):
        raise NotImplementedError

    def plot(self, x, *args, **kwds):
        """Plot the continuous part plus a stem for the mass point."""
        clip_lower, kwds = self._get_clip_lower(kwds)
        mass = self.pdf(clip_lower, *args, **kwds)
        xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))
        import matplotlib.pyplot as plt
        #x = np.linspace(-4, 4, 21)
        #plt.figure()
        plt.xlim(clip_lower-0.1, x.max())
        #remove duplicate calculation
        xpdf = self.pdf(x, *args, **kwds)
        plt.ylim(0, max(mass, xpdf.max())*1.1)
        plt.plot(xr, self.pdf(xr, *args, **kwds))
        #plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))
        plt.stem([clip_lower], [mass],
                 linefmt='b-', markerfmt='bo', basefmt='r-')
        return
if __name__ == '__main__':
    # demo / smoke test of the two classes above

    doplots = 1

    #*********** Poisson-Normal Mixture
    mdist = stats.poisson(2.)
    bdist = stats.norm
    bd_args_fn = lambda x: ()
    #bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}
    bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}
    # NOTE(review): 'pd' shadows the common pandas alias; local to this demo
    pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)
    print(pd.pdf(1))
    p, bp = pd.pdf(np.linspace(0,20,21))
    pc, bpc = pd.cdf(np.linspace(0,20,21))
    print(pd.rvs())
    rvs, m = pd.rvs(size=1000)

    if doplots:
        import matplotlib.pyplot as plt
        plt.hist(rvs, bins = 100)
        plt.title('poisson mixture of normal distributions')

    #********** clipped normal distribution (Tobit)
    bdist = stats.norm
    clip_lower_ = 0. #-0.5
    cnorm = ClippedContinuous(bdist, clip_lower_)

    x = np.linspace(1e-8, 4, 11)
    print(cnorm.pdf(x))
    print(cnorm.cdf(x))

    if doplots:
        #plt.figure()
        #cnorm.plot(x)
        plt.figure()
        cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))
        plt.title('clipped normal distribution')

        # 2x2 grid of the clipped normal for several locations
        fig = plt.figure()
        for i, loc in enumerate([0., 0.5, 1.,2.]):
            fig.add_subplot(2,2,i+1)
            cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))
            plt.title('clipped normal, loc = %3.2f' % loc)

        loc = 1.5
        rvs = cnorm.rvs(loc=loc, size=2000)
        plt.figure()
        plt.hist(rvs, bins=50)
        plt.title('clipped normal rvs, loc = %3.2f' % loc)

    #plt.show()
timcera/tsgettoolbox | tsgettoolbox/ulmo/usgs/eddn/core.py | 1 | 10407 | # -*- coding: utf-8 -*-
"""
ulmo.usgs.eddn.core
~~~~~~~~~~~~~~~~~~~~~
This module provides access to data provided by the `United States Geological
Survey`_ `Emergency Data Distribution Network`_ web site.
The `DCP message format`_ includes some header information that is parsed and
the message body, with a variable number of characters. The format of the
message body varies widely depending on the manufacturer of the transmitter,
data logger, sensors, and the technician who programmed the DCP. The body can
be simple ASCII, sometime with parameter codes and time-stamps embedded,
sometimes not. The body can also be in 'Pseudo-Binary' which is character
encoding of binary data that uses 6 bits of every byte and guarantees that
all characters are printable.
.. _United States Geological Survey: http://www.usgs.gov/
.. _Emergency Data Distribution Network: http://eddn.usgs.gov/
.. _http://eddn.usgs.gov/dcpformat.html
"""
import logging
import os
import re
import shutil
from datetime import datetime, timedelta
import isodate
import pandas as pd
import requests
from bs4 import BeautifulSoup
from past.builtins import basestring
from tsgettoolbox.ulmo import util
from . import parsers
# eddn query base url
EDDN_URL = "http://eddn.usgs.gov/cgi-bin/retrieveData.pl?%s"
# default file path (appended to default ulmo path)
DEFAULT_FILE_PATH = "usgs/eddn/"
# configure logging
LOG_FORMAT = "%(message)s"
logging.basicConfig(format=LOG_FORMAT)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def decode(dataframe, parser, **kwargs):
    """Decode dcp message data in a pandas dataframe returned by
    ulmo.usgs.eddn.get_data().

    Parameters
    ----------
    dataframe : pandas.DataFrame
        pandas.DataFrame returned by ulmo.usgs.eddn.get_data()
    parser : {function, str}
        Function applied to each row of the dataframe; must return a (possibly
        empty) dataframe of decoded data, whose timestamps may differ from the
        original row's.  If a string is passed, the matching parser function is
        looked up in ulmo.usgs.eddn.parsers.

    Returns
    -------
    decoded_data : pandas.DataFrame
        Format and columns depend wholly on the parser used.  An empty
        dataframe is returned when no row yields decoded data (previously
        this raised ValueError from ``pd.concat([])``).
    """
    # str covers the python3 case; this package targets python3, so the
    # past.builtins basestring shim is unnecessary here
    if isinstance(parser, str):
        parser = getattr(parsers, parser)

    decoded_frames = []
    for timestamp, data in dataframe.iterrows():
        parsed = parser(data, **kwargs)
        parsed.dropna(how="all", inplace=True)
        if not parsed.empty:
            decoded_frames.append(parsed)

    if not decoded_frames:
        # nothing decoded: avoid pd.concat([]) raising ValueError
        return pd.DataFrame()
    return pd.concat(decoded_frames)
def get_data(
    dcp_address,
    start=None,
    end=None,
    networklist="",
    channel="",
    spacecraft="Any",
    baud="Any",
    electronic_mail="",
    dcp_bul="",
    glob_bul="",
    timing="",
    retransmitted="Y",
    daps_status="N",
    use_cache=False,
    cache_path=None,
    as_dataframe=True,
):
    """Fetches GOES Satellite DCP messages from USGS Emergency Data Distribution Network.

    Parameters
    ----------
    dcp_address : str, iterable of strings
        DCP address or list of DCP addresses to be fetched; lists will be joined by a ','.
    start : {``None``, str, datetime, datetime.timedelta}
        If ``None`` (default) then the start time is 2 days prior (or date of last data if cache is used)
        If a datetime or datetime like string is specified it will be used as the start date.
        If a timedelta or string in ISO 8601 period format (e.g 'P2D' for a period of 2 days) then
        'now' minus the timedelta will be used as the start.
        NOTE: The EDDN service does not specify how far back data is available. The service also imposes
        a maximum data limit of 25000 character. If this is limit reached multiple requests will be made
        until all available data is downloaded.
    end : {``None``, str, datetime, datetime.timedelta}
        If ``None`` (default) then the end time is 'now'
        If a datetime or datetime like string is specified it will be used as the end date.
        If a timedelta or string in ISO 8601 period format (e.g 'P2D' for a period of 2 days) then
        'now' minus the timedelta will be used as the end.
        NOTE: The EDDN service does not specify how far back data is available. The service also imposes
        a maximum data limit of 25000 character.
    networklist : str,
        '' (default). Filter by network.
    channel : str,
        '' (default). Filter by channel.
    spacecraft : str,
        East, West, Any (default). Filter by GOES East/West Satellite
    baud : str,
        'Any' (default). Filter by baud rate. See http://eddn.usgs.gov/msgaccess.html for options
    electronic_mail : str,
        '' (default) or 'Y'
    dcp_bul : str,
        '' (default) or 'Y'
    glob_bul : str,
        '' (default) or 'Y'
    timing : str,
        '' (default) or 'Y'
    retransmitted : str,
        'Y' (default) or 'N'
    daps_status : str,
        'N' (default) or 'Y'
    use_cache : bool,
        If True (default) use hdf file to cache data and retrieve new data on subsequent requests
    cache_path : {``None``, str},
        If ``None`` use default ulmo location for cached files otherwise use specified path. files are named
        using dcp_address.
    as_dataframe : bool
        If True (default) return data in a pandas dataframe otherwise return a dict.

    Returns
    -------
    message_data : {pandas.DataFrame, dict}
        Either a pandas dataframe or a dict indexed by dcp message times
    """
    if isinstance(dcp_address, list):
        dcp_address = ",".join(dcp_address)

    data = pd.DataFrame()

    # warm-start from the on-disk cache when requested
    if use_cache:
        dcp_data_path = _get_store_path(cache_path, dcp_address + ".h5")
        if os.path.exists(dcp_data_path):
            data = pd.read_hdf(dcp_data_path, dcp_address)

    if start:
        drs_since = _format_time(start)
    else:
        # default start: just after the newest cached message, else 2 days back
        try:
            drs_since = _format_time(data["message_timestamp_utc"][-1])
        except:
            # NOTE(review): bare except -- hides unrelated failures; an
            # (IndexError, KeyError) tuple would be safer
            drs_since = "now -2 days"

    if end:
        drs_until = _format_time(end)
    else:
        drs_until = "now"

    # query parameters understood by the EDDN retrieveData.pl endpoint
    params = {}
    params["DCP_ADDRESS"] = dcp_address
    params["DRS_SINCE"] = drs_since
    params["DRS_UNTIL"] = drs_until
    params["NETWORKLIST"] = networklist
    params["CHANNEL"] = channel
    params["BEFORE"] = ("//START\n",)
    params["AFTER"] = ("\n//END\n",)
    params["SPACECRAFT"] = spacecraft
    params["BAUD"] = baud
    params["ELECTRONIC_MAIL"] = electronic_mail
    params["DCP_BUL"] = dcp_bul
    params["GLOB_BUL"] = glob_bul
    params["TIMING"] = timing
    params["RETRANSMITTED"] = retransmitted
    params["DAPS_STATUS"] = daps_status

    # the service caps each response at ~25000 characters; keep paging
    # backwards in time until everything in the window has been fetched
    data_limit_reached = True
    messages = []
    while data_limit_reached:
        new_message, data_limit_reached = _fetch_url(params)
        messages += new_message
        if data_limit_reached:
            params["DRS_UNTIL"] = _format_time(
                _parse(new_message[-1])["message_timestamp_utc"]
            )

    new_data = pd.DataFrame([_parse(row) for row in messages])

    if not new_data.empty:
        new_data.index = new_data.message_timestamp_utc
        # merge fresh rows with cached ones, preferring the fresh values
        data = new_data.combine_first(data)
        data.sort_index(inplace=True)
        if use_cache:
            # write to a tmp file and move to avoid ballooning h5 file
            tmp = dcp_data_path + ".tmp"
            data.to_hdf(tmp, dcp_address)
            shutil.move(tmp, dcp_data_path)

    if data.empty:
        if as_dataframe:
            return data
        else:
            return {}

    # trim the (possibly cache-augmented) data back to the requested window;
    # ISO 8601 period strings ('P2D') are relative to the newest message
    # NOTE(review): .startswith assumes start/end are strings here, but the
    # docstring also allows datetime/timedelta values -- those would raise
    # AttributeError; confirm intended input types.
    if start:
        if start.startswith("P"):
            start = data["message_timestamp_utc"][-1] - isodate.parse_duration(start)
        data = data[start:]

    if end:
        if end.startswith("P"):
            end = data["message_timestamp_utc"][-1] - isodate.parse_duration(end)
        data = data[:end]

    if not as_dataframe:
        data = data.T.to_dict()

    return data
def _fetch_url(params):
    """Issue one EDDN request and split the response into raw DCP messages.

    Returns
    -------
    (messages, data_limit_reached) : (list of str, bool)
        ``messages`` holds the text between each //START ... //END pair;
        ``data_limit_reached`` is True when the service truncated the
        response, signalling the caller to page for older data.
    """
    r = requests.get(EDDN_URL, params=params)
    log.info("data requested using url: %s\n" % r.url)
    # an explicit parser avoids bs4's "no parser was explicitly specified"
    # warning and keeps behavior consistent across installed parsers
    soup = BeautifulSoup(r.text, "html.parser")
    message = soup.find("pre").contents[0].replace("\n", "").replace("\r", " ")
    data_limit_reached = False
    if "Max data limit reached" in message:
        data_limit_reached = True
        log.info("Max data limit reached, making new request for older data\n")
    if not message:
        log.info("No data found\n")
        message = []
    else:
        message = [
            msg[1].strip()
            for msg in re.findall("(//START)(.*?)(//END)", message, re.M | re.S)
        ]
    return message, data_limit_reached
def _format_period(period):
days, hours, minutes = (
period.days,
period.seconds // 3600,
(period.seconds // 60) % 60,
)
if minutes:
return "now -%s minutes" % period.seconds / 60
if hours:
return "now -%s hours" % period.seconds / 3600
if days:
return "now -%s days" % days
def _format_time(timestamp):
    """Normalize a timestamp into the string format the EDDN service expects.

    Accepts an ISO 8601 string (period strings start with 'P'), a datetime,
    or a timedelta.  Datetimes become 'YYYY/DDD HH:MM:SS' (DDD = day of
    year); timedeltas are delegated to _format_period.  Other inputs fall
    through and return None.
    """
    # basestring comes from past.builtins (py2/3 shim imported at module top)
    if isinstance(timestamp, basestring):
        if timestamp.startswith("P"):
            # ISO 8601 period, e.g. 'P2D'
            timestamp = isodate.parse_duration(timestamp)
        else:
            timestamp = isodate.parse_datetime(timestamp)
    if isinstance(timestamp, datetime):
        return timestamp.strftime("%Y/%j %H:%M:%S")
    elif isinstance(timestamp, timedelta):
        return _format_period(timestamp)
def _get_store_path(path, default_file_name):
if path is None:
path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)
if not os.path.exists(path):
os.makedirs(path)
return os.path.join(path, default_file_name)
def _parse(line):
return {
"dcp_address": line[:8],
"message_timestamp_utc": datetime.strptime(line[8:19], "%y%j%H%M%S"),
"failure_code": line[19:20],
"signal_strength": line[20:22],
"frequency_offset": line[22:24],
"modulation_index": line[24:25],
"data_quality_indicator": line[25:26],
"goes_receive_channel": line[26:29],
"goes_spacecraft_indicator": line[29:30],
"uplink_carrier_status": line[30:32],
"message_data_length": line[32:37],
"dcp_message": line[37:],
}
| bsd-3-clause |
qe-team/marmot | marmot/experiment/learning_utils.py | 1 | 9015 | # utils for interfacing with Scikit-Learn
import logging
import numpy as np
import copy
from multiprocessing import Pool
from sklearn.metrics import f1_score
from marmot.learning.pystruct_sequence_learner import PystructSequenceLearner
from marmot.experiment.import_utils import call_for_each_element
from marmot.experiment.preprocessing_utils import flatten, fit_binarizers, binarize
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
# TODO: allow specification of cross-validation params at init time
def init_classifier(classifier_type, args=None):
    """Instantiate *classifier_type*, forwarding positional *args* if given."""
    if args is None:
        return classifier_type()
    return classifier_type(*args)
def train_classifier(X, y, classifier):
    """Fit *classifier* in place on features X and labels y (returns None)."""
    classifier.fit(X, y)
def map_classifiers(all_contexts, tags, classifier_type, data_type='plain', classifier_args=None):
    """Train one classifier ('plain') or one classifier per token ('token').

    Parameters
    ----------
    all_contexts : list/ndarray for 'plain', or dict token -> contexts for 'token'
    tags : labels aligned with all_contexts (dict token -> labels for 'token')
    classifier_type : classifier class, instantiated via init_classifier
    data_type : {'plain', 'token'}
    classifier_args : optional positional args for the classifier constructor

    Returns
    -------
    A fitted classifier ('plain') or a dict token -> fitted classifier ('token').

    Raises
    ------
    ValueError for an unknown data_type (previously returned None silently).
    """
    if data_type == 'plain':
        # isinstance is the idiomatic check (and accepts subclasses)
        assert isinstance(all_contexts, (np.ndarray, list))
        logger.info('training classifier')
        classifier = init_classifier(classifier_type, classifier_args)
        classifier.fit(all_contexts, tags)
        return classifier
    elif data_type == 'token':
        assert isinstance(all_contexts, dict)
        classifier_map = {}
        for token, contexts in all_contexts.items():
            logger.info('training classifier for token: {}'.format(token.encode('utf-8')))
            token_classifier = init_classifier(classifier_type, classifier_args)
            token_classifier.fit(contexts, tags[token])
            classifier_map[token] = token_classifier
        return classifier_map
    else:
        raise ValueError('unknown data_type: {}'.format(data_type))
def predict_all(test_features, classifier_map, data_type='plain'):
if data_type == 'plain':
predictions = classifier_map.predict(test_features)
return predictions
elif data_type == 'token':
test_predictions = {}
for key, features in test_features.iteritems():
try:
classifier = classifier_map[key]
predictions = classifier.predict(features)
test_predictions[key] = predictions
except KeyError as e:
print(key + " - is NOT in the classifier map")
raise
return test_predictions
def run_prediction((train_data, train_tags, test_data, test_tags, idx)):
    """Train and score a sequential (structured) model on one feature set.

    NOTE: the single 5-tuple parameter uses Python 2-only tuple unpacking,
    which lets this function be mapped directly over a list of tuples with
    multiprocessing.Pool.map (see selection_epoch_multi).

    Returns (per-class f1 array from sklearn's f1_score(average=None), idx),
    where idx identifies which feature was excluded from this run.
    """
    logger.info('training sequential model...')

    # fit binarizers on the training feature values and apply to both splits
    all_values = flatten(train_data)
    # binarize
    binarizers = fit_binarizers(all_values)
    test_data = call_for_each_element(test_data, binarize, [binarizers], data_type='sequential')
    train_data = call_for_each_element(train_data, binarize, [binarizers], data_type='sequential')

    # pystruct expects arrays of per-sequence arrays
    x_train = np.array([np.array(xi) for xi in train_data])
    y_train = np.array([np.array(xi) for xi in train_tags])
    x_test = np.array([np.array(xi) for xi in test_data])
    y_test = np.array([np.array(xi) for xi in test_tags])

    sequence_learner = PystructSequenceLearner()
    sequence_learner.fit(x_train, y_train)
    structured_hyp = sequence_learner.predict(x_test)

    logger.info('scoring sequential model...')
    # flatten sequence-level predictions/references for token-level scoring
    flattened_hyp = flatten(structured_hyp)

    flattened_ref = flatten(y_test)
    test_tags = flattened_ref

    logger.info('Structured prediction f1: ')
    cur_res = f1_score(flattened_ref, flattened_hyp, average=None)
    logger.info('[ {}, {} ], {}'.format(cur_res[0], cur_res[1], f1_score(flattened_ref, flattened_hyp, pos_label=None)))
    return (cur_res, idx)
def get_reduced_set(features_list, idx):
    """Return a copy of each feature vector with the element at position
    *idx* removed (i.e. drop feature number idx from every item)."""
    reduced = []
    for vector in features_list:
        reduced.append(vector[:idx] + vector[idx + 1:])
    return reduced
# train the model on all combinations of the feature set without one element
# TODO: the target metric should be tunable (now the f1 score of BAD class)
def selection_epoch(old_result, train_data, train_tags, test_data, test_tags, feature_names, data_type='sequential'):
    """One round of backward feature selection (serial version).

    Tries removing each feature in turn, retrains via run_prediction, and
    keeps the best-scoring reduced set.  Returns
    (removed_idx, best_score, reduced_train, reduced_test, reduced_features)
    when some removal beats old_result, otherwise (-1, old_result, [], [], []).
    """
    reduced_res = np.zeros((len(feature_names),))
    max_res = old_result
    reduced_train = train_data
    reduced_test = test_data
    reduced_features = feature_names
    for idx, name in enumerate(feature_names):
        logger.info("Excluding feature {}".format(name))
        # new feature sets without the feature <idx>
        cur_reduced_train = call_for_each_element(train_data, get_reduced_set, args=[idx], data_type=data_type)
        cur_reduced_test = call_for_each_element(test_data, get_reduced_set, args=[idx], data_type=data_type)
        # train a sequence labeller
        if data_type == 'sequential':
            cur_res = run_prediction((cur_reduced_train, train_tags, cur_reduced_test, test_tags, idx))
            # NOTE(review): run_prediction returns (per-class f1 *array*, idx);
            # storing cur_res[0] into a scalar slot and comparing an array with
            # '>' below looks suspect -- the multi-process variant uses
            # res[0][0] (f1 of class 0); confirm the intended metric here.
            reduced_res[idx] = cur_res[0]
            # if the result is better than previous -- save as maximum
            if cur_res[0] > max_res:
                max_res = cur_res[0]
                reduced_train = cur_reduced_train
                reduced_test = cur_reduced_test
                reduced_features = feature_names[:idx] + feature_names[idx+1:]
    # if better result is found -- return it
    if max_res > old_result:
        # NOTE(review): 'idx' here is the *last* loop index, not the index of
        # the best-performing removal -- likely a bug; compare with
        # selection_epoch_multi which tracks the winning index explicitly.
        return (idx, max_res, reduced_train, reduced_test, reduced_features)
    # none of the reduced sets worked better
    else:
        return (-1, old_result, [], [], [])
def selection_epoch_multi(old_result, train_data, train_tags, test_data, test_tags, feature_names, workers, data_type='sequential'):
# reduced_res = np.zeros((len(feature_names),))
max_res = old_result
reduced_train = train_data
reduced_test = test_data
reduced_features = feature_names
parallel_data = []
for idx, name in enumerate(feature_names):
# new feature sets without the feature <idx>
cur_reduced_train = call_for_each_element(train_data, get_reduced_set, args=[idx], data_type=data_type)
cur_reduced_test = call_for_each_element(test_data, get_reduced_set, args=[idx], data_type=data_type)
parallel_data.append((cur_reduced_train, train_tags, cur_reduced_test, test_tags, idx))
# train a sequence labeller
if data_type == 'sequential':
pool = Pool(workers)
reduced_res = pool.map(run_prediction, parallel_data)
print "Multiprocessing output: ", reduced_res
all_res = [res[0][0] for res in reduced_res]
# some feature set produced better result
if max(all_res) > old_result:
odd_feature_num = reduced_res[np.argmax(all_res)][1]
reduced_train = call_for_each_element(train_data, get_reduced_set, args=[odd_feature_num], data_type=data_type)
reduced_test = call_for_each_element(test_data, get_reduced_set, args=[odd_feature_num], data_type=data_type)
reduced_features = feature_names[:odd_feature_num] + feature_names[odd_feature_num+1:]
logger.info("Old result: {}, new result: {}, removed feature is {}".format(old_result, max(all_res), feature_names[odd_feature_num]))
return (feature_names[odd_feature_num], max(all_res), reduced_train, reduced_test, reduced_features)
# none of the reduced sets worked better
else:
logger.info("No improvement on this round")
return ("", old_result, [], [], [])
def feature_selection(train_data, train_tags, test_data, test_tags, feature_names, data_type='sequential'):
    """Greedy backward feature elimination.

    Starting from the full feature set, repeatedly drop the single feature
    whose removal most improves the (BAD-class f1) score, until no removal
    helps or only one feature is left.

    Returns (surviving_feature_names, baseline_score, final_score).
    """
    # Convert string labels to the binary encoding the learners expect
    # (BAD == 0, so BAD-class metrics sit at index 0 of per-class arrays).
    tag_map = {u'OK': 1, u'BAD': 0}
    train_tags = [[tag_map[tag] for tag in seq] for seq in train_tags]
    test_tags = [[tag_map[tag] for tag in seq] for seq in test_tags]
    # Score with the full feature set to establish the baseline to beat.
    full_set_result = run_prediction((train_data, train_tags, test_data, test_tags, 0))
    logger.info("Feature selection")
    odd_feature = None
    # run_prediction() returns (per-class f1 array, idx); [0][0] is BAD-class f1.
    baseline_res = full_set_result[0][0]
    logger.info("Baseline result: {}".format(baseline_res))
    # Deep copies so the caller's data is never mutated across rounds.
    reduced_train = copy.deepcopy(train_data)
    reduced_test = copy.deepcopy(test_data)
    reduced_features = copy.deepcopy(feature_names)
    odd_feature_list = []
    # reduce the feature set while there are any combinations that give better result
    cnt = 1
    old_max = baseline_res
    # selection_epoch_multi() returns "" as the feature name when no removal
    # improved the score, which terminates this loop.
    while odd_feature != "" and len(reduced_features) > 1:
        logger.info("Feature selection: round {}".format(cnt))
        odd_feature, max_res, reduced_train, reduced_test, reduced_features = selection_epoch_multi(old_max, reduced_train, train_tags, reduced_test, test_tags, reduced_features, 10, data_type=data_type)
        # odd_feature, reduced_train, reduced_test, reduced_features = selection_epoch(old_max, reduced_train, train_tags, reduced_test, test_tags, reduced_features, data_type=data_type)
        odd_feature_list.append(odd_feature)
        old_max = max_res
        cnt += 1
    # form a set of reduced feature names and feature numbers
    new_feature_list = []
    for feature in feature_names:
        if feature not in odd_feature_list:
            new_feature_list.append(feature)
    logger.info("Feature selection is terminating, good features are: {}".format(' '.join(new_feature_list)))
    return (new_feature_list, baseline_res, old_max)
| isc |
erikdejonge/youtube-dl | youtube_dl/extractor/peertube.py | 2 | 25248 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_resolution,
try_get,
unified_timestamp,
url_or_none,
urljoin,
)
class PeerTubeIE(InfoExtractor):
_INSTANCES_RE = r'''(?:
# Taken from https://instances.joinpeertube.org/instances
peertube\.rainbowswingers\.net|
tube\.stanisic\.nl|
peer\.suiri\.us|
medias\.libox\.fr|
videomensoif\.ynh\.fr|
peertube\.travelpandas\.eu|
peertube\.rachetjay\.fr|
peertube\.montecsys\.fr|
tube\.eskuero\.me|
peer\.tube|
peertube\.umeahackerspace\.se|
tube\.nx-pod\.de|
video\.monsieurbidouille\.fr|
tube\.openalgeria\.org|
vid\.lelux\.fi|
video\.anormallostpod\.ovh|
tube\.crapaud-fou\.org|
peertube\.stemy\.me|
lostpod\.space|
exode\.me|
peertube\.snargol\.com|
vis\.ion\.ovh|
videosdulib\.re|
v\.mbius\.io|
videos\.judrey\.eu|
peertube\.osureplayviewer\.xyz|
peertube\.mathieufamily\.ovh|
www\.videos-libr\.es|
fightforinfo\.com|
peertube\.fediverse\.ru|
peertube\.oiseauroch\.fr|
video\.nesven\.eu|
v\.bearvideo\.win|
video\.qoto\.org|
justporn\.cc|
video\.vny\.fr|
peervideo\.club|
tube\.taker\.fr|
peertube\.chantierlibre\.org|
tube\.ipfixe\.info|
tube\.kicou\.info|
tube\.dodsorf\.as|
videobit\.cc|
video\.yukari\.moe|
videos\.elbinario\.net|
hkvideo\.live|
pt\.tux\.tf|
www\.hkvideo\.live|
FIGHTFORINFO\.com|
pt\.765racing\.com|
peertube\.gnumeria\.eu\.org|
nordenmedia\.com|
peertube\.co\.uk|
tube\.darfweb\.eu|
tube\.kalah-france\.org|
0ch\.in|
vod\.mochi\.academy|
film\.node9\.org|
peertube\.hatthieves\.es|
video\.fitchfamily\.org|
peertube\.ddns\.net|
video\.ifuncle\.kr|
video\.fdlibre\.eu|
tube\.22decembre\.eu|
peertube\.harmoniescreatives\.com|
tube\.fabrigli\.fr|
video\.thedwyers\.co|
video\.bruitbruit\.com|
peertube\.foxfam\.club|
peer\.philoxweb\.be|
videos\.bugs\.social|
peertube\.malbert\.xyz|
peertube\.bilange\.ca|
libretube\.net|
diytelevision\.com|
peertube\.fedilab\.app|
libre\.video|
video\.mstddntfdn\.online|
us\.tv|
peertube\.sl-network\.fr|
peertube\.dynlinux\.io|
peertube\.david\.durieux\.family|
peertube\.linuxrocks\.online|
peerwatch\.xyz|
v\.kretschmann\.social|
tube\.otter\.sh|
yt\.is\.nota\.live|
tube\.dragonpsi\.xyz|
peertube\.boneheadmedia\.com|
videos\.funkwhale\.audio|
watch\.44con\.com|
peertube\.gcaillaut\.fr|
peertube\.icu|
pony\.tube|
spacepub\.space|
tube\.stbr\.io|
v\.mom-gay\.faith|
tube\.port0\.xyz|
peertube\.simounet\.net|
play\.jergefelt\.se|
peertube\.zeteo\.me|
tube\.danq\.me|
peertube\.kerenon\.com|
tube\.fab-l3\.org|
tube\.calculate\.social|
peertube\.mckillop\.org|
tube\.netzspielplatz\.de|
vod\.ksite\.de|
peertube\.laas\.fr|
tube\.govital\.net|
peertube\.stephenson\.cc|
bistule\.nohost\.me|
peertube\.kajalinifi\.de|
video\.ploud\.jp|
video\.omniatv\.com|
peertube\.ffs2play\.fr|
peertube\.leboulaire\.ovh|
peertube\.tronic-studio\.com|
peertube\.public\.cat|
peertube\.metalbanana\.net|
video\.1000i100\.fr|
peertube\.alter-nativ-voll\.de|
tube\.pasa\.tf|
tube\.worldofhauru\.xyz|
pt\.kamp\.site|
peertube\.teleassist\.fr|
videos\.mleduc\.xyz|
conf\.tube|
media\.privacyinternational\.org|
pt\.forty-two\.nl|
video\.halle-leaks\.de|
video\.grosskopfgames\.de|
peertube\.schaeferit\.de|
peertube\.jackbot\.fr|
tube\.extinctionrebellion\.fr|
peertube\.f-si\.org|
video\.subak\.ovh|
videos\.koweb\.fr|
peertube\.zergy\.net|
peertube\.roflcopter\.fr|
peertube\.floss-marketing-school\.com|
vloggers\.social|
peertube\.iriseden\.eu|
videos\.ubuntu-paris\.org|
peertube\.mastodon\.host|
armstube\.com|
peertube\.s2s\.video|
peertube\.lol|
tube\.open-plug\.eu|
open\.tube|
peertube\.ch|
peertube\.normandie-libre\.fr|
peertube\.slat\.org|
video\.lacaveatonton\.ovh|
peertube\.uno|
peertube\.servebeer\.com|
peertube\.fedi\.quebec|
tube\.h3z\.jp|
tube\.plus200\.com|
peertube\.eric\.ovh|
tube\.metadocs\.cc|
tube\.unmondemeilleur\.eu|
gouttedeau\.space|
video\.antirep\.net|
nrop\.cant\.at|
tube\.ksl-bmx\.de|
tube\.plaf\.fr|
tube\.tchncs\.de|
video\.devinberg\.com|
hitchtube\.fr|
peertube\.kosebamse\.com|
yunopeertube\.myddns\.me|
peertube\.varney\.fr|
peertube\.anon-kenkai\.com|
tube\.maiti\.info|
tubee\.fr|
videos\.dinofly\.com|
toobnix\.org|
videotape\.me|
voca\.tube|
video\.heromuster\.com|
video\.lemediatv\.fr|
video\.up\.edu\.ph|
balafon\.video|
video\.ivel\.fr|
thickrips\.cloud|
pt\.laurentkruger\.fr|
video\.monarch-pass\.net|
peertube\.artica\.center|
video\.alternanet\.fr|
indymotion\.fr|
fanvid\.stopthatimp\.net|
video\.farci\.org|
v\.lesterpig\.com|
video\.okaris\.de|
tube\.pawelko\.net|
peertube\.mablr\.org|
tube\.fede\.re|
pytu\.be|
evertron\.tv|
devtube\.dev-wiki\.de|
raptube\.antipub\.org|
video\.selea\.se|
peertube\.mygaia\.org|
video\.oh14\.de|
peertube\.livingutopia\.org|
peertube\.the-penguin\.de|
tube\.thechangebook\.org|
tube\.anjara\.eu|
pt\.pube\.tk|
video\.samedi\.pm|
mplayer\.demouliere\.eu|
widemus\.de|
peertube\.me|
peertube\.zapashcanon\.fr|
video\.latavernedejohnjohn\.fr|
peertube\.pcservice46\.fr|
peertube\.mazzonetto\.eu|
video\.irem\.univ-paris-diderot\.fr|
video\.livecchi\.cloud|
alttube\.fr|
video\.coop\.tools|
video\.cabane-libre\.org|
peertube\.openstreetmap\.fr|
videos\.alolise\.org|
irrsinn\.video|
video\.antopie\.org|
scitech\.video|
tube2\.nemsia\.org|
video\.amic37\.fr|
peertube\.freeforge\.eu|
video\.arbitrarion\.com|
video\.datsemultimedia\.com|
stoptrackingus\.tv|
peertube\.ricostrongxxx\.com|
docker\.videos\.lecygnenoir\.info|
peertube\.togart\.de|
tube\.postblue\.info|
videos\.domainepublic\.net|
peertube\.cyber-tribal\.com|
video\.gresille\.org|
peertube\.dsmouse\.net|
cinema\.yunohost\.support|
tube\.theocevaer\.fr|
repro\.video|
tube\.4aem\.com|
quaziinc\.com|
peertube\.metawurst\.space|
videos\.wakapo\.com|
video\.ploud\.fr|
video\.freeradical\.zone|
tube\.valinor\.fr|
refuznik\.video|
pt\.kircheneuenburg\.de|
peertube\.asrun\.eu|
peertube\.lagob\.fr|
videos\.side-ways\.net|
91video\.online|
video\.valme\.io|
video\.taboulisme\.com|
videos-libr\.es|
tv\.mooh\.fr|
nuage\.acostey\.fr|
video\.monsieur-a\.fr|
peertube\.librelois\.fr|
videos\.pair2jeux\.tube|
videos\.pueseso\.club|
peer\.mathdacloud\.ovh|
media\.assassinate-you\.net|
vidcommons\.org|
ptube\.rousset\.nom\.fr|
tube\.cyano\.at|
videos\.squat\.net|
video\.iphodase\.fr|
peertube\.makotoworkshop\.org|
peertube\.serveur\.slv-valbonne\.fr|
vault\.mle\.party|
hostyour\.tv|
videos\.hack2g2\.fr|
libre\.tube|
pire\.artisanlogiciel\.net|
videos\.numerique-en-commun\.fr|
video\.netsyms\.com|
video\.die-partei\.social|
video\.writeas\.org|
peertube\.swarm\.solvingmaz\.es|
tube\.pericoloso\.ovh|
watching\.cypherpunk\.observer|
videos\.adhocmusic\.com|
tube\.rfc1149\.net|
peertube\.librelabucm\.org|
videos\.numericoop\.fr|
peertube\.koehn\.com|
peertube\.anarchmusicall\.net|
tube\.kampftoast\.de|
vid\.y-y\.li|
peertube\.xtenz\.xyz|
diode\.zone|
tube\.egf\.mn|
peertube\.nomagic\.uk|
visionon\.tv|
videos\.koumoul\.com|
video\.rastapuls\.com|
video\.mantlepro\.com|
video\.deadsuperhero\.com|
peertube\.musicstudio\.pro|
peertube\.we-keys\.fr|
artitube\.artifaille\.fr|
peertube\.ethernia\.net|
tube\.midov\.pl|
peertube\.fr|
watch\.snoot\.tube|
peertube\.donnadieu\.fr|
argos\.aquilenet\.fr|
tube\.nemsia\.org|
tube\.bruniau\.net|
videos\.darckoune\.moe|
tube\.traydent\.info|
dev\.videos\.lecygnenoir\.info|
peertube\.nayya\.org|
peertube\.live|
peertube\.mofgao\.space|
video\.lequerrec\.eu|
peertube\.amicale\.net|
aperi\.tube|
tube\.ac-lyon\.fr|
video\.lw1\.at|
www\.yiny\.org|
videos\.pofilo\.fr|
tube\.lou\.lt|
choob\.h\.etbus\.ch|
tube\.hoga\.fr|
peertube\.heberge\.fr|
video\.obermui\.de|
videos\.cloudfrancois\.fr|
betamax\.video|
video\.typica\.us|
tube\.piweb\.be|
video\.blender\.org|
peertube\.cat|
tube\.kdy\.ch|
pe\.ertu\.be|
peertube\.social|
videos\.lescommuns\.org|
tv\.datamol\.org|
videonaute\.fr|
dialup\.express|
peertube\.nogafa\.org|
megatube\.lilomoino\.fr|
peertube\.tamanoir\.foucry\.net|
peertube\.devosi\.org|
peertube\.1312\.media|
tube\.bootlicker\.party|
skeptikon\.fr|
video\.blueline\.mg|
tube\.homecomputing\.fr|
tube\.ouahpiti\.info|
video\.tedomum\.net|
video\.g3l\.org|
fontube\.fr|
peertube\.gaialabs\.ch|
tube\.kher\.nl|
peertube\.qtg\.fr|
video\.migennes\.net|
tube\.p2p\.legal|
troll\.tv|
videos\.iut-orsay\.fr|
peertube\.solidev\.net|
videos\.cemea\.org|
video\.passageenseine\.fr|
videos\.festivalparminous\.org|
peertube\.touhoppai\.moe|
sikke\.fi|
peer\.hostux\.social|
share\.tube|
peertube\.walkingmountains\.fr|
videos\.benpro\.fr|
peertube\.parleur\.net|
peertube\.heraut\.eu|
tube\.aquilenet\.fr|
peertube\.gegeweb\.eu|
framatube\.org|
thinkerview\.video|
tube\.conferences-gesticulees\.net|
peertube\.datagueule\.tv|
video\.lqdn\.fr|
tube\.mochi\.academy|
media\.zat\.im|
video\.colibris-outilslibres\.org|
tube\.svnet\.fr|
peertube\.video|
peertube3\.cpy\.re|
peertube2\.cpy\.re|
videos\.tcit\.fr|
peertube\.cpy\.re
)'''
_UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'
_VALID_URL = r'''(?x)
(?:
peertube:(?P<host>[^:]+):|
https?://(?P<host_2>%s)/(?:videos/(?:watch|embed)|api/v\d/videos)/
)
(?P<id>%s)
''' % (_INSTANCES_RE, _UUID_RE)
_TESTS = [{
'url': 'https://peertube.cpy.re/videos/watch/2790feb0-8120-4e63-9af3-c943c69f5e6c',
'md5': '80f24ff364cc9d333529506a263e7feb',
'info_dict': {
'id': '2790feb0-8120-4e63-9af3-c943c69f5e6c',
'ext': 'mp4',
'title': 'wow',
'description': 'wow such video, so gif',
'thumbnail': r're:https?://.*\.(?:jpg|png)',
'timestamp': 1519297480,
'upload_date': '20180222',
'uploader': 'Luclu7',
'uploader_id': '7fc42640-efdb-4505-a45d-a15b1a5496f1',
'uploder_url': 'https://peertube.nsa.ovh/accounts/luclu7',
'license': 'Unknown',
'duration': 3,
'view_count': int,
'like_count': int,
'dislike_count': int,
'tags': list,
'categories': list,
}
}, {
'url': 'https://peertube.tamanoir.foucry.net/videos/watch/0b04f13d-1e18-4f1d-814e-4979aa7c9c44',
'only_matching': True,
}, {
# nsfw
'url': 'https://tube.22decembre.eu/videos/watch/9bb88cd3-9959-46d9-9ab9-33d2bb704c39',
'only_matching': True,
}, {
'url': 'https://tube.22decembre.eu/videos/embed/fed67262-6edb-4d1c-833b-daa9085c71d7',
'only_matching': True,
}, {
'url': 'https://tube.openalgeria.org/api/v1/videos/c1875674-97d0-4c94-a058-3f7e64c962e8',
'only_matching': True,
}, {
'url': 'peertube:video.blender.org:b37a5b9f-e6b5-415c-b700-04a5cd6ec205',
'only_matching': True,
}]
@staticmethod
def _extract_peertube_url(webpage, source_url):
mobj = re.match(
r'https?://(?P<host>[^/]+)/videos/(?:watch|embed)/(?P<id>%s)'
% PeerTubeIE._UUID_RE, source_url)
if mobj and any(p in webpage for p in (
'<title>PeerTube<',
'There will be other non JS-based clients to access PeerTube',
'>We are sorry but it seems that PeerTube is not compatible with your web browser.<')):
return 'peertube:%s:%s' % mobj.group('host', 'id')
@staticmethod
def _extract_urls(webpage, source_url):
entries = re.findall(
r'''(?x)<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//%s/videos/embed/%s)'''
% (PeerTubeIE._INSTANCES_RE, PeerTubeIE._UUID_RE), webpage)
if not entries:
peertube_url = PeerTubeIE._extract_peertube_url(webpage, source_url)
if peertube_url:
entries = [peertube_url]
return entries
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host') or mobj.group('host_2')
video_id = mobj.group('id')
video = self._download_json(
'https://%s/api/v1/videos/%s' % (host, video_id), video_id)
title = video['name']
formats = []
for file_ in video['files']:
if not isinstance(file_, dict):
continue
file_url = url_or_none(file_.get('fileUrl'))
if not file_url:
continue
file_size = int_or_none(file_.get('size'))
format_id = try_get(
file_, lambda x: x['resolution']['label'], compat_str)
f = parse_resolution(format_id)
f.update({
'url': file_url,
'format_id': format_id,
'filesize': file_size,
})
formats.append(f)
self._sort_formats(formats)
def account_data(field):
return try_get(video, lambda x: x['account'][field], compat_str)
category = try_get(video, lambda x: x['category']['label'], compat_str)
categories = [category] if category else None
nsfw = video.get('nsfw')
if nsfw is bool:
age_limit = 18 if nsfw else 0
else:
age_limit = None
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': urljoin(url, video.get('thumbnailPath')),
'timestamp': unified_timestamp(video.get('publishedAt')),
'uploader': account_data('displayName'),
'uploader_id': account_data('uuid'),
'uploder_url': account_data('url'),
'license': try_get(
video, lambda x: x['licence']['label'], compat_str),
'duration': int_or_none(video.get('duration')),
'view_count': int_or_none(video.get('views')),
'like_count': int_or_none(video.get('likes')),
'dislike_count': int_or_none(video.get('dislikes')),
'age_limit': age_limit,
'tags': try_get(video, lambda x: x['tags'], list),
'categories': categories,
'formats': formats,
}
| unlicense |
benjaminhmtan/benjaminhmtan.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
# Load the publications metadata; tab-separated because the fields
# (titles, citations) routinely contain commas.
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
# In the original notebook this bare expression displays the dataframe;
# as a plain script it is a no-op.
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
# Map of characters that must be HTML-escaped before being embedded in the
# YAML front matter.  NOTE: these entity strings had been garbled
# (HTML-decoded) in the source -- `"&": "&"` was an identity mapping and
# `'"': """` was a syntax error; restored to the intended escaped forms.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}

def html_escape(text):
    """Produce entities within text."""
    return "".join(html_escape_table.get(c, c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation")
# In[5]:
import os
# For each TSV row, assemble the YAML front matter plus markdown body and
# write it as one file per publication into the _publications collection.
for row, item in publications.iterrows():
    md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
    html_filename = str(item.pub_date) + "-" + item.url_slug
    year = item.pub_date[:4]  # NOTE(review): computed but never used below
    ## YAML variables
    md = "---\ntitle: \"" + item.title + '"\n'
    md += """collection: publications"""
    md += """\npermalink: /publication/""" + html_filename
    # len(str(...)) > 5 presumably filters out missing values (pandas NaN
    # stringifies to 'nan') and trivially short entries -- TODO confirm
    if len(str(item.excerpt)) > 5:
        md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
    md += "\ndate: " + str(item.pub_date)
    md += "\nvenue: '" + html_escape(item.venue) + "'"
    if len(str(item.paper_url)) > 5:
        md += "\npaperurl: '" + item.paper_url + "'"
    md += "\ncitation: '" + html_escape(item.citation) + "'"
    md += "\n---"
    ## Markdown description for individual page
    if len(str(item.paper_url)) > 5:
        md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
    if len(str(item.excerpt)) > 5:
        md += "\n" + html_escape(item.excerpt) + "\n"
    md += "\nRecommended citation: " + item.citation
    # basename() guards against path components smuggled in via url_slug
    md_filename = os.path.basename(md_filename)
    with open("../_publications/" + md_filename, 'w') as f:
        f.write(md)
| mit |
ronak3shah1/ml_lab_ecsc_306 | labwork/lab2/sci-learn/non_linear_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data: 40 sorted points in [0, 5) with a sine target
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets: perturb every 5th point (8 in total) by a uniform
# offset in [-1.5, 1.5)
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model: one SVR per kernel, then predict on the training grid
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# NOTE: the original called plt.hold('on') here; `hold` was deprecated in
# matplotlib 2.0 and removed in 3.0, and hold-on is the default behaviour,
# so the call is simply dropped.
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| apache-2.0 |
ahoyosid/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
# One log-log curve per eps: minimal n_components grows logarithmically
# with the number of samples to embed.
plt.figure()
for eps, color in zip(eps_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
# One semilog curve per sample count: larger admissible distortion eps
# drastically lowers the required n_components.
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
    # Need an internet connection hence not enabled by default
    data = fetch_20newsgroups_vectorized().data[:500]
else:
    data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
      % (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
# Reference pairwise squared distances in the original space.
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
# Project the data at each target dimensionality and compare pairwise
# distances before and after the random projection.
for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print("Projected %d samples from %d to %d in %0.3fs"
          % (n_samples, n_features, n_components, time() - t0))
    if hasattr(rp, 'components_'):
        # Sparse matrix footprint: data plus column indices
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))

    projected_dists = euclidean_distances(
        projected_data, squared=True).ravel()[nonzero]

    plt.figure()
    plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" %
              n_components)
    cb = plt.colorbar()
    cb.set_label('Sample pairs counts')

    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)"
          % (np.mean(rates), np.std(rates)))

    plt.figure()
    # BUG FIX: the `normed` keyword was deprecated and then removed from
    # matplotlib (3.1); `density=True` is the supported equivalent.
    plt.hist(rates, bins=50, density=True, range=(0., 2.))
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" %
              n_components)

# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
bjackman/lisa | libs/utils/analysis/tasks_analysis.py | 2 | 29151 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tasks Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
import re
from analysis_module import AnalysisModule
from devlib.utils.misc import memoized
from trappy.utils import listify
class TasksAnalysis(AnalysisModule):
"""
Support for Tasks signals analysis.
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
    def __init__(self, trace):
        """Initialize the analysis module with the Trace object to inspect."""
        super(TasksAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
    def _dfg_top_big_tasks(self, min_samples=100, min_utilization=None):
        """
        Tasks which had 'utilization' samples bigger than the specified
        threshold
        :param min_samples: minimum number of samples over the min_utilization
        :type min_samples: int
        :param min_utilization: minimum utilization used to filter samples
                                default: capacity of a little cluster
        :type min_utilization: int
        :returns: a DataFrame indexed by PID with 'samples' (count above the
                  threshold) and 'comm' columns, sorted by sample count;
                  None when the required event or matching tasks are missing
        """
        # Utilization data comes from the sched_load_avg_task ftrace event.
        if not self._trace.hasEvents('sched_load_avg_task'):
            self._log.warning('Events [sched_load_avg_task] not found')
            return None

        # Default threshold: a task that does not fit in a LITTLE CPU.
        if min_utilization is None:
            min_utilization = self._little_cap

        # Get utilization samples >= min_utilization
        df = self._dfg_trace_event('sched_load_avg_task')
        big_tasks_events = df[df.util_avg > min_utilization]
        if not len(big_tasks_events):
            self._log.warning('No tasks with with utilization samples > %d',
                              min_utilization)
            return None

        # Report the number of tasks which match the min_utilization condition
        big_tasks = big_tasks_events.pid.unique()
        self._log.info('%5d tasks with samples of utilization > %d',
                       len(big_tasks), min_utilization)

        # Compute number of samples above threshold
        # NOTE(review): relies on the describe()/unstack() layout of the
        # pandas version in use -- 'count' per PID comes from the 'comm'
        # column statistics.
        big_tasks_stats = big_tasks_events.groupby('pid')\
                            .describe(include=['object'])
        big_tasks_stats = big_tasks_stats.unstack()['comm']\
                            .sort_values(by=['count'], ascending=False)

        # Filter for number of occurrences
        big_tasks_stats = big_tasks_stats[big_tasks_stats['count'] > min_samples]
        if not len(big_tasks_stats):
            self._log.warning('    but none with more than %d samples',
                              min_samples)
            return None

        self._log.info('    %d with more than %d samples',
                       len(big_tasks_stats), min_samples)

        # Add task name column
        big_tasks_stats['comm'] = big_tasks_stats.index.map(
            lambda pid: self._trace.getTaskByPid(pid))

        # Filter columns of interest
        big_tasks_stats = big_tasks_stats[['count', 'comm']]
        big_tasks_stats.rename(columns={'count': 'samples'}, inplace=True)

        return big_tasks_stats
def _dfg_top_wakeup_tasks(self, min_wakeups=100):
    """
    Tasks which wakeup more frequently than a specified threshold.

    :param min_wakeups: minimum number of wakeups
    :type min_wakeups: int

    :returns: a :mod:`pandas.DataFrame` indexed by PID with a ``samples``
              column (number of wakeups) and a ``comm`` column (task
              name), sorted by decreasing wakeup count; ``None`` when the
              required events are missing or no task crosses the threshold
    """
    if not self._trace.hasEvents('sched_wakeup'):
        self._log.warning('Events [sched_wakeup] not found')
        return None

    df = self._dfg_trace_event('sched_wakeup')

    # Count wakeup events per task and keep only the tasks which woke
    # up more than min_wakeups times, most frequent first.
    # NOTE: a plain per-PID count replaces the previous
    # groupby().describe().unstack() dance, whose result layout is
    # pandas-version dependent.
    wkp_counts = df.groupby('pid')['comm'].count()
    wkp_counts = wkp_counts[wkp_counts > min_wakeups]\
                     .sort_values(ascending=False)
    if not len(wkp_counts):
        # BUGFIX: the original tested len(df) (the unfiltered events)
        # and logged the number of matching tasks where the threshold
        # belongs (arguments were swapped with the info message below)
        self._log.warning('No tasks with more than %d wakeups',
                          min_wakeups)
        return None
    self._log.info('%5d tasks with more than %d wakeups',
                   len(wkp_counts), min_wakeups)

    wkp_tasks_stats = wkp_counts.to_frame(name='samples')

    # Add task name column
    wkp_tasks_stats['comm'] = wkp_tasks_stats.index.map(
        lambda pid: self._trace.getTaskByPid(pid))

    return wkp_tasks_stats
def _dfg_rt_tasks(self, min_prio=100):
    """
    Tasks with RT priority

    NOTE: priorities uses scheduler values, thus: the lower the value the
          higher is the task priority.
          RT   Priorities: [  0..100]
          FAIR Priorities: [101..120]

    :param min_prio: minumum priority
    :type min_prio: int
    """
    if not self._trace.hasEvents('sched_switch'):
        self._log.warning('Events [sched_switch] not found')
        return None

    switches_df = self._dfg_trace_event('sched_switch')

    # Keep only switch-in events of tasks at or above the requested
    # priority (smaller scheduler value == higher priority)
    rt_switches = switches_df[switches_df.next_prio <= min_prio]

    # One row per distinct (pid, prio) pair, ordered by priority then
    # PID, renamed to the public column names and indexed by PID
    rt_tasks = (rt_switches[['next_pid', 'next_prio']]
                .drop_duplicates()
                .sort_values(by=['next_prio', 'next_pid'], ascending=True)
                .rename(columns={'next_pid': 'pid', 'next_prio': 'prio'})
                .set_index('pid'))

    # Resolve each PID to its task name
    rt_tasks['comm'] = rt_tasks.index.map(
        lambda pid: self._trace.getTaskByPid(pid))

    return rt_tasks
###############################################################################
# Plotting Methods
###############################################################################
def plotTasks(self, tasks, signals=None):
    """
    Generate a common set of useful plots for each of the specified tasks

    This method allows to filter which signals should be plot, if data are
    available in the input trace. The list of signals supported are:
    Tasks signals plot:
        load_avg, util_avg, boosted_util, sched_overutilized
    Tasks residencies on CPUs:
        residencies, sched_overutilized
    Tasks PELT signals:
        load_sum, util_sum, period_contrib, sched_overutilized

    At least one of the previous signals must be specified to get a valid
    plot.

    Addidional custom signals can be specified and they will be represented
    in the "Task signals plots" if they represent valid keys of the task
    load/utilization trace event (e.g. sched_load_avg_task).

    Note:
        sched_overutilized: enable the plotting of overutilization bands on
                            top of each subplot
        residencies: enable the generation of the CPUs residencies plot

    :param tasks: the list of task names and/or PIDs to plot.
                  Numerical PIDs and string task names can be mixed
                  in the same list.
    :type tasks: list(str) or list(int)

    :param signals: list of signals (and thus plots) to generate
                    default: all the plots and signals available in the
                    current trace
    :type signals: list(str)

    :raises ValueError: if `tasks` is neither a string, an int, nor a
                        list, or if it is empty
    """
    if not signals:
        signals = ['load_avg', 'util_avg', 'boosted_util',
                   'sched_overutilized',
                   'load_sum', 'util_sum', 'period_contrib',
                   'residencies']

    # Check for the minimum required signals to be available
    if not self._trace.hasEvents('sched_load_avg_task'):
        self._log.warning('Events [sched_load_avg_task] not found, '
                          'plot DISABLED!')
        return

    # Defined list of tasks to plot
    if tasks and \
       not isinstance(tasks, str) and \
       not isinstance(tasks, list):
        raise ValueError('Wrong format for tasks parameter')

    if tasks:
        tasks_to_plot = listify(tasks)
    else:
        raise ValueError('No tasks to plot specified')

    # Compute number of plots to produce
    plots_count = 0
    plots_signals = [
        # Fist plot: task's utilization
        {'load_avg', 'util_avg', 'boosted_util'},
        # Second plot: task residency
        {'residencies'},
        # Third plot: tasks's load
        {'load_sum', 'util_sum', 'period_contrib'}
    ]
    hr = []
    ysize = 0
    for plot_id, signals_to_plot in enumerate(plots_signals):
        signals_to_plot = signals_to_plot.intersection(signals)
        if len(signals_to_plot):
            plots_count = plots_count + 1
            # Use bigger size only for the first plot
            hr.append(3 if plot_id == 0 else 1)
            ysize = ysize + (8 if plot_id else 4)

    # Grid: one row per enabled plot, first row taller (see `hr` above)
    gs = gridspec.GridSpec(plots_count, 1, height_ratios=hr)
    gs.update(wspace=0.1, hspace=0.1)

    # Build list of all PIDs for each task_name to plot
    pids_to_plot = []
    for task in tasks_to_plot:
        # Add specified PIDs to the list
        if isinstance(task, int):
            pids_to_plot.append(task)
            continue
        # Otherwise: add all the PIDs for task with the specified name
        pids_to_plot.extend(self._trace.getTaskByName(task))

    for tid in pids_to_plot:
        # Only save the figure if at least one subplot was produced
        savefig = False

        task_name = self._trace.getTaskByPid(tid)
        self._log.info('Plotting [%d:%s]...', tid, task_name)
        plot_id = 0

        # For each task create a figure with plots_count plots
        plt.figure(figsize=(16, ysize))
        plt.suptitle('Task Signals',
                     y=.94, fontsize=16, horizontalalignment='center')

        # Plot load and utilization
        signals_to_plot = {'load_avg', 'util_avg', 'boosted_util'}
        signals_to_plot = list(signals_to_plot.intersection(signals))
        if len(signals_to_plot) > 0:
            axes = plt.subplot(gs[plot_id, 0])
            axes.set_title('Task [{0:d}:{1:s}] Signals'
                           .format(tid, task_name))
            plot_id = plot_id + 1
            is_last = (plot_id == plots_count)
            self._plotTaskSignals(axes, tid, signals, is_last)
            savefig = True

        # Plot CPUs residency
        signals_to_plot = {'residencies'}
        signals_to_plot = list(signals_to_plot.intersection(signals))
        if len(signals_to_plot) > 0:
            axes = plt.subplot(gs[plot_id, 0])
            axes.set_title(
                'Task [{0:d}:{1:s}] Residency (green: LITTLE, red: big)'
                .format(tid, task_name)
            )
            plot_id = plot_id + 1
            is_last = (plot_id == plots_count)
            if 'sched_overutilized' in signals:
                signals_to_plot.append('sched_overutilized')
            self._plotTaskResidencies(axes, tid, signals_to_plot, is_last)
            savefig = True

        # Plot PELT signals
        signals_to_plot = {'load_sum', 'util_sum', 'period_contrib'}
        signals_to_plot = list(signals_to_plot.intersection(signals))
        if len(signals_to_plot) > 0:
            axes = plt.subplot(gs[plot_id, 0])
            axes.set_title('Task [{0:d}:{1:s}] PELT Signals'
                           .format(tid, task_name))
            plot_id = plot_id + 1
            if 'sched_overutilized' in signals:
                signals_to_plot.append('sched_overutilized')
            self._plotTaskPelt(axes, tid, signals_to_plot)
            savefig = True

        if not savefig:
            self._log.warning('Nothing to plot for %s', task_name)
            continue

        # Save generated plots into datadir
        # getTaskByPid() may return a list of names for the same PID:
        # use the first one, sanitized for filesystem use
        if isinstance(task_name, list):
            task_name = re.sub('[:/]', '_', task_name[0])
        else:
            task_name = re.sub('[:/]', '_', task_name)
        figname = '{}/{}task_util_{}_{}.png'\
            .format(self._trace.plots_dir, self._trace.plots_prefix,
                    tid, task_name)
        # NOTE(review): uses `pl` here while the rest of the file uses
        # `plt` — presumably pylab is imported at module level; confirm
        pl.savefig(figname, bbox_inches='tight')
def plotBigTasks(self, max_tasks=10, min_samples=100,
                 min_utilization=None):
    """
    For each big task plot utilization and show the smallest cluster
    capacity suitable for accommodating task utilization.

    :param max_tasks: maximum number of tasks to consider
    :type max_tasks: int

    :param min_samples: minumum number of samples over the min_utilization
    :type min_samples: int

    :param min_utilization: minimum utilization used to filter samples
                            default: capacity of a little cluster
    :type min_utilization: int
    """

    # Get PID of big tasks
    big_frequent_task_df = self._dfg_top_big_tasks(
        min_samples, min_utilization)
    # BUGFIX: _dfg_top_big_tasks() returns None when the required events
    # are missing or no task matches; bail out instead of crashing below
    if big_frequent_task_df is None:
        self._log.warning('No big/frequent tasks to plot')
        return
    if max_tasks > 0:
        big_frequent_task_df = big_frequent_task_df.head(max_tasks)
    big_frequent_task_pids = big_frequent_task_df.index.values

    big_frequent_tasks_count = len(big_frequent_task_pids)
    if big_frequent_tasks_count == 0:
        self._log.warning('No big/frequent tasks to plot')
        return

    # Get the list of events for all big frequent tasks
    df = self._dfg_trace_event('sched_load_avg_task')
    big_frequent_tasks_events = df[df.pid.isin(big_frequent_task_pids)]

    # Define axes for side-by-side plottings
    fig, axes = plt.subplots(big_frequent_tasks_count, 1,
                             figsize=(16, big_frequent_tasks_count*4))
    plt.subplots_adjust(wspace=0.1, hspace=0.2)

    plot_idx = 0
    for pid, group in big_frequent_tasks_events.groupby('pid'):

        # Build task title (there could be multiple names during the
        # task lifetime, so getTaskByPid() may return a list).
        # BUGFIX: the original mixed %-style placeholders with
        # str.format(), so the title rendered as the literal string
        # 'Task [%d:%s]'
        task_name = 'Task [{}:{}]'.format(
            pid, self._trace.getTaskByPid(pid))

        # Plot title: plt.subplots() returns a bare Axes when there is
        # a single task, an array of Axes otherwise
        if big_frequent_tasks_count == 1:
            ax = axes
        else:
            ax = axes[plot_idx]
        ax.set_title(task_name)

        # Left axis: utilization and the smallest fitting cluster capacity
        ax = group.plot(y=['util_avg', 'min_cluster_cap'],
                        style=['r.', '-b'],
                        drawstyle='steps-post',
                        linewidth=1,
                        ax=ax)
        ax.set_xlim(self._trace.x_min, self._trace.x_max)
        ax.set_ylim(0, 1100)
        ax.set_ylabel('util_avg')
        ax.set_xlabel('')
        ax.grid(True)
        self._trace.analysis.status.plotOverutilized(ax)
        plot_idx += 1

    # Only the bottom-most subplot gets the time label
    ax.set_xlabel('Time [s]')

    self._log.info('Tasks which have been a "utilization" of %d for at least %d samples',
                   self._little_cap, min_samples)
def plotWakeupTasks(self, max_tasks=10, min_wakeups=0, per_cluster=False):
    """
    Show waking up tasks over time and newly forked tasks in two separate
    plots.

    :param max_tasks: maximum number of tasks to consider
    :type max_tasks: int

    :param min_wakeups: minimum number of wakeups of each task
    :type min_wakeups: int

    :param per_cluster: if True get per-cluster wakeup events
    :type per_cluster: bool
    """
    # Per-cluster mode only needs sched_wakeup_new; the default mode
    # needs both wakeup events
    if per_cluster is True and \
       not self._trace.hasEvents('sched_wakeup_new'):
        self._log.warning('Events [sched_wakeup_new] not found, '
                          'plots DISABLED!')
        return
    elif not self._trace.hasEvents('sched_wakeup') and \
            not self._trace.hasEvents('sched_wakeup_new'):
        self._log.warning('Events [sched_wakeup, sched_wakeup_new] not found, '
                          'plots DISABLED!')
        return

    # Define axes for side-by-side plottings
    fig, axes = plt.subplots(2, 1, figsize=(14, 5))
    plt.subplots_adjust(wspace=0.2, hspace=0.3)

    if per_cluster:

        # Get per cluster wakeup events
        df = self._dfg_trace_event('sched_wakeup_new')
        big_frequent = df.target_cpu.isin(self._big_cpus)
        ntbc = df[big_frequent]
        ntbc_count = len(ntbc)
        little_frequent = df.target_cpu.isin(self._little_cpus)
        ntlc = df[little_frequent];
        ntlc_count = len(ntlc)

        self._log.info('%5d tasks forked on big cluster (%3.1f %%)',
                       ntbc_count,
                       100. * ntbc_count / (ntbc_count + ntlc_count))
        self._log.info('%5d tasks forked on LITTLE cluster (%3.1f %%)',
                       ntlc_count,
                       100. * ntlc_count / (ntbc_count + ntlc_count))

        # Top plot: forks targeting big CPUs
        ax = axes[0]
        ax.set_title('Tasks Forks on big CPUs');
        ntbc.pid.plot(style=['g.'], ax=ax);
        ax.set_xlim(self._trace.x_min, self._trace.x_max);
        ax.set_xticklabels([])
        ax.set_xlabel('')
        ax.grid(True)
        self._trace.analysis.status.plotOverutilized(ax)

        # Bottom plot: forks targeting LITTLE CPUs
        ax = axes[1]
        ax.set_title('Tasks Forks on LITTLE CPUs');
        ntlc.pid.plot(style=['g.'], ax=ax);
        ax.set_xlim(self._trace.x_min, self._trace.x_max);
        ax.grid(True)
        self._trace.analysis.status.plotOverutilized(ax)

        return

    # Keep events of defined big tasks
    # NOTE(review): _dfg_top_wakeup_tasks() may return None when no task
    # crosses the threshold — len(None) would raise here; confirm callers
    wkp_task_pids = self._dfg_top_wakeup_tasks(min_wakeups)
    if len(wkp_task_pids):
        wkp_task_pids = wkp_task_pids.index.values[:max_tasks]
        self._log.info('Plotting %d frequent wakeup tasks',
                       len(wkp_task_pids))

    # Top plot: wakeups of the selected tasks
    ax = axes[0]
    ax.set_title('Tasks WakeUps Events')
    df = self._dfg_trace_event('sched_wakeup')
    if len(df):
        df = df[df.pid.isin(wkp_task_pids)]
        df.pid.astype(int).plot(style=['b.'], ax=ax)
    ax.set_xlim(self._trace.x_min, self._trace.x_max)
    ax.set_xticklabels([])
    ax.set_xlabel('')
    ax.grid(True)
    self._trace.analysis.status.plotOverutilized(ax)

    # Bottom plot: fork events of the selected tasks
    ax = axes[1]
    ax.set_title('Tasks Forks Events')
    df = self._dfg_trace_event('sched_wakeup_new')
    if len(df):
        df = df[df.pid.isin(wkp_task_pids)]
        df.pid.astype(int).plot(style=['r.'], ax=ax)
    ax.set_xlim(self._trace.x_min, self._trace.x_max)
    ax.grid(True)
    self._trace.analysis.status.plotOverutilized(ax)
def plotBigTasksVsCapacity(self, min_samples=1,
                           min_utilization=None, big_cluster=True):
    """
    Draw a plot that shows whether tasks are placed on the correct cluster
    based on their utilization and cluster capacity. Green dots mean the
    task was placed on the correct cluster, Red means placement was wrong

    :param min_samples: minumum number of samples over the min_utilization
    :type min_samples: int

    :param min_utilization: minimum utilization used to filter samples
                            default: capacity of a little cluster
    :type min_utilization: int

    :param big_cluster: if True plot the frequencies of the big cluster,
                        otherwise those of the LITTLE cluster
    :type big_cluster: bool
    """

    if not self._trace.hasEvents('sched_load_avg_task'):
        self._log.warning('Events [sched_load_avg_task] not found')
        return
    if not self._trace.hasEvents('cpu_frequency'):
        self._log.warning('Events [cpu_frequency] not found')
        return

    if big_cluster:
        cluster_correct = 'big'
        cpus = self._big_cpus
    else:
        cluster_correct = 'LITTLE'
        cpus = self._little_cpus

    # Get all utilization update events
    df = self._dfg_trace_event('sched_load_avg_task')

    # Keep events of defined big tasks
    big_task_pids = self._dfg_top_big_tasks(
        min_samples, min_utilization)
    if big_task_pids is not None:
        big_task_pids = big_task_pids.index.values
        df = df[df.pid.isin(big_task_pids)]
    if not df.size:
        # BUGFIX: the original message contained two %d placeholders but
        # passed no arguments, so logging raised a formatting error.
        # %s is used because min_utilization may legitimately be None.
        self._log.warning('No events for tasks with more than %s utilization '
                          'samples bigger than %s, plots DISABLED!',
                          min_samples, min_utilization)
        return

    fig, axes = plt.subplots(2, 1, figsize=(14, 5))
    plt.subplots_adjust(wspace=0.2, hspace=0.3)

    # Add column of expected cluster depending on:
    # a) task utilization value
    # b) capacity of the selected cluster
    bu_bc = ((df['util_avg'] > self._little_cap) &
             (df['cpu'].isin(self._big_cpus)))
    su_lc = ((df['util_avg'] <= self._little_cap) &
             (df['cpu'].isin(self._little_cpus)))

    # The Cluster CAPacity Matches the UTILization (ccap_mutil) iff:
    # - tasks with util_avg > little_cap are running on a BIG cpu
    # - tasks with util_avg <= little_cap are running on a LITTLe cpu
    df.loc[:, 'ccap_mutil'] = np.select([(bu_bc | su_lc)], [True], False)

    # Frequencies of the first CPU of the selected cluster
    df_freq = self._dfg_trace_event('cpu_frequency')
    df_freq = df_freq[df_freq.cpu == cpus[0]]

    # Top plot: utilization samples, green when correctly placed
    ax = axes[0]
    ax.set_title('Tasks Utilization vs Allocation')
    for ucolor, umatch in zip('gr', [True, False]):
        cdata = df[df['ccap_mutil'] == umatch]
        if len(cdata) > 0:
            cdata['util_avg'].plot(ax=ax,
                                   style=[ucolor+'.'], legend=False)
    ax.set_xlim(self._trace.x_min, self._trace.x_max)
    ax.set_xticklabels([])
    ax.set_xlabel('')
    ax.grid(True)
    self._trace.analysis.status.plotOverutilized(ax)

    # Bottom plot: frequencies on the selected cluster
    ax = axes[1]
    ax.set_title('Frequencies on "{}" cluster'.format(cluster_correct))
    df_freq['frequency'].plot(style=['-b'], ax=ax, drawstyle='steps-post')
    ax.set_xlim(self._trace.x_min, self._trace.x_max)
    ax.grid(True)
    self._trace.analysis.status.plotOverutilized(ax)

    legend_y = axes[0].get_ylim()[1]
    axes[0].annotate('Utilization-Capacity Matches',
                     xy=(0, legend_y),
                     xytext=(-50, 45), textcoords='offset points',
                     fontsize=18)
    axes[0].annotate('Task schduled (green) or not (red) on min cluster',
                     xy=(0, legend_y),
                     xytext=(-50, 25), textcoords='offset points',
                     fontsize=14)
###############################################################################
# Utility Methods
###############################################################################
def _plotTaskSignals(self, axes, tid, signals, is_last=False):
    """
    For task with ID `tid` plot the specified signals.

    :param axes: axes over which to generate the plot
    :type axes: :mod:`matplotlib.axes.Axes`

    :param tid: task ID
    :type tid: int

    :param signals: signals to be plot
    :type signals: list(str)

    :param is_last: if True this is the last plot
    :type is_last: bool
    """
    # Get dataframe for the required task
    util_df = self._dfg_trace_event('sched_load_avg_task')

    # Plot load and util: any requested signal which is a column of the
    # event dataframe is plotted; 'boosted_util' is handled separately
    # below because it comes from a different event
    signals_to_plot = set(signals).difference({'boosted_util'})
    for signal in signals_to_plot:
        if signal not in util_df.columns:
            continue
        data = util_df[util_df.pid == tid][signal]
        data.plot(ax=axes, drawstyle='steps-post', legend=True)

    # Plot boost utilization if available
    if 'boosted_util' in signals and \
       self._trace.hasEvents('sched_boost_task'):
        boost_df = self._dfg_trace_event('sched_boost_task')
        data = boost_df[boost_df.pid == tid][['boosted_util']]
        if len(data):
            data.plot(ax=axes, style=['y-'], drawstyle='steps-post')
        else:
            task_name = self._trace.getTaskByPid(tid)
            self._log.warning('No "boosted_util" data for task [%d:%s]',
                              tid, task_name)

    # Add Capacities data if avilable
    if 'nrg_model' in self._platform:
        nrg_model = self._platform['nrg_model']
        max_lcap = nrg_model['little']['cpu']['cap_max']
        max_bcap = nrg_model['big']['cpu']['cap_max']
        # "Tipping point" reference lines at 80% of each cluster's
        # maximum capacity
        tip_lcap = 0.8 * max_lcap
        tip_bcap = 0.8 * max_bcap
        self._log.debug(
            'LITTLE capacity tip/max: %d/%d, big capacity tip/max: %d/%d',
            tip_lcap, max_lcap, tip_bcap, max_bcap
        )
        axes.axhline(tip_lcap, color='y', linestyle=':', linewidth=2)
        axes.axhline(max_lcap, color='y', linestyle='--', linewidth=2)
        axes.axhline(tip_bcap, color='r', linestyle=':', linewidth=2)
        axes.axhline(max_bcap, color='r', linestyle='--', linewidth=2)

    axes.set_ylim(0, 1100)
    axes.set_xlim(self._trace.x_min, self._trace.x_max)
    axes.grid(True)
    # Hide the X labels on every subplot but the bottom-most one
    if not is_last:
        axes.set_xticklabels([])
        axes.set_xlabel('')
    if 'sched_overutilized' in signals:
        self._trace.analysis.status.plotOverutilized(axes)
def _plotTaskResidencies(self, axes, tid, signals, is_last=False):
    """
    For task with ID `tid` plot residency information.

    :param axes: axes over which to generate the plot
    :type axes: :mod:`matplotlib.axes.Axes`

    :param tid: task ID
    :type tid: int

    :param signals: signals to be plot
    :type signals: list(str)

    :param is_last: if True this is the last plot
    :type is_last: bool
    """
    util_df = self._dfg_trace_event('sched_load_avg_task')
    # The 'cluster' column may not be available on all platforms
    if 'cluster' in util_df:
        data = util_df[util_df.pid == tid][['cluster', 'cpu']]
        # Green markers for LITTLE residency, red for big
        for ccolor, clabel in zip('gr', ['LITTLE', 'big']):
            cdata = data[data.cluster == clabel]
            if len(cdata) > 0:
                cdata.plot(ax=axes, style=[ccolor+'+'], legend=False)
    # Y Axis - placeholders for legend, acutal CPUs. topmost empty lane
    cpus = [str(n) for n in range(self._platform['cpus_count'])]
    ylabels = [''] + cpus
    axes.set_yticklabels(ylabels)
    axes.set_ylim(-1, len(cpus))
    axes.set_ylabel('CPUs')
    # X Axis
    axes.set_xlim(self._trace.x_min, self._trace.x_max)

    axes.grid(True)
    # Hide the X labels on every subplot but the bottom-most one
    if not is_last:
        axes.set_xticklabels([])
        axes.set_xlabel('')
    if 'sched_overutilized' in signals:
        self._trace.analysis.status.plotOverutilized(axes)
def _plotTaskPelt(self, axes, tid, signals):
    """
    For task with ID `tid` plot PELT-related signals.

    :param axes: axes over which to generate the plot
    :type axes: :mod:`matplotlib.axes.Axes`

    :param tid: task ID
    :type tid: int

    :param signals: signals to be plot
    :type signals: list(str)
    """
    pelt_signals = ['load_sum', 'util_sum', 'period_contrib']

    load_df = self._dfg_trace_event('sched_load_avg_task')
    task_rows = load_df[load_df.pid == tid]
    task_rows[pelt_signals].plot(ax=axes, drawstyle='steps-post')

    axes.set_xlim(self._trace.x_min, self._trace.x_max)
    # PELT sums are large numbers: force scientific notation on Y
    axes.ticklabel_format(style='scientific', scilimits=(0, 0),
                          axis='y', useOffset=False)
    axes.grid(True)
    if 'sched_overutilized' in signals:
        self._trace.analysis.status.plotOverutilized(axes)
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 |
bkendzior/scipy | scipy/stats/_discrete_distns.py | 5 | 21973 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from scipy._lib._numpy_compat import broadcast_to
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
    """A binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `binom` is::

       binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)

    for ``k`` in ``{0, 1,..., n}``.

    `binom` takes ``n`` and ``p`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, n, p):
        # Sampling is delegated to numpy's binomial generator.
        return self._random_state.binomial(n, p, self._size)

    def _argcheck(self, n, p):
        # The support's upper bound equals the number of trials.
        self.b = n
        cond = (n >= 0)
        cond &= (p >= 0) & (p <= 1)
        return cond

    def _logpmf(self, x, n, p):
        k = floor(x)
        # log C(n, k) through log-gamma, for numerical stability
        log_comb = gamln(n+1) - gamln(k+1) - gamln(n-k+1)
        # xlogy/xlog1py keep the k == 0 / k == n edge cases finite
        return log_comb + special.xlogy(k, p) + special.xlog1py(n-k, -p)

    def _pmf(self, x, n, p):
        # Exponentiate the log-pmf rather than multiplying factors.
        return exp(self._logpmf(x, n, p))

    def _cdf(self, x, n, p):
        # Binomial distribution function from scipy.special
        return special.bdtr(floor(x), n, p)

    def _sf(self, x, n, p):
        return special.bdtrc(floor(x), n, p)

    def _ppf(self, q, n, p):
        # Invert the cdf, then check whether the previous integer
        # already reaches the requested quantile.
        guess = ceil(special.bdtrik(q, n, p))
        prev = np.maximum(guess - 1, 0)
        return np.where(special.bdtr(prev, n, p) >= q, prev, guess)

    def _stats(self, n, p, moments='mv'):
        q_fail = 1.0 - p
        mean = n * p
        variance = n * p * q_fail
        skew = kurt = None
        if 's' in moments:
            skew = (q_fail - p) / sqrt(variance)
        if 'k' in moments:
            kurt = (1.0 - 6*p*q_fail) / variance
        return mean, variance, skew, kurt

    def _entropy(self, n, p):
        # Sum entropy contributions over the whole support {0, ..., n}.
        support = np.r_[0:n + 1]
        return np.sum(entr(self._pmf(support, n, p)), axis=0)
# Module-level instance used as the public `binom` distribution.
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    """A Bernoulli discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `bernoulli` is::

       bernoulli.pmf(k) = 1-p  if k = 0
                        = p    if k = 1

    for ``k`` in ``{0, 1}``.

    `bernoulli` takes ``p`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    # A Bernoulli variable is a binomial with a single trial (n == 1):
    # every method below simply forwards to the binomial machinery with
    # n pinned to 1.

    def _rvs(self, p):
        return binom_gen._rvs(self, 1, p)

    def _argcheck(self, p):
        # Only the probability needs validating; n is fixed.
        return (0 <= p) & (p <= 1)

    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)

    def _pmf(self, x, p):
        return binom._pmf(x, 1, p)

    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)

    def _sf(self, x, p):
        return binom._sf(x, 1, p)

    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)

    def _stats(self, p):
        return binom._stats(1, p)

    def _entropy(self, p):
        # Entropy of a two-point distribution: H(p) = -p*log(p)-(1-p)*log(1-p)
        return entr(p) + entr(1 - p)
# Module-level instance used as the public `bernoulli` distribution;
# the support upper bound is fixed to 1.
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
    """A negative binomial discrete random variable.

    %(before_notes)s

    Notes
    -----
    Negative binomial distribution describes a sequence of i.i.d. Bernoulli
    trials, repeated until a predefined, non-random number of successes occurs.

    The probability mass function of the number of failures for `nbinom` is::

       nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k

    for ``k >= 0``.

    `nbinom` takes ``n`` and ``p`` as shape parameters where n is the number of
    successes, whereas p is the probability of a single success.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, n, p):
        return self._random_state.negative_binomial(n, p, self._size)

    def _argcheck(self, n, p):
        # Any positive (possibly non-integer) n is accepted.
        return (n > 0) & (p >= 0) & (p <= 1)

    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))

    def _logpmf(self, x, n, p):
        # log C(n+x-1, x) via log-gamma, valid for real-valued n
        log_coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return log_coeff + n*log(p) + special.xlog1py(x, -p)

    def _cdf(self, x, n, p):
        # Regularized incomplete beta function identity
        return special.betainc(n, floor(x) + 1, p)

    def _sf_skip(self, x, n, p):
        # skip because special.nbdtrc doesn't work for 0<n<1
        return special.nbdtrc(floor(x), n, p)

    def _ppf(self, q, n, p):
        # Invert the cdf, then check whether the previous integer
        # already reaches the requested quantile.
        guess = ceil(special.nbdtrik(q, n, p))
        prev = (guess - 1).clip(0.0, np.inf)
        return np.where(self._cdf(prev, n, p) >= q, prev, guess)

    def _stats(self, n, p):
        Q = 1.0 / p
        P = Q - 1.0
        mean = n*P
        variance = n*P*Q
        skew = (Q+P)/sqrt(n*P*Q)
        kurt = (1.0 + 6*P*Q) / (n*P*Q)
        return mean, variance, skew, kurt
# Module-level instance used as the public `nbinom` distribution.
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
    """A geometric discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `geom` is::

        geom.pmf(k) = (1-p)**(k-1)*p

    for ``k >= 1``.

    `geom` takes ``p`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, p):
        return self._random_state.geometric(p, size=self._size)

    def _argcheck(self, p):
        return (0 <= p) & (p <= 1)

    def _pmf(self, k, p):
        # k-1 failures followed by a single success
        return np.power(1-p, k-1) * p

    def _logpmf(self, k, p):
        # (k-1)*log(1-p) + log(p), accurate for small p
        return special.xlog1py(k - 1, -p) + log(p)

    def _cdf(self, x, p):
        # 1 - (1-p)**k, computed through expm1/log1p for accuracy
        kf = floor(x)
        return -expm1(log1p(-p)*kf)

    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))

    def _logsf(self, x, p):
        return floor(x)*log1p(-p)

    def _ppf(self, q, p):
        # Analytic inverse of the cdf, adjusted down by one when the
        # previous integer already reaches the requested quantile.
        guess = ceil(log(1.0-q)/log(1-p))
        reached = self._cdf(guess-1, p)
        return np.where((reached >= q) & (guess > 0), guess-1, guess)

    def _stats(self, p):
        mean = 1.0/p
        qr = 1.0-p
        variance = qr / p / p
        skew = (2.0-p) / sqrt(qr)
        # Excess kurtosis: (p**2 - 6p + 6) / (1 - p)
        kurt = np.polyval([1, -6, 6], p)/(1.0-p)
        return mean, variance, skew, kurt
# Module-level instance used as the public `geom` distribution;
# the support starts at k = 1.
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
    """A hypergeometric discrete random variable.

    The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is total number of Type I objects.
    The random variate represents the number of Type I objects in N drawn
    without replacement from the total population.

    %(before_notes)s

    Notes
    -----
    The probability mass function is defined as::

        pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
                          for max(0, N - (M-n)) <= k <= min(n, N)

    %(after_notes)s

    Examples
    --------
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt

    Suppose we have a collection of 20 animals, of which 7 are dogs.  Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:

    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()

    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly.  To for example obtain the cumulative distribution
    function, use:

    >>> prb = hypergeom.cdf(x, M, n, N)

    And to generate random numbers:

    >>> R = hypergeom.rvs(M, n, N, size=10)

    """
    def _rvs(self, M, n, N):
        # numpy's signature is (ngood, nbad, nsample)
        return self._random_state.hypergeometric(n, M-n, N, size=self._size)

    def _argcheck(self, M, n, N):
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        # Support depends on the shape parameters:
        # max(0, N - (M - n)) <= k <= min(n, N)
        self.a = np.maximum(N-(M-n), 0)
        self.b = np.minimum(n, N)
        return cond

    def _logpmf(self, k, M, n, N):
        # log pmf assembled from log-gamma terms of the three binomial
        # coefficients: C(good, k) * C(bad, N-k) / C(tot, N)
        tot, good = M, n
        bad = tot - good
        return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
            - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
            + gamln(N+1)

    def _pmf(self, k, M, n, N):
        # same as the following but numerically more precise
        # return comb(good, k) * comb(bad, N-k) / comb(tot, N)
        return exp(self._logpmf(k, M, n, N))

    def _stats(self, M, n, N):
        # tot, good, sample_size = M, n, N
        # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
        M, n, N = 1.*M, 1.*n, 1.*N
        m = M - n
        p = n/M
        mu = N*p

        var = m*n*N*(M - N)*1.0/(M*M*(M-1))
        g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))

        # Excess kurtosis, assembled in stages to keep the expression
        # readable; final division also normalizes the scale
        g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
        g2 *= (M-1)*M*M
        g2 += 6.*n*N*(M-N)*m*(5.*M-6)
        g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
        return mu, var, g1, g2

    def _entropy(self, M, n, N):
        # Direct summation over the whole (finite) support
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)

    def _sf(self, k, M, n, N):
        """More precise calculation, 1 - cdf doesn't cut it."""
        # This for loop is needed because `k` can be an array. If that's the
        # case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all inputs args, so we can do the manual
        # integration.
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Manual integration over probability mass function. More accurate
            # than integrate.quad.
            k2 = np.arange(quant + 1, draw + 1)
            res.append(np.sum(self._pmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logsf(self, k, M, n, N):
        """
        More precise calculation than log(sf)
        """
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Integration over probability mass function using logsumexp
            k2 = np.arange(quant + 1, draw + 1)
            res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)
# Module-level instance used as the public `hypergeom` distribution.
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    """A Logarithmic (Log-Series, Series) discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `logser` is::

        logser.pmf(k) = - p**k / (k*log(1-p))

    for ``k >= 1``.

    `logser` takes ``p`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, p):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return self._random_state.logseries(p, size=self._size)

    def _argcheck(self, p):
        return (p > 0) & (p < 1)

    def _pmf(self, k, p):
        # -p**k / (k * log(1-p)); log1p(-p) computes log(1-p) accurately
        return -np.power(p, k) * 1.0 / k / special.log1p(-p)

    def _stats(self, p):
        # Moments derived from the non-central moments mu_k' of the
        # log-series distribution; r = log(1-p) throughout.
        r = special.log1p(-p)
        mean = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        variance = mu2p - mean*mean
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        mu3 = mu3p - 3*mean*mu2p + 2*mean**3
        skew = mu3 / np.power(variance, 1.5)

        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        mu4 = mu4p - 4*mu3p*mean + 6*mu2p*mean*mean - 3*mean**4
        kurt = mu4 / variance**2 - 3.0
        return mean, variance, skew, kurt
# Module-level instance used as the public `logser` distribution;
# the support starts at k = 1.
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
    """A Poisson discrete random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `poisson` is::

        poisson.pmf(k) = exp(-mu) * mu**k / k!

    for ``k >= 0``.

    `poisson` takes ``mu`` as shape parameter.

    %(after_notes)s

    %(example)s

    """

    # Override rv_discrete._argcheck to allow mu=0.
    def _argcheck(self, mu):
        return mu >= 0

    def _rvs(self, mu):
        return self._random_state.poisson(mu, self._size)

    def _logpmf(self, k, mu):
        # k*log(mu) - log(k!) - mu; xlogy keeps k == 0, mu == 0 finite
        return special.xlogy(k, mu) - gamln(k + 1) - mu

    def _pmf(self, k, mu):
        return exp(self._logpmf(k, mu))

    def _cdf(self, x, mu):
        return special.pdtr(floor(x), mu)

    def _sf(self, x, mu):
        return special.pdtrc(floor(x), mu)

    def _ppf(self, q, mu):
        # Invert the cdf, then check whether the previous integer
        # already reaches the requested quantile.
        guess = ceil(special.pdtrik(q, mu))
        prev = np.maximum(guess - 1, 0)
        return np.where(special.pdtr(prev, mu) >= q, prev, guess)

    def _stats(self, mu):
        var = mu
        tmp = np.asarray(mu)
        # Skew and kurtosis diverge as mu -> 0: report inf there
        mu_nonzero = tmp > 0
        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
        return mu, var, g1, g2
# Module-level instance used as the public `poisson` distribution.
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
    """A Planck discrete exponential random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `planck` is::

        planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)

    for ``k*lambda_ >= 0``.

    `planck` takes ``lambda_`` as shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, lambda_):
        # Support is the non-negative integers for lambda_ > 0 and the
        # non-positive integers for lambda_ < 0; lambda_ == 0 is invalid.
        self.a = np.where(lambda_ > 0, 0, -np.inf)
        self.b = np.where(lambda_ > 0, np.inf, 0)
        return lambda_ != 0

    def _pmf(self, k, lambda_):
        # Geometric-like weights exp(-lambda_*k), normalized to sum to 1
        norm = 1 - exp(-lambda_)
        return norm * exp(-lambda_*k)

    def _cdf(self, x, lambda_):
        return 1 - exp(-lambda_*(floor(x) + 1))

    def _ppf(self, q, lambda_):
        # Analytic inverse of the cdf, adjusted down by one when the
        # previous integer already reaches the requested quantile.
        guess = ceil(-1.0/lambda_ * log1p(-q) - 1)
        prev = (guess - 1).clip(self.a, np.inf)
        return np.where(self._cdf(prev, lambda_) >= q, prev, guess)

    def _stats(self, lambda_):
        mean = 1/(exp(lambda_) - 1)
        variance = exp(-lambda_)/(expm1(-lambda_))**2
        skew = 2*cosh(lambda_/2.0)
        kurt = 4 + 2*cosh(lambda_)
        return mean, variance, skew, kurt

    def _entropy(self, lambda_):
        # H = lambda*exp(-lambda)/(1-exp(-lambda)) - log(1-exp(-lambda))
        norm = 1 - exp(-lambda_)
        return lambda_*exp(-lambda_)/norm - log(norm)
# Module-level instance used as the public `planck` distribution.
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
    """A Boltzmann (Truncated Discrete Exponential) random variable.

    %(before_notes)s

    Notes
    -----
    The probability mass function for `boltzmann` is::

        boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))

    for ``k = 0,..., N-1``.

    `boltzmann` takes ``lambda_`` and ``N`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _pmf(self, k, lambda_, N):
        # Geometric weights exp(-lambda_*k), renormalized over the
        # truncated support {0, ..., N-1}
        norm = (1 - exp(-lambda_)) / (1 - exp(-lambda_*N))
        return norm * exp(-lambda_*k)

    def _cdf(self, x, lambda_, N):
        kf = floor(x)
        return (1 - exp(-lambda_*(kf + 1))) / (1 - exp(-lambda_*N))

    def _ppf(self, q, lambda_, N):
        # Rescale q onto the untruncated geometric cdf, invert
        # analytically, then adjust down by one when the previous
        # integer already reaches the requested quantile.
        q_full = q * (1 - exp(-lambda_*N))
        guess = ceil(-1.0/lambda_ * log(1 - q_full) - 1)
        prev = (guess - 1).clip(0.0, np.inf)
        return np.where(self._cdf(prev, lambda_, N) >= q, prev, guess)

    def _stats(self, lambda_, N):
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mean = z/(1.0-z) - N*zN/(1-zN)
        variance = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = z*trm**2 - N*N*zN
        skew = (z*(1+z)*trm**3 - N**3*zN*(1+zN)) / trm2**(1.5)
        kurt = (z*(1+4*z+z*z)*trm**4
                - N**4 * zN*(1+4*zN+zN*zN)) / (trm2 * trm2)
        return mean, variance, skew, kurt
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
    """A uniform discrete random variable on ``[low, high)``.

    The pmf is ``1 / (high - low)`` for ``k = low, ..., high - 1``;
    shape parameters are ``low`` and ``high``.
    """
    def _argcheck(self, low, high):
        self.a = low
        self.b = high - 1
        return high > low

    def _pmf(self, k, low, high):
        uniform = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), uniform, 0.)

    def _cdf(self, x, low, high):
        return (floor(x) - low + 1.) / (high - low)

    def _ppf(self, q, low, high):
        candidate = ceil(q * (high - low) + low) - 1
        lower = (candidate - 1).clip(low, high)
        # Step back one point wherever the smaller value already covers q.
        return np.where(self._cdf(lower, low, high) >= q, lower, candidate)

    def _stats(self, low, high):
        m2, m1 = np.asarray(high), np.asarray(low)
        mu = (m2 + m1 - 1.0) / 2
        d = m2 - m1
        var = (d * d - 1) / 12.0
        g2 = -6.0 / 5.0 * (d * d + 1.0) / (d * d - 1.0)
        return mu, var, 0.0, g2

    def _rvs(self, low, high):
        """An array of *size* random integers >= ``low`` and < ``high``."""
        if self._size is not None:
            # Numpy's RandomState.randint() doesn't broadcast its
            # arguments, so stretch low and high up to the output shape
            # first, then apply the vectorized randint without a size.
            low = broadcast_to(low, self._size)
            high = broadcast_to(high, self._size)
        randint = np.vectorize(self._random_state.randint, otypes=[np.int_])
        return randint(low, high)

    def _entropy(self, low, high):
        return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
                      '(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    """A Zipf discrete random variable.

    The pmf is ``1 / (zeta(a) * k**a)`` for ``k >= 1``; the shape
    parameter ``a`` must exceed 1 for the distribution to be normalizable.
    """
    def _rvs(self, a):
        return self._random_state.zipf(a, size=self._size)

    def _argcheck(self, a):
        return a > 1

    def _pmf(self, k, a):
        return 1.0 / special.zeta(a, 1) / k**a

    def _munp(self, n, a):
        # The n-th raw moment is finite only when a > n + 1; otherwise inf.
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
    """A Laplacian discrete random variable.

    The pmf is ``tanh(a/2) * exp(-a * abs(k))`` over all integers,
    for shape parameter ``a > 0``.
    """
    def _pmf(self, k, a):
        return tanh(a / 2.0) * exp(-a * abs(k))

    def _cdf(self, x, a):
        k = floor(x)
        # Closed forms differ on the two sides of zero; evaluate lazily.
        nonneg = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
        negative = lambda k, a: exp(a * (k + 1)) / (exp(a) + 1)
        return _lazywhere(k >= 0, (k, a), f=nonneg, f2=negative)

    def _ppf(self, q, a):
        const = 1 + exp(a)
        candidate = ceil(np.where(q < 1.0 / (1 + exp(-a)),
                                  log(q * const) / a - 1,
                                  -log((1 - q) * const) / a))
        lower = candidate - 1
        return np.where(self._cdf(lower, a) >= q, lower, candidate)

    def _stats(self, a):
        ea = exp(a)
        mu2 = 2. * ea / (ea - 1.) ** 2
        mu4 = 2. * ea * (ea ** 2 + 10. * ea + 1.) / (ea - 1.) ** 4
        # Symmetric distribution: mean and skewness are both zero.
        return 0., mu2, 0., mu4 / mu2 ** 2 - 3.

    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a / 2.0))
dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
    """A Skellam discrete random variable.

    Distribution of the difference of two (possibly correlated)
    Poisson-distributed variables; shape parameters ``mu1`` and ``mu2``
    must be strictly positive.  The pmf and cdf are expressed through
    the noncentral chi-square helpers ``_ncx2_pdf`` / ``_ncx2_cdf``.

    See http://en.wikipedia.org/wiki/Skellam_distribution for details.
    """
    def _rvs(self, mu1, mu2):
        n = self._size
        sample1 = self._random_state.poisson(mu1, n)
        sample2 = self._random_state.poisson(mu2, n)
        return sample1 - sample2

    def _pmf(self, x, mu1, mu2):
        # ncx2.pdf() returns nan's for extremely low probabilities
        return np.where(x < 0,
                        _ncx2_pdf(2 * mu2, 2 * (1 - x), 2 * mu1) * 2,
                        _ncx2_pdf(2 * mu1, 2 * (1 + x), 2 * mu2) * 2)

    def _cdf(self, x, mu1, mu2):
        x = floor(x)
        return np.where(x < 0,
                        _ncx2_cdf(2 * mu2, -2 * x, 2 * mu1),
                        1 - _ncx2_cdf(2 * mu1, 2 * (x + 1), 2 * mu2))

    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt(var ** 3)
        g2 = 1 / var
        return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
# `get_distribution_names` partitions the module namespace into the
# public distribution instances (e.g. ``poisson``) and the ``*_gen``
# classes that implement them; both groups form the public API.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)

__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
cyberphox/MissionPlanner | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
# Public API of this module.
# NOTE(review): the refft*/irefft* entries are not defined anywhere in
# this chunk of the file -- presumably legacy aliases for the rfft*
# family; confirm they exist before relying on them.
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
           'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
           'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
# Caches of FFTPACK work arrays, keyed by transform length n, so repeated
# transforms of the same size skip the setup step (see _raw_fft).
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
             work_function=fftpack.cfftf, fft_cache = _fft_cache ):
    """Internal driver shared by all 1-d transforms.

    Crops or zero-pads `a` to length `n` along `axis`, fetches (or builds
    and caches) the FFTPACK work array for that length, then applies
    `work_function` along the last axis.

    Parameters
    ----------
    a : array_like
        Input data.
    n : int, optional
        Transform length; defaults to the size of `a` along `axis`.
    axis : int, optional
        Axis to transform (default: last).
    init_function, work_function : callable
        FFTPACK setup/transform pair (complex or real variants).
    fft_cache : dict
        Maps n -> work array produced by `init_function`.

    Raises
    ------
    ValueError
        If `n` is less than 1.
    """
    a = asarray(a)

    if n is None:
        n = a.shape[axis]

    if n < 1:
        raise ValueError("Invalid number of FFT data points (%d) specified." % n)

    try:
        wsave = fft_cache[n]
    except KeyError:
        wsave = init_function(n)
        fft_cache[n] = wsave

    if a.shape[axis] != n:
        s = list(a.shape)
        if s[axis] > n:
            # Crop: keep the first n entries along `axis`.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, n)
            # Index with a tuple: indexing with a plain list is deprecated
            # (and later removed) in numpy.
            a = a[tuple(index)]
        else:
            # Zero-pad up to length n along `axis`.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, s[axis])
            s[axis] = n
            z = zeros(s, a.dtype.char)
            z[tuple(index)] = a
            a = z

    # FFTPACK routines transform the last axis only, so swap it in place.
    if axis != -1:
        a = swapaxes(a, axis, -1)
    r = work_function(a, wsave)
    if axis != -1:
        r = swapaxes(r, axis, -1)
    return r
def fft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output; the input is
        cropped or zero-padded to this length.  Defaults to the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: the last axis).

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : Definition of the DFT and conventions used.
    ifft, fft2, fftn, rfftn, fftfreq
    """
    # Forward complex transform via the shared FFTPACK driver.
    return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    ``ifft(fft(a)) == a`` to within numerical accuracy.  The input should
    be ordered as `fft` output is: zero-frequency term first, then the
    positive-frequency terms, then the negative-frequency terms.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output; the input is
        cropped or zero-padded (zeros are appended) to this length.
        Defaults to the input length along `axis`.
    axis : int, optional
        Axis over which to compute the inverse DFT (default: last axis).

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft, fft, ifft2, ifftn
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = a.shape[axis]
    unscaled = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
    # The inverse transform carries the 1/n normalization.
    return unscaled / n
def rfft(a, n=None, axis=-1):
    """
    Compute the one-dimensional discrete Fourier Transform for real input.

    Because the spectrum of real input is Hermite-symmetric, only the
    non-negative frequency terms are computed, so the transformed axis of
    the output has length ``n/2 + 1``.  Any imaginary part of the input
    is silently discarded.

    Parameters
    ----------
    a : array_like
        Input array (taken to be real).
    n : int, optional
        Number of points along the transformation axis to use; the input
        is cropped or zero-padded as needed.  Defaults to the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: last axis).

    Returns
    -------
    out : complex ndarray
        The transformed input; the transformed axis has length ``n/2+1``.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft, irfft, fft, fftn, rfftn
    """
    real_input = asarray(a).astype(float)
    return _raw_fft(real_input, n, axis, fftpack.rffti, fftpack.rfftf,
                    _real_fft_cache)
def irfft(a, n=None, axis=-1):
    """
    Compute the inverse of the n-point DFT for real input.

    Inverse of `rfft`: ``irfft(rfft(a), len(a)) == a`` to within
    numerical accuracy.  The input holds the non-negative frequency half
    of a Hermite-symmetric spectrum (real zero-frequency term first); the
    negative-frequency terms are implied by conjugate symmetry.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the *output*; ``n/2+1`` input
        points are consumed (cropping/zero-padding as needed).  Defaults
        to ``2*(m-1)`` where `m` is the input length along `axis`, so an
        odd output length must be given explicitly.
    axis : int, optional
        Axis over which to compute the inverse FFT.

    Returns
    -------
    out : ndarray
        The real-valued inverse transform along `axis`.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft, rfft, fft, irfft2, irfftn
    """
    spectrum = asarray(a).astype(complex)
    if n is None:
        n = 2 * (spectrum.shape[axis] - 1)
    raw = _raw_fft(spectrum, n, axis, fftpack.rffti, fftpack.rfftb,
                   _real_fft_cache)
    # The inverse transform carries the 1/n normalization.
    return raw / n
def hfft(a, n=None, axis=-1):
    """
    Compute the FFT of a signal whose spectrum has Hermitian symmetry.

    `hfft`/`ihfft` are the analogue of `rfft`/`irfft` for the opposite
    case: the signal is real in the frequency domain and
    Hermite-symmetric in the time domain.  ``ihfft(hfft(a), len(a)) ==
    a`` within numerical accuracy; supply `n` when the result length is
    odd.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        The length of the FFT; defaults to ``2*(m-1)`` where `m` is the
        input length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: last axis).

    Returns
    -------
    out : ndarray
        The transformed input.

    See also
    --------
    rfft, ihfft
    """
    signal = asarray(a).astype(complex)
    if n is None:
        n = 2 * (signal.shape[axis] - 1)
    # hfft(a) == n * irfft(conj(a), n): conjugation flips the implied
    # symmetry and the multiplication undoes irfft's 1/n normalization.
    return irfft(conjugate(signal), n, axis) * n
def ihfft(a, n=None, axis=-1):
    """
    Compute the inverse FFT of a signal whose spectrum has Hermitian
    symmetry.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT; defaults to the input length along
        `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT (default: last axis).

    Returns
    -------
    out : ndarray
        The transformed input.

    See also
    --------
    hfft, irfft
    """
    real_input = asarray(a).astype(float)
    if n is None:
        n = real_input.shape[axis]
    # ihfft(a) == conj(rfft(a)) / n, mirroring hfft's use of irfft.
    return conjugate(rfft(real_input, n, axis)) / n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
    """Apply a 1-d transform `function` successively over several axes.

    Axes are processed in reverse order, so with the default arguments
    the last axis is transformed first.

    Parameters
    ----------
    a : array_like
        Input array.
    s, axes : optional
        Shape/axes arguments; see `_cook_nd_args`.
    function : callable
        One of the 1-d transforms in this module (`fft`, `ifft`, ...).

    Returns
    -------
    ndarray
        The array transformed along each requested axis.
    """
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    # reversed(range(...)) replaces the old ``itl = range(...);
    # itl.reverse()`` idiom, which fails on Python 3 where range() is not
    # a list; it works identically on Python 2.
    for ii in reversed(range(len(axes))):
        a = function(a, n=s[ii], axis=axes[ii])
    return a
def fftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; along
        each axis the input is cropped or zero-padded to match.
        Defaults to the input shape (along `axes`).
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is also not given.
        Repeated indices mean the transform over that axis is performed
        multiple times.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, ifftn, fft, rfftn, fft2, fftshift
    """
    # Delegate to the generic n-d driver with the forward complex FFT.
    return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    ``ifftn(fftn(a)) == a`` to within numerical accuracy.  The input
    should be ordered as `fftn` output is (zero-frequency term in the
    low-order corner of all axes).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; along
        each axis the input is cropped or zero-padded (zeros appended)
        to match.  Defaults to the input shape (along `axes`).
    axes : sequence of ints, optional
        Axes over which to compute the IFFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is also not given.
        Repeated indices mean the inverse transform over that axis is
        performed multiple times.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, fftn, ifft, ifft2, ifftshift
    """
    # Delegate to the generic n-d driver with the inverse complex FFT.
    return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    `fft2` is just `fftn` with a different default for `axes`: by
    default the transform is computed over the last two axes.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; along
        each axis the input is cropped or zero-padded to match.
        Defaults to the input shape (along `axes`).
    axes : sequence of ints, optional
        Axes over which to compute the FFT (default: last two axes).
        Repeated indices transform that axis multiple times; a
        one-element sequence performs a one-dimensional FFT.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths, or `axes` is not given
        and ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, ifft2, fft, fftn, fftshift
    """
    # Same driver as fftn; only the default axes differ.
    return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2,-1)):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    `ifft2` is just `ifftn` with a different default for `axes`: by
    default the inverse transform is computed over the last two axes.
    ``ifft2(fft2(a)) == a`` to within numerical accuracy, with the input
    ordered as `fft2` output is.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output; along each axis the
        input is cropped or zero-padded (zeros appended) to match.
        Defaults to the input shape (along `axes`).
    axes : sequence of ints, optional
        Axes over which to compute the FFT (default: last two axes).
        Repeated indices transform that axis multiple times; a
        one-element sequence performs a one-dimensional FFT.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different lengths, or `axes` is not given
        and ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft, fft2, ifftn, fft, ifft
    """
    # Same driver as ifftn; only the default axes differ.
    return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    The real-input transform (`rfft`) is applied along the last of the
    requested axes; each remaining axis is then transformed with the
    complex `fft`.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        The last element corresponds to `n` of ``rfft(x, n)``; the others
        to `n` of ``fft(x, n)``.  Smaller than the input crops, larger
        zero-pads; defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is not given either.

    Returns
    -------
    out : complex ndarray
        The transformed input; the last transformed axis has length
        ``s[-1]//2 + 1`` while the others follow `s` or stay unchanged.
    """
    arr = asarray(a).astype(float)
    s, axes = _cook_nd_args(arr, s, axes)
    # Real-to-complex transform along the final axis first ...
    out = rfft(arr, s[-1], axes[-1])
    # ... then ordinary complex transforms over the remaining axes.
    for k in range(len(axes) - 1):
        out = fft(out, s[k], axes[k])
    return out
def rfft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional FFT of a real array.

    This is `rfftn` restricted, by default, to the last two axes.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT (default: the last two).

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.
    """
    # Thin convenience wrapper: rfftn implements the actual algorithm.
    return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    Inverts `rfftn`: complex inverse transforms (`ifft`) are applied over
    all but the last requested axis, then the complex-to-real `irfft`
    over the final axis.  ``irfftn(rfftn(a), a.shape) == a`` to within
    numerical accuracy; as with `irfft`, `s` must be given to recover an
    odd-length final axis.

    Parameters
    ----------
    a : array_like
        Input array, ordered as returned by `rfftn` (as for `irfft`
        along the final transformation axis, as for `ifftn` elsewhere).
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output.  Along
        the last axis ``s[-1]//2 + 1`` input points are used.  Smaller
        than the input crops, larger zero-pads; defaults to the input
        shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  Defaults to the
        last ``len(s)`` axes, or all axes when `s` is not given either.

    Returns
    -------
    out : ndarray
        The real inverse transform.  When `s` is not given, the final
        transformed axis of the output has length ``2*(m-1)`` where `m`
        is the input's final-axis length.
    """
    arr = asarray(a).astype(complex)
    s, axes = _cook_nd_args(arr, s, axes, invreal=1)
    # Complex inverse transforms over every leading axis ...
    for k in range(len(axes) - 1):
        arr = ifft(arr, s[k], axes[k])
    # ... and the complex-to-real inverse over the last axis.
    return irfft(arr, s[-1], axes[-1])
def irfft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    Equivalent to `irfftn` restricted, by default, to the last two axes.

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft (default: the
        last two axes).

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.
    """
    # Thin convenience wrapper around irfftn.
    return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate

# Backwards-compatible aliases for the old ``refft*``/``irefft*`` spellings;
# each alias emits a DeprecationWarning pointing at its replacement.
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-3.0 |
DR08/mxnet | docs/mxdoc.py | 7 | 12702 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
import StringIO
import contextlib
# Whitelist of doc names whose python code-block output is evaluated and
# embedded, e.g. ['tutorials/gluon'].
# NOTE(review): the name is misspelled ("WHILTELIST") but add_buttons()
# below uses the same spelling, so renaming needs both sites changed.
_EVAL_WHILTELIST = []
# Matches the start or end of a fenced code block: captures the fence
# indentation and the (possibly empty) language tag.
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
          'r' : ('R','#'),
          'scala' : ('scala', '#'),
          'julia' : ('jl', '#'),
          'perl' : ('pl', '#'),
          'cpp' : ('cc', '//'),
          'bash' : ('sh', '#')}
# Markers replaced in the documents by generated HTML buttons.
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
    """Run the doxygen build and copy its HTML output into the out dir."""
    src = app.builder.srcdir
    _run_cmd("cd %s/.. && make doxygen" % src)
    _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
    """Compile the mxnet shared library (debug build) from the repo root."""
    cmd = "cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1"
    _run_cmd(cmd % app.builder.srcdir)
def build_r_docs(app):
    """Build the R reference-manual PDF and move it under ``<outdir>/api/r/``.

    Runs roxygen2 and ``R CMD Rd2pdf`` inside the R package directory.
    """
    # BUG FIX: `root_path` was used below but never defined (NameError).
    # `srcdir` is the docs/ directory, so the repository root is one up.
    root_path = app.builder.srcdir + '/..'
    r_root = root_path + '/R-package'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
    _run_cmd('cd ' + r_root +
             '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
    dest_path = app.builder.outdir + '/api/r/'
    _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
    """Run scaladoc over the scala-package sources and install the output."""
    scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
    # scaladoc fails on some apis, so exit 0 to pass the check
    _run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
    dest_path = app.builder.outdir + '/api/scala/docs'
    _run_cmd('rm -rf ' + dest_path)
    _run_cmd('mkdir -p ' + dest_path)
    # Move each generated artifact into the destination directory.
    for doc_file in ('index', 'index.html', 'ml', 'lib', 'index.js', 'package.html'):
        _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
    """Convert a markdown table (list of ``|``-delimited rows) to rst.

    Returns an ``eval_rst`` list-table block, or an empty string when the
    input does not look like a well-formed table (fewer than 3 rows,
    inconsistent column counts, or a malformed separator row).
    """
    if len(table) < 3:
        return ''
    out = '```eval_rst\n.. list-table::\n   :header-rows: 1\n\n'
    for i, l in enumerate(table):
        cols = l.split('|')[1:-1]
        if i == 0:
            ncol = len(cols)
        else:
            if len(cols) != ncol:
                return ''
        if i == 1:
            # The second row must be the `---` separator line.
            for c in cols:
                # BUG FIX: was `len(c) is not 0` — `is` compares identity,
                # not value, and must not be used with int literals.
                if len(c) != 0 and '---' not in c:
                    return ''
        else:
            # Header (i == 0) and body rows become list-table items.
            for j, c in enumerate(cols):
                out += '   * - ' if j == 0 else '     - '
                out += pypandoc.convert_text(
                    c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
    out += '```\n'
    return out
def convert_table(app, docname, source):
    """sphinx ``source-read`` hook: convert markdown tables to rst in place.

    ``source`` is a one-element list holding the document text; it is
    rewritten in place as required by the hook protocol.  Table rows are
    removed from the output and replaced by the converted rst block.
    """
    num_tables = 0
    for i, j in enumerate(source):
        table = []
        output = ''
        in_table = False
        for l in j.split('\n'):
            r = l.strip()
            if r.startswith('|'):
                # Accumulate table rows; they are re-emitted converted.
                table.append(r)
                in_table = True
            else:
                if in_table:
                    converted = _convert_md_table_to_rst(table)
                    # BUG FIX: was `converted is ''` — identity comparison
                    # with a string literal; compare by value instead.
                    if converted == '':
                        print("Failed to convert the markdown table")
                        print(table)
                    else:
                        num_tables += 1
                        output += converted
                    in_table = False
                    table = []
                output += l + '\n'
        source[i] = output
    if num_tables > 0:
        print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
    """Iterate over *lines*, tagging each with fenced-code-block state.

    Yields
    ------
    (str, bool, str or None, int or None)
        The line itself, whether it belongs to a code block (fence lines
        included), the block's language, and the fence indentation.
    """
    in_code = False
    lang = None
    indent = None
    for line in lines:
        match = _CODE_MARK.match(line)
        if match is None:
            # Ordinary line: report the current state unchanged.
            yield (line, in_code, lang, indent)
            continue
        if not in_code and match.groups()[1].lower() in _LANGS:
            # Opening fence of a recognised language.
            lang = match.groups()[1].lower()
            indent = len(match.groups()[0])
            in_code = True
            yield (line, in_code, lang, indent)
        else:
            # Closing fence (or an opening fence of an unknown language).
            yield (line, in_code, lang, indent)
            lang = None
            indent = None
            in_code = False
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lines):
    """Split *lines* into alternating code and non-code blocks.

    Yields ``(is_code, lang, block_lines)`` tuples.  For code blocks the
    surrounding ``` fences and any leading/trailing empty lines are
    stripped.
    """
    block = []
    prev_lang = None
    prev_in_code = None
    for line, in_code, lang, _ in _parse_code_lines(lines):
        if in_code != prev_in_code:
            # State flipped: the accumulated block is complete.
            if prev_in_code and len(block) >= 2:
                block = block[1:-1]  # drop the ``` fence lines
                # Trim empty lines at the head and the tail.
                while block and not block[0]:
                    block.pop(0)
                while block and not block[-1]:
                    block.pop()
            if block:
                yield (prev_in_code, prev_lang, block)
            block = []
        block.append(line)
        prev_lang = lang
        prev_in_code = in_code
    if block:
        yield (prev_in_code, prev_lang, block)
def _get_mk_code_block(src, lang):
"""Return a markdown code block
E.g.
```python
import mxnet
````
"""
if lang is None:
lang = ''
return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n'
@contextlib.contextmanager
def _string_io():
    """Temporarily redirect stdout/stderr into a StringIO buffer.

    Yields the buffer so callers can read whatever the wrapped code
    printed.
    """
    oldout = sys.stdout
    olderr = sys.stderr
    strio = StringIO.StringIO()
    sys.stdout = strio
    sys.stderr = strio
    try:
        yield strio
    finally:
        # BUG FIX: restore the real streams even when the wrapped code
        # raises; previously an exception inside the `with` body left
        # stdout/stderr captured for the rest of the process.
        sys.stdout = oldout
        sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
    """Exec python *src*, capturing everything written to stdout/stderr.

    Returns ``(success, output)``; on failure *output* also contains the
    error message.  Note: ``local_dict`` is accepted but `global_dict`
    is used for both exec namespaces, matching the original behaviour.
    """
    # Drop ipython magics and blocking plt.show() calls before exec.
    runnable = '\n'.join(l for l in src.split('\n')
                         if not l.startswith('%') and 'plt.show()' not in l)
    ok = True
    err = ''
    with _string_io() as captured:
        try:
            exec(runnable, global_dict, global_dict)
        except Exception as e:
            err = str(e)
            ok = False
    return (ok, captured.getvalue() + err)
def _get_jupyter_notebook(lang, lines):
    """Assemble a Jupyter-notebook dict from markdown *lines*.

    Code blocks in *lang* become code cells; everything else (including
    code blocks in other languages) becomes markdown cells.
    """
    cells = []
    for in_code, blk_lang, blk_lines in _get_blocks(lines):
        is_code = in_code and blk_lang == lang
        cell = {
            "cell_type": "code" if is_code else "markdown",
            "metadata": {},
            "source": '\n'.join(blk_lines),
        }
        if is_code:
            # Code cells need empty execution metadata to be valid.
            cell["outputs"] = []
            cell["execution_count"] = None
        cells.append(cell)
    return {"nbformat": 4,
            "nbformat_minor": 2,
            "metadata": {"language": lang, "display_name": '', "name": ''},
            "cells": cells}
def _get_source(lang, lines):
    """Extract a runnable *lang* source file from markdown *lines*.

    Code blocks in *lang* are kept verbatim; every other line is emitted
    as a comment using the language's comment symbol.  HTML scaffolding
    and matplotlib magics are dropped entirely.
    """
    cmt = _LANGS[lang][1] + ' '
    out = []
    # BUG FIX: _get_blocks takes only the lines and yields
    # (in_code, block_lang, block_lines) 3-tuples; the previous call
    # `_get_blocks(lang, lines)` raised a TypeError, and the 2-tuple
    # unpacking would have failed as well.
    for in_code, blk_lang, blk_lines in _get_blocks(lines):
        # Only code blocks written in the requested language stay code;
        # blocks in other languages are commented out like prose.
        in_code = in_code and blk_lang == lang
        if in_code:
            out.append('')
        for l in blk_lines:
            if in_code:
                if '%matplotlib' not in l:
                    out.append(l)
            else:
                if ('<div>' in l or '</div>' in l or
                        '<script>' in l or '</script>' in l or
                        '<!--' in l or '-->' in l or
                        '%matplotlib' in l):
                    continue
                out.append(cmt + l)
        if in_code:
            out.append('')
    return out
def _get_src_download_btn(out_prefix, langs, lines):
    """Write one notebook per language and return download-button HTML."""
    pieces = ['<div class="btn-group" role="group">\n']
    for lang in langs:
        # Python keeps the bare .ipynb name; other languages get a suffix.
        suffix = '.ipynb' if lang == 'python' else '_' + lang + '.ipynb'
        ipynb = out_prefix + suffix
        with open(ipynb, 'w') as f:
            json.dump(_get_jupyter_notebook(lang, lines), f)
        name = ipynb.split('/')[-1]
        pieces.append('<div class="download_btn"><a href="%s" download="%s">'
                      '<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>'
                      % (name, name, name))
    pieces.append('</div>\n')
    return ''.join(pieces)
def add_buttons(app, docname, source):
    """sphinx ``source-read`` hook: render fences and download buttons.

    Rewrites each document so code blocks are emitted as markdown fences,
    the source-download marker is replaced by notebook download buttons,
    and (for whitelisted docs) python blocks are executed with their
    output appended.  ``source`` is modified in place.
    """
    out_prefix = app.builder.outdir + '/' + docname
    dirname = os.path.dirname(out_prefix)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    for doc_idx, doc in enumerate(source):
        local_dict = {}
        global_dict = {}
        lines = doc.split('\n')
        langs = set(l for (_, _, l, _) in _parse_code_lines(lines)
                    if l is not None and l in _LANGS)
        # Replace the download marker with the generated buttons first.
        for k, line in enumerate(lines):
            if _SRC_DOWNLOAD_MARK in line:
                lines[k] = _get_src_download_btn(out_prefix, langs, lines)
        # (Language-selection buttons are intentionally disabled; see
        # _get_lang_selection_btn for the markup they would produce.)
        output = ''
        for in_code, lang, blk_lines in _get_blocks(lines):
            src = '\n'.join(blk_lines) + '\n'
            if not in_code:
                output += src
                continue
            output += _get_mk_code_block(src, lang)
            if lang == 'python' and any(w in docname for w in _EVAL_WHILTELIST):
                status, blk_out = _get_python_block_output(src, global_dict, local_dict)
                if len(blk_out):
                    output += '<div class="cell-results-header">Output:</div>\n\n'
                    output += _get_mk_code_block(blk_out, 'results')
        source[doc_idx] = output
def setup(app):
    """Sphinx extension entry point: register all mxnet doc-build hooks."""
    app.connect("builder-inited", build_mxnet)
    app.connect("builder-inited", generate_doxygen)
    app.connect("builder-inited", build_scala_docs)
    # Building the R docs requires a LaTeX install, which is too heavy here.
    # app.connect("builder-inited", build_r_docs)
    app.connect('source-read', convert_table)
    app.connect('source-read', add_buttons)
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: 'http://mxnet.io/' + url,
        'enable_eval_rst': True,
    }, True)
    app.add_transform(transform.AutoStructify)
| apache-2.0 |
slundberg/shap | tests/explainers/test_gpu_tree.py | 1 | 6842 | # pylint: disable=missing-function-docstring
""" Test gpu accelerated tree functions.
"""
import sklearn
import pytest
import numpy as np
import shap
from shap.utils import assert_import
try:
    assert_import("cext_gpu")
except ImportError:
    # Skip the whole module when shap was built without the CUDA extension.
    pytestmark = pytest.mark.skip("cuda module not built")
def test_front_page_xgboost():
    """Smoke-test the README front-page example with the GPU explainer."""
    xgboost = pytest.importorskip("xgboost")
    # load JS visualization code to notebook
    shap.initjs()
    # train an XGBoost model on the Boston housing data
    X, y = shap.datasets.boston()
    booster = xgboost.train({"learning_rate": 0.01}, xgboost.DMatrix(X, label=y), 100)
    # explain the model's predictions using SHAP values
    explainer = shap.GPUTreeExplainer(booster)
    values = explainer.shap_values(X)
    # force plots: a single prediction, then the whole training set
    shap.force_plot(explainer.expected_value, values[0, :], X.iloc[0, :])
    shap.force_plot(explainer.expected_value, values, X)
    # dependence plots for one feature, by index and by name
    shap.dependence_plot(5, values, X, show=False)
    shap.dependence_plot("RM", values, X, show=False)
    # summarize the effects of all the features
    shap.summary_plot(values, X, show=False)
# Shared random state and toy datasets used by every task factory below.
rs = np.random.RandomState(15921)  # pylint: disable=no-member
n = 100  # number of samples
m = 4  # number of features
datasets = {'regression': (rs.randn(n, m), rs.randn(n)),
            'binary': (rs.randn(n, m), rs.binomial(1, 0.5, n)),
            'multiclass': (rs.randn(n, m), rs.randint(0, 5, n))}
def task_xfail(func):
    """Decorator marking a task factory's product as an expected failure."""
    def wrapped():
        task = func()
        return pytest.param(task, marks=pytest.mark.xfail)
    return wrapped
def xgboost_base():
    """Task: raw xgboost Booster (hist tree method) on the regression data."""
    # pylint: disable=import-outside-toplevel
    try:
        import xgboost
    except ImportError:
        return pytest.param("xgboost.XGBRegressor", marks=pytest.mark.skip)
    X, y = datasets['regression']
    model = xgboost.XGBRegressor(tree_method="hist")
    model.fit(X, y)
    # (model, data, expected model output) triple, like all task factories
    return model.get_booster(), X, model.predict(X)
def xgboost_regressor():
    """Task: XGBRegressor on the regression dataset."""
    # pylint: disable=import-outside-toplevel
    try:
        import xgboost
    except ImportError:
        return pytest.param("xgboost.XGBRegressor", marks=pytest.mark.skip)
    X, y = datasets['regression']
    model = xgboost.XGBRegressor()
    model.fit(X, y)
    return model, X, model.predict(X)
def xgboost_binary_classifier():
    """Task: XGBClassifier on the binary dataset (margin output)."""
    # pylint: disable=import-outside-toplevel
    try:
        import xgboost
    except ImportError:
        return pytest.param("xgboost.XGBClassifier", marks=pytest.mark.skip)
    X, y = datasets['binary']
    model = xgboost.XGBClassifier(eval_metric='error')
    model.fit(X, y)
    # output_margin=True: SHAP values sum to the raw margin, not probability
    return model, X, model.predict(X, output_margin=True)
def xgboost_multiclass_classifier():
    """Task: XGBClassifier on the multiclass dataset (margin output)."""
    # pylint: disable=import-outside-toplevel
    try:
        import xgboost
    except ImportError:
        return pytest.param("xgboost.XGBClassifier", marks=pytest.mark.skip)
    X, y = datasets['multiclass']
    model = xgboost.XGBClassifier()
    model.fit(X, y)
    return model, X, model.predict(X, output_margin=True)
def lightgbm_base():
    """Task: raw lightgbm Booster on the regression dataset."""
    # pylint: disable=import-outside-toplevel
    try:
        import lightgbm
    except ImportError:
        return pytest.param("lightgbm.LGBMRegressor", marks=pytest.mark.skip)
    X, y = datasets['regression']
    model = lightgbm.LGBMRegressor()
    model.fit(X, y)
    return model.booster_, X, model.predict(X)
def lightgbm_regression():
    """Task: LGBMRegressor on the regression dataset."""
    # pylint: disable=import-outside-toplevel
    try:
        import lightgbm
    except ImportError:
        return pytest.param("lightgbm.LGBMRegressor", marks=pytest.mark.skip)
    X, y = datasets['regression']
    model = lightgbm.LGBMRegressor()
    model.fit(X, y)
    return model, X, model.predict(X)
def lightgbm_binary_classifier():
    """Task: LGBMClassifier on the binary dataset (raw-score output)."""
    # pylint: disable=import-outside-toplevel
    try:
        import lightgbm
    except ImportError:
        return pytest.param("lightgbm.LGBMClassifier", marks=pytest.mark.skip)
    X, y = datasets['binary']
    model = lightgbm.LGBMClassifier()
    model.fit(X, y)
    # raw_score=True: SHAP values sum to the raw score, not probability
    return model, X, model.predict(X, raw_score=True)
def lightgbm_multiclass_classifier():
    """Task: LGBMClassifier on the multiclass dataset (raw-score output)."""
    # pylint: disable=import-outside-toplevel
    try:
        import lightgbm
    except ImportError:
        return pytest.param("lightgbm.LGBMClassifier", marks=pytest.mark.skip)
    X, y = datasets['multiclass']
    model = lightgbm.LGBMClassifier()
    model.fit(X, y)
    return model, X, model.predict(X, raw_score=True)
def rf_regressor():
    """Task: sklearn RandomForestRegressor on the regression dataset."""
    # BUG FIX: the module only does `import sklearn`, which does not load
    # the `ensemble` submodule; previously this worked only when another
    # import happened to pull it in.  Import it explicitly.
    from sklearn.ensemble import RandomForestRegressor
    X, y = datasets['regression']
    model = RandomForestRegressor()
    model.fit(X, y)
    return model, X, model.predict(X)
def rf_binary_classifier():
    """Task: sklearn RandomForestClassifier on the binary dataset."""
    # BUG FIX: `import sklearn` alone does not load `sklearn.ensemble`;
    # import the class explicitly instead of relying on transitive imports.
    from sklearn.ensemble import RandomForestClassifier
    X, y = datasets['binary']
    model = RandomForestClassifier()
    model.fit(X, y)
    return model, X, model.predict_proba(X)
def rf_multiclass_classifier():
    """Task: sklearn RandomForestClassifier on the multiclass dataset."""
    # BUG FIX: `import sklearn` alone does not load `sklearn.ensemble`;
    # import the class explicitly instead of relying on transitive imports.
    from sklearn.ensemble import RandomForestClassifier
    X, y = datasets['multiclass']
    model = RandomForestClassifier()
    model.fit(X, y)
    return model, X, model.predict_proba(X)
# All (model, X, expected output) task triples used to parametrize the tests.
tasks = [xgboost_base(), xgboost_regressor(), xgboost_binary_classifier(),
         xgboost_multiclass_classifier(), lightgbm_base(), lightgbm_regression(),
         lightgbm_binary_classifier(), lightgbm_multiclass_classifier(), rf_binary_classifier(),
         rf_regressor(), rf_multiclass_classifier()]
# Readable pytest ids for the parametrized tasks.
def idfn(task):
    """Return a pytest id: the model's fully qualified class name."""
    if isinstance(task, str):
        return task
    model, _X, _pred = task
    cls = type(model)
    return '%s.%s' % (cls.__module__, cls.__qualname__)
@pytest.mark.parametrize("task", tasks, ids=idfn)
@pytest.mark.parametrize("feature_perturbation", ["interventional", "tree_path_dependent"])
def test_gpu_tree_explainer_shap(task, feature_perturbation):
    """GPU SHAP values should match the CPU TreeExplainer within 1e-3."""
    model, X, _ = task
    gpu_ex = shap.GPUTreeExplainer(model, X, feature_perturbation=feature_perturbation)
    ex = shap.TreeExplainer(model, X, feature_perturbation=feature_perturbation)
    host_shap = ex.shap_values(X, check_additivity=True)
    gpu_shap = gpu_ex.shap_values(X, check_additivity=True)
    # Check outputs roughly the same as CPU algorithm
    assert np.allclose(ex.expected_value, gpu_ex.expected_value, 1e-3, 1e-3)
    assert np.allclose(host_shap, gpu_shap, 1e-3, 1e-3)
@pytest.mark.parametrize("task", tasks, ids=idfn)
@pytest.mark.parametrize("feature_perturbation", ["tree_path_dependent"])
def test_gpu_tree_explainer_shap_interactions(task, feature_perturbation):
    """Interaction values summed over both feature axes must recover the margin."""
    model, X, margin = task
    ex = shap.GPUTreeExplainer(model, X, feature_perturbation=feature_perturbation)
    shap_values = np.array(ex.shap_interaction_values(X), copy=False)
    # Collapsing the last two axes turns interactions back into totals,
    # which (plus the expected value) must equal the model output.
    assert np.abs(np.sum(shap_values, axis=(len(shap_values.shape) - 1, len(
        shap_values.shape) - 2)).T + ex.expected_value - margin).max() < 1e-4, \
        "SHAP values don't sum to model output!"
| mit |
nikitasingh981/scikit-learn | examples/classification/plot_lda_qda.py | 32 | 5381 | """
====================================================================
Linear and Quadratic Discriminant Analysis with covariance ellipsoid
====================================================================
This example plots the covariance ellipsoids of each class and
decision boundary learned by LDA and QDA. The ellipsoids display
the double standard deviation for each class. With LDA, the
standard deviation is the same for all the classes, while each
class has its own standard deviation with QDA.
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
# Red-to-blue colormap for the class-probability background; registered
# globally so pcolormesh can refer to it by the name 'red_blue_classes'.
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
    """Generate two 2-D Gaussian classes sharing one covariance matrix."""
    n_samples, n_features = 300, 2
    np.random.seed(0)
    cov = np.array([[0., -0.23], [0.83, .23]])
    # Same covariance for both classes; the second class is shifted by (1, 1).
    class0 = np.dot(np.random.randn(n_samples, n_features), cov)
    class1 = np.dot(np.random.randn(n_samples, n_features), cov) + np.array([1, 1])
    X = np.r_[class0, class1]
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
def dataset_cov():
    """Generate two 2-D Gaussian classes with different covariance matrices."""
    n_samples, n_features = 300, 2
    np.random.seed(0)
    cov = np.array([[0., -1.], [2.5, .7]]) * 2.
    # The second class uses the transposed covariance and is shifted by (1, 4).
    class0 = np.dot(np.random.randn(n_samples, n_features), cov)
    class1 = np.dot(np.random.randn(n_samples, n_features), cov.T) + np.array([1, 4])
    X = np.r_[class0, class1]
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
    """Plot one dataset/classifier pair into cell *fig_index* of a 2x2 grid.

    Correctly classified points are drawn as dots, misclassified points
    as stars; the background shows the classifier's class-1 probability.
    Returns the subplot so the caller can add covariance ellipses.
    """
    splot = plt.subplot(2, 2, fig_index)
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis')
        plt.ylabel('Data with fixed covariance')
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis')
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances')

    # Split points by class and by whether they were predicted correctly.
    tp = (y == y_pred)  # True Positive
    tp0, tp1 = tp[y == 0], tp[y == 1]
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]

    alpha = 0.5

    # class 0: dots (correct) and stars (misclassified)
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha,
             color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha,
             color='#990000')  # dark red

    # class 1: dots (correct) and stars (misclassified)
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha,
             color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha,
             color='#000099')  # dark blue

    # class 0 and 1 : areas — probability field on a 200x100 grid
    nx, ny = 200, 100
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    # Decision boundary at P(class 1) = 0.5.
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')

    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)

    return splot
def plot_ellipse(splot, mean, cov, color):
    """Draw a two-standard-deviation covariance ellipse at *mean* on *splot*."""
    # Eigendecomposition gives the ellipse's axis lengths and orientation.
    v, w = linalg.eigh(cov)
    u = w[0] / linalg.norm(w[0])
    angle = np.arctan(u[1] / u[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                              180 + angle, facecolor=color, edgecolor='yellow',
                              linewidth=2, zorder=2)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(0.5)
    splot.add_artist(ell)
    # Tick marks add nothing here; drop them for a cleaner panel.
    splot.set_xticks(())
    splot.set_yticks(())
def plot_lda_cov(lda, splot):
    """Draw the shared LDA covariance ellipse for each of the two classes."""
    for mean, color in zip(lda.means_, ('red', 'blue')):
        plot_ellipse(splot, mean, lda.covariance_, color)
def plot_qda_cov(qda, splot):
    """Draw each class's own QDA covariance ellipse."""
    for mean, cov, color in zip(qda.means_, qda.covariances_, ('red', 'blue')):
        plot_ellipse(splot, mean, cov, color)
###############################################################################
# Fit LDA and QDA on both toy datasets and plot each fit into a 2x2 grid.
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # Linear Discriminant Analysis
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')
    # Quadratic Discriminant Analysis
    # NOTE(review): `store_covariances` was renamed `store_covariance` in
    # later scikit-learn releases — confirm against the pinned version.
    qda = QuadraticDiscriminantAnalysis(store_covariances=True)
    y_pred = qda.fit(X, y).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
nophie-123/sms-tools | software/transformations_interface/hpsMorph_function.py | 24 | 7354 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile1='../../sounds/violin-B3.wav', window1='blackman', M1=1001, N1=1024, t1=-100,
             minSineDur1=0.05, nH=60, minf01=200, maxf01=300, f0et1=10, harmDevSlope1=0.01, stocf=0.1,
             inputFile2='../../sounds/soprano-E4.wav', window2='blackman', M2=901, N2=1024, t2=-100,
             minSineDur2=0.05, minf02=250, maxf02=500, f0et2=10, harmDevSlope2=0.01):
    """
    Analyze two sounds with the harmonic plus stochastic model and plot
    both analyses (stochastic spectrogram with harmonic tracks on top).

    The ``*1``/``*2`` parameters configure the analysis of the first and
    second sound respectively: window type, window size M, FFT size N,
    magnitude threshold t, minimum sinusoid duration, f0 search range and
    error, and allowed harmonic deviation slope.  ``nH`` (max harmonics)
    and ``stocf`` (stochastic decimation factor) are shared.

    Returns inputFile1, fs1, hfreq1, hmag1, stocEnv1,
            inputFile2, hfreq2, hmag2, stocEnv2
    """
    Ns = 512   # FFT size used at synthesis time
    H = 128    # hop size (must be Ns/4)

    # read both input sounds and build their analysis windows
    (fs1, x1) = UF.wavread(inputFile1)
    (fs2, x2) = UF.wavread(inputFile2)
    w1 = get_window(window1, M1)
    w2 = get_window(window2, M2)

    # harmonic plus stochastic analysis of each sound
    hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
    hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)

    plt.figure(figsize=(12, 9))
    maxplotfreq = 15000.0  # upper frequency limit of the plots

    def plot_sound(panel, stocEnv, hfreq, fs, title):
        # Shared plotting for one sound: stochastic spectrogram plus the
        # harmonic tracks drawn on top of it.
        # NOTE(review): the slice bound sizeEnv*maxplotfreq/(.5*fs)+1 is a
        # float, which only works as an index on python2 with old numpy —
        # confirm the target environment.
        plt.subplot(2, 1, panel)
        numFrames = int(stocEnv[:, 0].size)
        sizeEnv = int(stocEnv[0, :].size)
        frmTime = H * np.arange(numFrames) / float(fs)
        binFreq = (.5 * fs) * np.arange(sizeEnv * maxplotfreq / (.5 * fs)) / sizeEnv
        plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv[:, :sizeEnv * maxplotfreq / (.5 * fs) + 1]))
        plt.autoscale(tight=True)
        if (hfreq.shape[1] > 0):
            # Mask harmonics above the plot range so they are not drawn.
            harms = np.copy(hfreq)
            harms = harms * np.less(harms, maxplotfreq)
            harms[harms == 0] = np.nan
            numFrames = int(harms[:, 0].size)
            frmTime = H * np.arange(numFrames) / float(fs)
            plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
            plt.xlabel('time (sec)')
            plt.ylabel('frequency (Hz)')
            plt.autoscale(tight=True)
            plt.title(title)

    plot_sound(1, stocEnv1, hfreq1, fs1, 'harmonics + stochastic spectrogram of sound 1')
    plot_sound(2, stocEnv2, hfreq2, fs2, 'harmonics + stochastic spectrogram of sound 2')

    plt.tight_layout()
    plt.show(block=False)

    return inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2
def transformation_synthesis(inputFile1, fs, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2,
                             hfreqIntp=np.array([0, 0, .1, 0, .9, 1, 1, 1]),
                             hmagIntp=np.array([0, 0, .1, 0, .9, 1, 1, 1]),
                             stocIntp=np.array([0, 0, .1, 0, .9, 1, 1, 1])):
    """
    Transform the analysis values returned by the analysis function and synthesize the sound.

    inputFile1: name of input file 1
    fs: sampling rate of input file 1
    hfreq1, hmag1, stocEnv1: hps representation of sound 1
    inputFile2: name of input file 2
    hfreq2, hmag2, stocEnv2: hps representation of sound 2
    hfreqIntp: interpolation factor between the harmonic frequencies of the two sounds,
        0 is sound 1 and 1 is sound 2 (time,value pairs)
    hmagIntp: interpolation factor between the harmonic magnitudes of the two sounds,
        0 is sound 1 and 1 is sound 2 (time,value pairs)
    stocIntp: interpolation factor between the stochastic representation of the two sounds,
        0 is sound 1 and 1 is sound 2 (time,value pairs)
    """
    # size of fft used in synthesis
    Ns = 512
    # hop size (has to be 1/4 of Ns)
    H = 128

    # morph the two sounds
    yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2,
                                            hfreqIntp, hmagIntp, stocIntp)

    # synthesis
    y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)

    # write output sound
    outputFile = 'output_sounds/' + os.path.basename(inputFile1)[:-4] + '_hpsMorph.wav'
    UF.wavwrite(y, fs, outputFile)

    # create figure to plot
    plt.figure(figsize=(12, 9))

    # frequency range to plot
    maxplotfreq = 15000.0

    # plot spectrogram of transformed stochastic component
    plt.subplot(2, 1, 1)
    numFrames = int(ystocEnv[:, 0].size)
    sizeEnv = int(ystocEnv[0, :].size)
    frmTime = H * np.arange(numFrames) / float(fs)
    binFreq = (.5 * fs) * np.arange(sizeEnv * maxplotfreq / (.5 * fs)) / sizeEnv
    # Bug fix: slice indices must be integers; the original float expression
    # raises a TypeError on Python 3 / modern numpy.
    maxBin = int(sizeEnv * maxplotfreq / (.5 * fs)) + 1
    plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:, :maxBin]))
    plt.autoscale(tight=True)

    # plot transformed harmonics on top of the stochastic spectrogram
    if (yhfreq.shape[1] > 0):
        harms = np.copy(yhfreq)
        harms = harms * np.less(harms, maxplotfreq)
        harms[harms == 0] = np.nan
        numFrames = int(harms[:, 0].size)
        frmTime = H * np.arange(numFrames) / float(fs)
        plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
    plt.xlabel('time (sec)')
    plt.ylabel('frequency (Hz)')
    plt.autoscale(tight=True)
    plt.title('harmonics + stochastic spectrogram')

    # plot the output sound
    plt.subplot(2, 1, 2)
    plt.plot(np.arange(y.size) / float(fs), y)
    plt.axis([0, y.size / float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')

    plt.tight_layout()
    plt.show()
# Run the full demo when executed as a script: analyze both input sounds,
# then morph and synthesize the result (and show the plots).
if __name__ == "__main__":
    # analysis
    inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2 = analysis()

    # transformation and synthesis
    transformation_synthesis(inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2)

    plt.show()
| agpl-3.0 |
jason-neal/equanimous-octo-tribble | octotribble/extraction/dracs_quicklooks2017.py | 1 | 10650 | # DRACS Output quicklook/status.
# Plot all 8 reduced spectra
# Plot all 8 normalized reduced spectra
# plot mean combined and median combined spectra.
from __future__ import division, print_function
import argparse
import os
import sys
from os.path import join
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from tqdm import tqdm
from octotribble.Get_filenames import get_filenames
def _parser():
    """Build the command-line interface and parse ``sys.argv``.

    :returns: the parsed argument namespace
    """
    parser = argparse.ArgumentParser(description="Make Dracs quicklook plots.")
    parser.add_argument("-p", "--showplots",
                        default=False, action="store_true",
                        help="Show the plots.")
    parser.add_argument("-c", "--chip",
                        default=None, choices=["1", "2", "3", "4"],
                        help="Specify chip to plot. Default is all.")
    parser.add_argument("-b", "--band",
                        default=None, choices=["1", "2", "3"],
                        help="Specify band to plot. Default is all.")
    return parser.parse_args()
def main(chip=None, band=None, showplots=False):
    """Create and save quicklook plots for DRACS-reduced CRIRES spectra.

    For each requested chip this plots the extracted nod spectra, the
    normalized nod spectra, the mean/median combined spectra and their
    difference, saving figures into ``images/`` under the current working
    directory (which must contain ``Intermediate_steps`` and
    ``Combined_Nods``).

    Parameters
    ----------
    chip : str or int, optional
        Chip number 1-4. ``None`` (default) processes all four chips.
    band : str or int, optional
        Band number 1-3 (1-based, as on the command line). ``None``
        (default) processes all three bands.
    showplots : bool, optional
        If True, also display each figure interactively.

    Returns
    -------
    int
        0 on success (used as the process exit code).
    """
    if chip is None:
        chip = range(1, 5)
    else:
        chip = [int(chip)]
    if band is None:
        band = range(3)
    else:
        # Bug fix: this branch previously assigned to ``chip`` instead of
        # ``band``.  The CLI band choices are 1-based while ``band`` is used
        # as a 0-based array index below, so shift by one.
        band = [int(band) - 1]

    # Directory layout produced by the DRACS reduction.
    dir_path = os.getcwd()
    intermediate_path = os.path.join(dir_path, "Intermediate_steps")
    combined_path = os.path.join(dir_path, "Combined_Nods")
    image_path = os.path.join(dir_path, "images")
    observation_name = os.path.split(dir_path)[-1]

    for chip_num in tqdm(chip):
        print("Starting Chip # {}".format(chip_num))

        combined_name = get_filenames(
            combined_path, "CRIRE*norm.sum.fits", "*_{0}.*".format(chip_num)
        )
        nod_names = get_filenames(
            intermediate_path, "CRIRE*.ms.fits", "*_{0}.*".format(chip_num)
        )
        norm_names = get_filenames(
            intermediate_path, "CRIRE*.ms.norm.fits", "*_{0}.*".format(chip_num)
        )
        print("combined_names", combined_name)
        print("nod names", nod_names)
        print("norm names", norm_names)

        combined_data = fits.getdata(join(combined_path, combined_name[0]))
        print("length of combined_data =", len(combined_data))

        if len(combined_data) == 3:
            # Optimal-extraction products: remember band 0 (optimal) and
            # band 1 (non-optimal) results to compare them after the loop.
            optimal_median = None
            optimal_mean = None
            non_optimal_median = None
            non_optimal_mean = None
            for indx in band:
                print("index of extras ", indx)
                nod_data = [fits.getdata(join(intermediate_path, name))[indx, 0]
                            for name in nod_names]
                norm_data = [fits.getdata(join(intermediate_path, name))[indx, 0]
                             for name in norm_names]
                median_nod = np.median(norm_data, axis=0)  # median-combine normalized spectra
                mean_nod = combined_data[indx][0]  # from the norm.sum.fits file

                # Mask unphysical values so the plots scale sensibly.
                # (A dead mean-median difference computed here was removed;
                # it is recomputed below after masking, which is what plot 4
                # actually uses.)
                mean_mask = (mean_nod > 1.15) | (mean_nod < 0.0)
                mean_nod[mean_mask] = np.nan
                median_mask = (median_nod > 1.15) | (median_nod < 0.0)
                median_nod[median_mask] = np.nan

                # Plot results
                fig = plt.figure(figsize=(10, 10))
                plt.suptitle(
                    "{0}, Chip-{1}".format(observation_name, chip_num), fontsize=16
                )
                ax1 = plt.subplot(411)
                for i, data in enumerate(nod_data):
                    # Limit the displayed data range to help the scaling.
                    data_mask = (data > 4 * np.median(data)) | (data < 0.0)
                    data[data_mask] = np.nan
                    ax1.plot(data, label=i + 1)
                plt.ylabel("Intensity")
                plt.title("Extracted Nod Spectra")
                plt.xlim([0, 1024])
                plt.tight_layout(pad=2.5, w_pad=0.0, h_pad=0.5)
                ax1.legend()

                print("starting plot 2")
                ax2 = plt.subplot(412)
                for data in norm_data:
                    data_mask = (data > 4 * 1.2) | (data < 0.0)
                    data[data_mask] = np.nan
                    ax2.plot(data)
                plt.ylabel("Normalized\nIntensity")
                plt.title("Normalized Nod Spectra")
                plt.xlim([0, 1024])

                print("starting plot 3")
                ax3 = plt.subplot(413)
                ax3.plot(mean_nod, label="Nod Mean")
                ax3.plot(median_nod, "--r", label="Nod Median")
                plt.ylabel("Normalized\nIntensity")
                plt.title("Combined Nod Spectra")
                ax3.legend(loc=0)
                plt.xlim([0, 1024])
                plt.tight_layout(pad=2.5, w_pad=0.0, h_pad=0.5)

                print("starting plot 4")
                mean_median_diff = mean_nod - median_nod
                # Mask out the large differences.
                mean_median_diff[np.abs(mean_median_diff) > 0.02] = np.nan
                ax4 = plt.subplot(414)
                ax4.plot(mean_median_diff, label="Mean-median")
                plt.xlabel("Pixel Position", fontsize=10)
                plt.ylabel("Flux diff")
                plt.title("Mean-median difference.")
                ax4.legend(loc=0)
                plt.xlim([0, 1024])
                plt.tight_layout(pad=2.5, w_pad=0.0, h_pad=0.5)

                if showplots:
                    plt.show()
                print("Saving ...")
                fig.savefig(join(
                    image_path, "quicklook_{0}_{1}_reduction_band{2}.png".format(
                        observation_name, chip_num, indx + 1
                    )
                ))
                plt.close(fig)

                if indx == 0:
                    optimal_median = median_nod
                    optimal_mean = mean_nod
                    print(np.max(optimal_mean))
                    print(np.max(optimal_median))
                elif indx == 1:
                    non_optimal_median = median_nod
                    non_optimal_mean = mean_nod
                    print(np.max(non_optimal_mean))
                    print(np.max(non_optimal_median))

            # Compare the optimal and non-optimal combined spectra, but only
            # when both bands were processed (a single-band run would
            # otherwise crash here on the missing operand).
            if optimal_mean is not None and non_optimal_mean is not None:
                diff_of_mean = optimal_mean - non_optimal_mean
                diff_of_median = optimal_median - non_optimal_median
                print("diff_of_mean", diff_of_mean)

                fig = plt.figure(figsize=(10, 8))
                plt.suptitle(
                    "{0}, Chip-{1}".format(observation_name, chip_num), fontsize=16
                )
                ax1 = plt.subplot(111)
                plt.plot(diff_of_mean, label="mean")
                plt.plot(diff_of_median, "--", label="median")
                plt.ylim([-0.02, 0.02])  # Limit to +- 2%
                plt.title("Differences between optimal - non-optimal combined spectra.")
                plt.ylabel("Flux diff")
                plt.legend(loc=0)
                fig.savefig(join(
                    image_path,
                    "combine_diff_{0}_{1}_reduction_opt_minus_nonopt.png".format(
                        observation_name, chip_num
                    )
                ))
                if showplots:
                    plt.show()
                plt.close(fig)
        else:
            # Non-optimal (single spectrum per file) reduction products.
            nod_data = [fits.getdata(join(intermediate_path, name)) for name in nod_names]
            norm_data = [fits.getdata(join(intermediate_path, name)) for name in norm_names]
            median_nod = np.median(
                norm_data, axis=0
            )  # median-combine normalized spectra

            # Plot results
            fig = plt.figure()
            plt.suptitle(
                "{0}, Chip-{1}".format(observation_name, chip_num), fontsize=16
            )
            ax1 = plt.subplot(311)
            for i, data in enumerate(nod_data):
                ax1.plot(data, label=i + 1)
            plt.ylabel("Intensity")
            plt.title("Extracted Nod Spectra")
            plt.xlim([0, 1024])

            ax2 = plt.subplot(312)
            for data in norm_data:
                ax2.plot(data)
            plt.ylabel("Normalized\nIntensity")
            plt.title("Normalized Nod Spectra")
            plt.xlim([0, 1024])
            start, end = ax2.get_ylim()
            ax2.yaxis.set_ticks(np.arange(start, end, 0.1))

            ax3 = plt.subplot(313)
            ax3.plot(combined_data, label="Nod Mean")
            ax3.plot(median_nod, "--r", label="Nod Median")
            plt.xlabel("Pixel Position", fontsize=14)
            plt.ylabel("Normalized\nIntensity")
            plt.title("Combined Nod Spectra")
            plt.legend(loc=0)
            plt.xlim([0, 1024])
            start, end = ax3.get_ylim()
            ax3.yaxis.set_ticks(np.arange(start, end, 0.1))
            plt.tight_layout(pad=2.5, w_pad=0.0, h_pad=0.5)
            if showplots:
                plt.show()

            # Save figure
            fig.savefig(join(
                image_path, "quicklook_{0}_{1}_reduction.pdf".format(observation_name, chip_num)
            ))
            fig.savefig(join(
                image_path,
                "quicklook_{0}_{1}_reduction.png".format(observation_name, chip_num)
            ))
            plt.close(fig)
    return 0
if __name__ == "__main__":
    # Parse the CLI options and forward them to main() as keyword arguments.
    # vars() already returns a plain dict, so the previous dict-comprehension
    # copy was redundant.
    sys.exit(main(**vars(_parser())))
| mit |
SanPen/CopperPlate | Gui/matplotlibwidget.py | 1 | 8175 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtWidgets import *
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as Navigationtoolbar
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
plt.style.use('fivethirtyeight')
# plt.ion()
class MplCanvas(FigureCanvas):
    """Matplotlib canvas widget with scroll-wheel zoom (and optional panning).

    The zoom handler keeps the point under the cursor fixed while scaling
    the axes limits; the last zoom window is remembered so it can be
    restored after a redraw (see :meth:`set_last_zoom`).
    """

    def __init__(self):
        # Pan state (used by the pan_factory handlers).
        self.press = None
        self.cur_xlim = None
        self.cur_ylim = None
        self.x0 = None
        self.y0 = None
        self.x1 = None
        self.y1 = None
        self.xpress = None
        self.ypress = None
        # Last zoom window as [min, max] pairs (None until the first zoom).
        self.zoom_x_limits = None
        self.zoom_y_limits = None

        self.fig = Figure()
        # Bug fix: the 'axisbg' keyword was deprecated in matplotlib 1.5 and
        # removed in 2.0; 'facecolor' is the supported equivalent.
        self.ax = self.fig.add_subplot(111, facecolor='white')

        FigureCanvas.__init__(self, self.fig)
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

        scale = 1.2
        self.zoom_factory(self.ax, base_scale=scale)
        # p = self.pan_factory(self.ax)

        # Drag/pick state.  The commented-out pick-event handlers that used
        # these were removed as dead code, but the attributes are kept in
        # case external code inspects them.
        self.dragged = None
        self.element_dragged = None
        self.pick_pos = (0, 0)
        self.is_point = False
        self.index = None

    def setTitle(self, text):
        """
        Sets the figure title
        """
        self.fig.suptitle(text)

    def set_graph_mode(self):
        """
        Sets the borders to nicely display graphs
        """
        self.fig.subplots_adjust(left=0, bottom=0, right=1, top=0.9, wspace=0, hspace=0)

    def zoom_factory(self, ax, base_scale=1.2):
        """
        Create and connect a scroll-wheel zoom handler for the given axes.

        ax: the axes to attach the handler to
        base_scale: zoom factor applied per wheel click
        :return: the handler function (already connected to 'scroll_event')
        """
        def zoom(event):
            cur_xlim = ax.get_xlim()
            cur_ylim = ax.get_ylim()

            xdata = event.xdata  # get event x location
            ydata = event.ydata  # get event y location

            if event.button == 'down':
                # deal with zoom in
                scale_factor = 1 / base_scale
            elif event.button == 'up':
                # deal with zoom out
                scale_factor = base_scale
            else:
                # deal with something that should never happen
                scale_factor = 1

            new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
            new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor

            # Keep the point under the cursor fixed while zooming.
            relx = (cur_xlim[1] - xdata) / (cur_xlim[1] - cur_xlim[0])
            rely = (cur_ylim[1] - ydata) / (cur_ylim[1] - cur_ylim[0])

            self.zoom_x_limits = [xdata - new_width * (1 - relx), xdata + new_width * relx]
            self.zoom_y_limits = [ydata - new_height * (1 - rely), ydata + new_height * rely]

            ax.set_xlim(self.zoom_x_limits)
            ax.set_ylim(self.zoom_y_limits)
            ax.figure.canvas.draw()

        fig = ax.get_figure()  # get the figure of interest
        fig.canvas.mpl_connect('scroll_event', zoom)

        return zoom

    def rec_zoom(self):
        """Record the current axes limits as the zoom window."""
        self.zoom_x_limits = self.ax.get_xlim()
        self.zoom_y_limits = self.ax.get_ylim()

    def set_last_zoom(self):
        """Restore the previously recorded zoom window, if any."""
        if self.zoom_x_limits is not None:
            self.ax.set_xlim(self.zoom_x_limits)
            self.ax.set_ylim(self.zoom_y_limits)

    def pan_factory(self, ax):
        """
        Create and connect press/release/motion handlers implementing
        click-and-drag panning on the given axes.

        :return: the motion handler function
        """
        def onPress(event):
            if event.inaxes != ax:
                return
            self.cur_xlim = ax.get_xlim()
            self.cur_ylim = ax.get_ylim()
            self.press = self.x0, self.y0, event.xdata, event.ydata
            self.x0, self.y0, self.xpress, self.ypress = self.press

        def onRelease(event):
            self.press = None
            ax.figure.canvas.draw()

        def onMotion(event):
            if self.press is None:
                return
            if event.inaxes != ax:
                return
            dx = event.xdata - self.xpress
            dy = event.ydata - self.ypress
            # Bug fix: get_xlim()/get_ylim() return tuples, which do not
            # support in-place subtraction; shift the limits explicitly.
            self.cur_xlim = (self.cur_xlim[0] - dx, self.cur_xlim[1] - dx)
            self.cur_ylim = (self.cur_ylim[0] - dy, self.cur_ylim[1] - dy)
            ax.set_xlim(self.cur_xlim)
            ax.set_ylim(self.cur_ylim)
            ax.figure.canvas.draw()

        fig = ax.get_figure()  # get the figure of interest

        # attach the callbacks
        fig.canvas.mpl_connect('button_press_event', onPress)
        fig.canvas.mpl_connect('button_release_event', onRelease)
        fig.canvas.mpl_connect('motion_notify_event', onMotion)

        # return the function
        return onMotion
class MatplotlibWidget(QWidget):
    """Qt widget embedding an :class:`MplCanvas` plus the navigation toolbar."""

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.frame = QWidget()

        self.canvas = MplCanvas()
        self.canvas.setParent(self.frame)

        self.mpltoolbar = Navigationtoolbar(self.canvas, self.frame)

        layout = QVBoxLayout()
        layout.addWidget(self.canvas)
        layout.addWidget(self.mpltoolbar)
        self.vbl = layout
        self.setLayout(self.vbl)

        self.mpltoolbar.toggleViewAction()

    def setTitle(self, text):
        """
        Sets the figure title
        """
        self.canvas.setTitle(text)

    def get_axis(self):
        """Return the canvas axes."""
        return self.canvas.ax

    def get_figure(self):
        """Return the canvas figure."""
        return self.canvas.fig

    def clear(self, force=False):
        """Clear the axes; with ``force=True`` rebuild the whole figure."""
        if not force:
            self.canvas.ax.clear()
        else:
            self.canvas.fig.clear()
            self.canvas.ax = self.canvas.fig.add_subplot(111)

    def redraw(self):
        """Force a redraw of the canvas."""
        self.canvas.ax.figure.canvas.draw()

    def plot(self, x, y, title='', xlabel='', ylabel=''):
        """Plot ``y`` against ``x`` with an optional title, then redraw."""
        self.setTitle(title)
        self.canvas.ax.plot(x, y)
        self.redraw()
| gpl-3.0 |
sbrisard/janus | sphinx/tutorials/square_basic/square_basic.py | 1 | 3972 | # Begin: imports
import itertools
import numpy as np
import matplotlib.pyplot as plt
import janus.green as green
import janus.fft.serial as fft
import janus.material.elastic.linear.isotropic as material
import janus.operators as operators
from janus.operators import isotropic_4
# End: imports
# Begin: init
class Example:
    """Periodic inclusion-in-matrix elasticity problem on an n**dim grid.

    Combines the local polarization operator eps -> (C - C_0):eps with the
    truncated Green operator of the reference material mat_0.
    The ``# Begin/End`` comments are markers for the documentation's
    literalinclude snippets -- keep them in place.
    """

    def __init__(self, mat_i, mat_m, mat_0, n, a=0.5, dim=3):
        # mat_i / mat_m / mat_0: inclusion, matrix and reference materials
        # n: number of cells along each side of the grid
        # a: inclusion size as a fraction of the unit cell
        # dim: spatial dimension
        self.mat_i = mat_i
        self.mat_m = mat_m
        self.n = n
        shape = tuple(itertools.repeat(n, dim))
        # ...
        # End: init
        # Begin: create (C_i - C_0) and (C_m - C_0)
        # ...
        delta_C_i = isotropic_4(dim * (mat_i.k - mat_0.k),
                                2 * (mat_i.g - mat_0.g), dim)
        delta_C_m = isotropic_4(dim * (mat_m.k - mat_0.k),
                                2 * (mat_m.g - mat_0.g), dim)
        # ...
        # End: create (C_i - C_0) and (C_m - C_0)
        # Begin: create local operator ε ↦ (C-C_0):ε
        # ...
        ops = np.empty(shape, dtype=object)
        ops[:, :] = delta_C_m
        imax = int(np.ceil(n * a - 0.5))
        ops[:imax, :imax] = delta_C_i
        self.eps_to_tau = operators.BlockDiagonalOperator2D(ops)
        # ...
        # End: create local operator ε ↦ (C-C_0):ε
        # Begin: create non-local operator ε ↦ Γ_0[ε]
        # ...
        self.green = green.truncated(mat_0.green_operator(),
                                     shape, 1.,
                                     fft.create_real(shape))
        # End: create non-local operator ε ↦ Γ_0[ε]

    # Begin: apply
    def apply(self, x, out=None):
        """Apply eps -> Gamma_0[(C - C_0):eps] to x, storing the result in out.

        If out is None a new array is allocated.  Returns out.
        """
        if out is None:
            out = np.zeros_like(x)
        self.eps_to_tau.apply(x, out)
        self.green.apply(out, out)
        # Bug fix: previously nothing was returned, so a freshly allocated
        # ``out`` was silently lost when the caller did not pass one.
        return out
    # End: apply
# Begin: params
if __name__ == '__main__':
    dim = 2                  # Spatial dimension
    sym = (dim*(dim+1))//2   # Dim. of space of second rank, symmetric tensors
    n = 256                  # Number of cells along each side of the grid
    mu_i, nu_i = 100, 0.2    # Shear modulus and Poisson ratio of inclusion
    mu_m, nu_m = 1, 0.3      # Shear modulus and Poisson ratio of matrix
    mu_0, nu_0 = 50, 0.3     # Shear modulus and Poisson ratio of ref. mat.
    num_cells = n**dim       # Total number of cells
    # End: params

    # Begin: instantiate example
    example = Example(mat_i=material.create(mu_i, nu_i, dim),
                      mat_m=material.create(mu_m, nu_m, dim),
                      mat_0=material.create(mu_0, nu_0, dim),
                      n=n,
                      dim=dim)
    # End: instantiate example

    # Begin: define strains
    # Macroscopic strain: last (shear) component set to 1 in Mandel-Voigt
    # notation; eps / new_eps are the two work buffers for the iterations.
    avg_eps = np.zeros((sym,), dtype=np.float64)
    avg_eps[-1] = 1.0
    eps = np.empty(example.green.ishape, dtype=np.float64)
    new_eps = np.empty_like(eps)
    # End: define strains

    # Begin: iterate
    # Basic (fixed-point) scheme: new_eps <- avg_eps - Gamma_0[(C - C_0):eps],
    # recording the normalized residual at each step.  The buffers are
    # swapped (not copied) at the end of each iteration.
    num_iter = 400
    res = np.empty((num_iter,), dtype=np.float64)
    eps[...] = avg_eps
    normalization = 1/np.sqrt(num_cells)/np.linalg.norm(avg_eps)
    for i in range(num_iter):
        example.apply(eps, out=new_eps)
        np.subtract(avg_eps, new_eps, out=new_eps)
        res[i] = normalization*np.linalg.norm(new_eps-eps)
        eps, new_eps = new_eps, eps
    # End: iterate

    # Begin: post-process
    # Effective stiffness estimate from the volume average of the
    # polarization tau = (C - C_0):eps.
    tau = example.eps_to_tau.apply(eps)
    avg_tau = np.mean(tau, axis=tuple(range(dim)))
    C_1212 = mu_0+0.5*avg_tau[-1]/avg_eps[-1]
    print(C_1212)

    # Convergence history of the fixed-point iterations.
    fig, ax = plt.subplots()
    ax.set_xlabel('Number of iterations')
    ax.set_ylabel('Normalized residual')
    ax.loglog(res)
    fig.tight_layout(pad=0.2)
    fig.savefig('residual.png', transparent=True)

    # Maps of the three strain components on the unit cell.
    fig, ax_array = plt.subplots(nrows=1, ncols=3)
    width, height = fig.get_size_inches()
    fig.set_size_inches(width, width/3)
    for i, ax in enumerate(ax_array):
        ax.set_axis_off()
        ax.imshow(eps[..., i], interpolation='nearest')
    fig.tight_layout(pad=0)
    fig.savefig('eps.png', transparent=True)
    # End: post-process
| bsd-3-clause |
sys-bio/tellurium | docs/conf.py | 1 | 10266 | # -*- coding: utf-8 -*-
#
# tellurium documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 22 13:08:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
from mock import Mock as MagicMock
# Mock things for readthedoc build
# Mock heavy/compiled dependencies so autodoc can import tellurium on
# ReadTheDocs, where these packages are not installed.
class Mock(MagicMock):
    @classmethod
    def __getattr__(cls, name):
        return Mock()

# Bug fix: commas were missing after 'pygraphviz', 'numpy', 'matplotlib' and
# 'ipython', so Python's implicit string concatenation collapsed the last
# five entries into one bogus module name.
MOCK_MODULES = ['roadrunner',
                'roadrunner.testing',
                'antimony',
                'libsbml',
                'libsedml',
                'phrasedml',
                'sbml2matlab',
                'sedml2py',
                'pygraphviz',
                'numpy',
                'matplotlib',
                'ipython',
                'ipywidgets']
# sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('../tellurium'))
sys.path.append(os.path.join(os.path.dirname(__name__), '..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tellurium'
copyright = u'2014-2019, Kiri Choi, J Kyle Medley, Matthias König, Kaylene Stocking, Caroline Cannistra, Michal Galdzicki, and Herbert Sauro'
author = u'Kiri Choi, J Kyle Medley, Matthias König, Kaylene Stocking, Caroline Cannistra, Michal Galdzicki, and Herbert Sauro'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = None  # overwritten just below; the docs version is single-sourced
# Read the package version from tellurium/VERSION.txt (path is relative to
# this conf.py file, not the build cwd).
with open(os.path.join(os.path.dirname(__file__), '../tellurium/VERSION.txt'), 'r') as f:
    version = str(f.read().rstrip())
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = './images/tellurium_logo_50.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = './images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'telluriumdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tellurium.tex', u'tellurium Documentation',
u'sys-bio', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tellurium', u'tellurium Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tellurium', u'tellurium Documentation',
author, 'tellurium', 'Integrated dynamical modeling environment..',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
m4rx9/rna-pdb-tools | rna_tools/tools/PyMOL4RNA/PyMOL4Spliceosome.py | 2 | 11183 | """
See the PyMOL Sessions processed with this code here <https://github.com/mmagnus/PyMOL4Spliceosome>
"""
from pymol import cmd
from rna_tools.tools.PyMOL4RNA import code_for_color_spl
from rna_tools.tools.PyMOL4RNA import code_for_spl
try:
from pymol import cmd
except ImportError:
print("PyMOL Python lib is missing")
# sys.exit(0)
def spl(arg=''):
    """Spliceosome helper command: ``spl [action [name]]``.

    Actions:
      (empty) / help      show the help text
      color / c           color the spliceosome components
      extract all / ea / e  extract all components
      hprp28 / hprp8 / prp8  color the domains of the given protein
      align / a           align structures onto 5lj3 chain V
      h<domain>           select a human PRP8 domain
                          (hjab, hlinker, hrt, hrh, he)
    """
    if ' ' in arg:
        action, name = arg.split()
        name = name.lower()
    else:
        action = arg
        name = ''
    #import pandas as pd
    #df = pd.read_excel("/home/magnus/Desktop/pyMoL_colors-EMX.xlsx")
    if not action or action == 'help':
        spl_help()
    elif action == 'color' or arg == 'c':
        code_for_color_spl.spl_color()
    elif arg == 'extract all' or arg == 'ea' or arg == 'e':
        code_for_spl.spl_extract()
    elif arg.startswith('hprp28'):
        cmd.do("color purple, PRP28_h* and resi 240-361")  # RecA1
        cmd.do("color blue, PRP28_h* and resi 361-631")    # NOTE(review): comment said RecA1 again; presumably RecA2 -- confirm
        cmd.do("color orange, PRP28_h* and resi 631-811")  # RecA2 per original comment -- confirm against the line above
    elif arg.startswith('hprp8'):
        # NOTE(review): the printed ranges below match the yeast numbering in
        # the 'prp8' branch -- verify they apply to the human protein.
        print("RT, skyblue, 885-1251")
        print("Thumb/X, cyan, 1257-1375")
        cmd.do("color yellow, PRP8_h* and resi 1581-1752")  # endo
        cmd.do("color wheat, PRP8_h* and resi 1767-2020")   # rh
        cmd.do("color salmon, PRP8_h* and resi 2103-2234")  # jab
        cmd.do("color smudge, PRP8_h* and resi 1304-1577")  # linker
        cmd.do("color skyblue, PRP8_h* and resi 812-1303")  # rt
    elif arg.startswith('prp8'):
        print("RT, skyblue, 885-1251")
        print("Thumb/X, cyan, 1257-1375")
        cmd.do("color skyblue, PRP8_y* and resi 885-1251")  # rt
        cmd.do("color cyan, PRP8_y* and resi 1257-1375")    # thumb/x
        cmd.do("color smudge, PRP8_y* and resi 1376-1649")  # linker
        cmd.do("color wheat, PRP8_y* and resi 1840-2090")   # rh
        cmd.do("color salmon, PRP8_y* and resi 2150-2395")  # jab
        cmd.do("color yellow, PRP8_y* and resi 1650-1840")  # endo
    elif any(key in arg.lower() for key in ('hjab', 'hlinker', 'hrt', 'hrh', 'he')):
        # Bug fix: this branch used ``elif arg.startswith('')``, which is
        # always True, so the 'align' branch and the final help fallback
        # below were unreachable.
        if 'hjab' in arg.lower():
            cmd.select('PRP8_h* and resi 2103-2234')
        if 'hlinker' in arg.lower():
            cmd.select('PRP8_h* and resi 1304-1577')
        if 'hrt' in arg.lower():
            cmd.select('PRP8_h* and resi 812-1303')
        if 'hrh' in arg.lower():
            cmd.select('PRP8_h* and resi 1767-2020')
        if 'he' in arg.lower():
            cmd.select('PRP8_h* and resi 1581-1752')
    elif arg == 'align' or arg == 'a':
        cmd.do("""
    align /5gm6//6, /5lj3//V;
    align /5mps//6, /5lj3//V;
    align /6exn//6, /5lj3//V;
    align /5y88//D, /5lj3//V;
    align /5ylz//D, /5lj3//V;
""")
    else:
        spl_help()


cmd.extend('spl', spl)
def spl_help():
    """Print the quick-reference help banner for the ``spl`` command."""
    banner = ("################ SPL #################\n"
              "extract all (ea) - show\n"
              "colors - list all colors\n"
              "######################################\n"
              "    ")
    print(banner)
spl_help()
def __spl_color():
    # Color chains according to a (protein, chain, color) mapping table.
    # NOTE(review): `mapping` is not defined anywhere in this module view --
    # calling this helper raises NameError unless `mapping` is injected at
    # module level elsewhere. Confirm before use.
    for m in mapping:
        protein = m[0]
        chain = m[1]
        color = m[2]
        print('\_' + ' '.join([protein, chain, color]))
        cmd.do('color ' + color + ', chain ' + chain)
    # cmd.do('color firebrick, chain V') # U6
def _spl_color():
    """Color spl RNAs (for only color spl RNA and use 4-color code for
    residues see `spl2`).

    First colors whole objects by their snRNA/exon/intron identity, then
    applies per-chain colors for the known spliceosome structures
    (tri-snRNP, Shi structures, 5WSG C* complex, ...).

    Bug fixes over the historical version: the PyMOL colour names
    ``dirtyiolet`` and ``rasberry`` were misspellings (unknown to PyMOL and
    silently rejected) and are now ``dirtyviolet`` / ``raspberry``.
    """
    AllObj = cmd.get_names("all")
    for name in AllObj:
        if 'Exon' in name or 'exon' in name:
            cmd.color('yellow', name)
        if 'Intron' in name or 'intron' in name or '5splicing-site' in name:
            cmd.color('gray40', name)
        if '3exon-intron' in name.lower():
            cmd.color('gray20', name)
        if name.startswith("U2_snRNA"):
            cmd.color('forest', name)
        if name.startswith("U5_snRNA"):
            cmd.color('blue', name)
        if name.startswith("U4_snRNA"):
            cmd.color('orange', name)
        if name.startswith("U6_snRNA"):
            cmd.color('red', name)
    cmd.do('color gray')
    # tri-snRNP
    cmd.do('color orange, chain V') # conflict
    cmd.do('color red, chain W')
    cmd.do('color blue, chain U')
    #
    cmd.do('color blue, chain 5')
    cmd.do('color forest, chain 2')
    cmd.do('color red, chain 6')
    cmd.do('color orange, chain 4')
    cmd.do('color yellow, chain Y')
    # shi
    cmd.do('color blue, chain D') # u5
    cmd.do('color forest, chain L') # u2
    cmd.do('color red, chain E') # u6
    cmd.do('color yellow, chain M')
    cmd.do('color yellow, chain N')
    # after branching
    cmd.do('color blue, chain U') # u5
    cmd.do('color forest, chain Z') # u2
    cmd.do('color red, chain V') # u6
    cmd.do('color yellow, chain E')
    cmd.do('color black, chain I')
    # 5WSG
    # Cryo-EM structure of the Catalytic Step II spliceosome (C* complex) at 4.0 angstrom resolution
    cmd.do('color blue, chain D') # u5
    #cmd.do('color forest, chain L') # u2
    cmd.do('color yellow, chain B')
    cmd.do('color yellow, chain b')
    cmd.do('color black, chain N')
    cmd.do('color black, chain M')
    cmd.do('color black, chain 3') # orange
    cmd.do('color black, chain E') # yellow
    cmd.do('color black, chain i')
    cmd.do('color black, chain e')
    cmd.do('color black, chain e')
    cmd.do('color dirtyviolet, chain L') # bud31
    cmd.do('color raspberry, chain L') # CERF1 (overrides the bud31 colour above)
    cmd.do('color skyblue, chain A') # PRP8
    cmd.do('color grey60, chain B') # BRR2
    cmd.do('color dirtyviolet, chain L') # BUD31 (was misspelt 'dirtyiolet')
    cmd.do('color raspberry, chain O') # CEF1 (was misspelt 'rasberry')
    cmd.do('color raspberry, chain S') # CLF1 (was misspelt 'rasberry')
    cmd.do('color dirtyviolet, chain P') # CWC15
    cmd.do('color lightteal, chain D') # CWC16/YJU2
    cmd.do('color ruby, chain M') # CWC2
    cmd.do('color violetpurple, chain R') # CWC21
    cmd.do('color bluewhite, chain H') # CWC22
    cmd.do('color deepteal, chain F') # CWC25
    cmd.do('color black, chain I') # Intron
    cmd.do('color dirtyviolet, chain G') # ISY1
    cmd.do('color palegreen, chain W') # LEA1
    cmd.do('color palegreen, chain Y') # Msl1
    cmd.do('color lightpink, chain K') # PRP45
    cmd.do('color smudge, chain Q') # Prp16
    cmd.do('color grey70, chain t') # Prp19
    cmd.do('color lightblue, chain J') # PRP46
    cmd.do('color chocolate, chain N') # SLT11/ECM2
    cmd.do('color grey70, chain s') # Snt309
    cmd.do('color slate, chain C') # SNU114
    cmd.do('color brightorange, chain T') # SYF1
    cmd.do('color forest, chain Z') # U2
    cmd.do('color density, chain U') # U5
    cmd.do('color deepblue, chain b') # U5_Sm
    cmd.do('bg gray')
    # cmd.do('remove (polymer.protein)')
    cmd.set("cartoon_tube_radius", 1.0)
    # NOTE(review): ino() is defined elsewhere in the package (not visible
    # in this module chunk); presumably shows inorganic atoms -- confirm.
    ino()
def spl2():
    """Color spl RNAs and use 4-color code for residues (for only color spl
    RNA see `spl`).

    Unlike `_spl_color`, this variant removes all protein polymers and then
    recolors RNA residues by base identity (G red, C forest, A orange,
    U blue).
    """
    AllObj = cmd.get_names("all")
    # First pass: color whole objects by snRNA / exon / intron identity.
    for name in AllObj:
        if 'Exon' in name or 'exon' in name:
            cmd.color('yellow', name)
        if 'Intron' in name or 'intron' in name or '5splicing-site' in name:
            cmd.color('gray40', name)
        if '3exon-intron' in name.lower():
            cmd.color('gray20', name)
        if name.startswith("U2_snRNA"):
            cmd.color('forest', name)
        if name.startswith("U5_snRNA"):
            cmd.color('blue', name)
        if name.startswith("U4_snRNA"):
            cmd.color('orange', name)
        if name.startswith("U6_snRNA"):
            cmd.color('red', name)
    cmd.do('color gray')
    # tri-snRNP chain assignments
    cmd.do('color orange, chain V') # conflict
    cmd.do('color red, chain W')
    cmd.do('color blue, chain U')
    #
    cmd.do('color blue, chain 5')
    cmd.do('color forest, chain 2')
    cmd.do('color red, chain 6')
    cmd.do('color orange, chain 4')
    cmd.do('color yellow, chain Y')
    # shi
    cmd.do('color blue, chain D') # u5
    cmd.do('color forest, chain L') # u2
    cmd.do('color red, chain E') # u6
    cmd.do('color yellow, chain M')
    cmd.do('color yellow, chain N')
    # after branching
    cmd.do('color blue, chain U') # u5
    cmd.do('color forest, chain Z') # u2
    cmd.do('color red, chain V') # u6
    cmd.do('color yellow, chain E')
    cmd.do('color black, chain I')
    # 5WSG
    # Cryo-EM structure of the Catalytic Step II spliceosome (C* complex) at 4.0 angstrom resolution
    cmd.do('color blue, chain D') # u5
    #cmd.do('color forest, chain L') # u2
    cmd.do('color yellow, chain B')
    cmd.do('color yellow, chain b')
    cmd.do('color black, chain N')
    cmd.do('color black, chain M')
    cmd.do('color black, chain 3') # orange
    cmd.do('color black, chain E') # yellow
    cmd.do('color black, chain i')
    cmd.do('color black, chain e')
    cmd.do('bg gray')
    cmd.do('remove (polymer.protein)')
    # Per-residue 4-colour code; base atoms selected by residue and atom
    # names (trick adapted from Rhiju Das's code).
    cmd.color("red",'resn rG+G and name n1+c6+o6+c5+c4+n7+c8+n9+n3+c2+n1+n2')
    cmd.color("forest",'resn rC+C and name n1+c2+o2+n3+c4+n4+c5+c6')
    cmd.color("orange",'resn rA+A and name n1+c6+n6+c5+n7+c8+n9+c4+n3+c2')
    cmd.color("blue",'resn rU+U and name n3+c4+o4+c5+c6+n1+c2+o2')
    cmd.set("cartoon_tube_radius", 1.0)
    # NOTE(review): ino() is defined elsewhere in the package -- not
    # visible in this module chunk.
    ino()
def _spli():
    """Color snRNAs by identity, then recolor residues with the 4-colour code.

    # this trick is taken from Rhiju's Das code
    color red,resn rG+G and name n1+c6+o6+c5+c4+n7+c8+n9+n3+c2+n1+n2
    color forest,resn rC+C and name n1+c2+o2+n3+c4+n4+c5+c6
    color orange, resn rA+A and name n1+c6+n6+c5+n7+c8+n9+c4+n3+c2
    color blue, resn rU+U and name n3+c4+o4+c5+c6+n1+c2+o2
    #
    #cmd.color("yellow", "*intron*")
    #cmd.color("yellow", "*exon*")
    #cmd.show("spheres", "inorganic")
    #cmd.color("yellow", "inorganic")
    """
    # Object-level snRNA colors first...
    cmd.color("orange", "U4_snRNA*")
    cmd.color("red", "U6_snRNA*")
    cmd.color("blue", "U5_snRNA*")
    cmd.color("green", "U2_snRNA*")
    # ...then override per-residue by base identity (G/C/A/U).
    cmd.color("red",'resn rG+G and name n1+c6+o6+c5+c4+n7+c8+n9+n3+c2+n1+n2')
    cmd.color("forest",'resn rC+C and name n1+c2+o2+n3+c4+n4+c5+c6')
    cmd.color("orange",'resn rA+A and name n1+c6+n6+c5+n7+c8+n9+c4+n3+c2')
    cmd.color("blue",'resn rU+U and name n3+c4+o4+c5+c6+n1+c2+o2')
# Register commands and custom colors only when running inside PyMOL.
try:
    from pymol import cmd
except ImportError:
    print("PyMOL Python lib is missing")
else:
    #cmd.extend("spl", spl)
    cmd.extend("spl2", spl2)
    # colors taken from https://github.com/maxewilkinson/Spliceosome-PyMOL-sessions
    cmd.set_color('lightgreen', [144, 238, 144])
    cmd.set_color('darkgreen', [0, 100, 0])
    cmd.set_color('darkseagreen', [143, 188, 143])
    cmd.set_color('greenyellow', [173, 255, 47])
    cmd.set_color('coral', [255, 127, 80])
    cmd.set_color('darkorange', [255, 140, 0])
    cmd.set_color('gold', [255, 215, 0])
    cmd.set_color('lemonchiffon', [255,250,205])
    cmd.set_color('moccasin', [255,228,181])
    cmd.set_color('skyblue', [135,206,235])
    cmd.set_color('lightyellow', [255,255,224])
    cmd.set_color('powderblue', [176,224,230])
    cmd.set_color('royalblue', [65,105,225])
    cmd.set_color('cornflowerblue', [100,149,237])
    cmd.set_color('steelblue', [70,130,180])
    cmd.set_color('lightsteelblue', [176,196,222])
    cmd.set_color('violetBlue', [40, 0, 120])
    cmd.set_color('mediumpurple', [147,112,219])

# Startup banner with usage examples.
print("""
PyMOL4Spliceosome
-----------------------
spl hprp8
spl prp8
""")
| mit |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 18 | 5832 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
    """Tests for ``pandas_io.pandas_input_fn``.

    Each test returns early (silently passing rather than skipping) when
    pandas is not installed, mirroring the module-level ``HAS_PANDAS`` guard.
    """

    def makeTestDataFrame(self):
        # 4-row frame indexed 100..103: features a=[0..3], b=[32..35];
        # target y=[-32..-29] shares the same index.
        index = np.arange(100, 104)
        a = np.arange(4)
        b = np.arange(32, 36)
        x = pd.DataFrame({'a': a, 'b': b}, index=index)
        y = pd.Series(np.arange(-32, -28), index=index)
        return x, y

    def callInputFnOnce(self, input_fn, session):
        # Start the queue runners, fetch exactly one batch, then shut down.
        results = input_fn()
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)
        result_values = session.run(results)
        coord.request_stop()
        coord.join(threads)
        return result_values

    def testPandasInputFn_IndexMismatch(self):
        # x and y with mismatched indexes must be rejected at construction.
        if not HAS_PANDAS:
            return
        x, _ = self.makeTestDataFrame()
        y_noindex = pd.Series(np.arange(-32, -28))
        with self.assertRaises(ValueError):
            pandas_io.pandas_input_fn(
                x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)

    def testPandasInputFn_ProducesExpectedOutputs(self):
        # Unshuffled batch of 2 yields the first two rows, in order.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)

            features, target = self.callInputFnOnce(input_fn, session)

            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])
            self.assertAllEqual(target, [-32, -31])

    def testPandasInputFn_OnlyX(self):
        # With y=None only the feature dict is produced.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, _ = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y=None, batch_size=2, shuffle=False, num_epochs=1)

            features = self.callInputFnOnce(input_fn, session)

            self.assertAllEqual(features['a'], [0, 1])
            self.assertAllEqual(features['b'], [32, 33])

    def testPandasInputFn_ExcludesIndex(self):
        # The DataFrame index must not leak into the feature dict.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)

            features, _ = self.callInputFnOnce(input_fn, session)

            self.assertFalse('index' in features)

    def assertInputsCallableNTimes(self, input_fn, session, n):
        # Helper: the inputs can be fetched exactly n times, then the queue
        # signals exhaustion with OutOfRangeError.
        inputs = input_fn()
        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)
        for _ in range(n):
            session.run(inputs)
        with self.assertRaises(errors.OutOfRangeError):
            session.run(inputs)
        coord.request_stop()
        coord.join(threads)

    def testPandasInputFn_RespectsEpoch_NoShuffle(self):
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=4, shuffle=False, num_epochs=1)

            self.assertInputsCallableNTimes(input_fn, session, 1)

    def testPandasInputFn_RespectsEpoch_WithShuffle(self):
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=4, shuffle=True, num_epochs=1)

            self.assertInputsCallableNTimes(input_fn, session, 1)

    def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
        # queue_capacity=None lets the implementation size the queue itself;
        # 4 rows * 2 epochs / batch_size 2 = 4 fetches.
        if not HAS_PANDAS:
            return
        with self.test_session() as session:
            x, y = self.makeTestDataFrame()
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)

            self.assertInputsCallableNTimes(input_fn, session, 4)

    def testPandasInputFn_RespectsEpochUnevenBatches(self):
        if not HAS_PANDAS:
            return
        x, y = self.makeTestDataFrame()
        with self.test_session() as session:
            input_fn = pandas_io.pandas_input_fn(
                x, y, batch_size=3, shuffle=False, num_epochs=1)

            # Before the last batch, only one element of the epoch should remain.
            self.assertInputsCallableNTimes(input_fn, session, 2)

    def testPandasInputFn_Idempotent(self):
        # Constructing and invoking the input_fn repeatedly must not fail.
        if not HAS_PANDAS:
            return
        x, y = self.makeTestDataFrame()
        for _ in range(2):
            pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=False, num_epochs=1)()
        for _ in range(2):
            pandas_io.pandas_input_fn(
                x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
    test.main()  # run the TensorFlow test harness
| apache-2.0 |
saimn/glue | glue/viewers/image/layer_artist.py | 2 | 11664 | from __future__ import absolute_import, division, print_function
import logging
from abc import ABCMeta, abstractproperty, abstractmethod
import numpy as np
from matplotlib.cm import gray
from glue.external import six
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import MatplotlibLayerArtist, ChangedTrigger
from glue.core.util import small_view, small_view_array
from glue.utils import view_cascade, get_extent, color2rgb, Pointer
from .ds9norm import DS9Normalize
__all__ = ['RGBImageLayerArtist', 'ImageLayerArtist']
@six.add_metaclass(ABCMeta)
class RGBImageLayerBase(object):
    """Interface for artists that composite three components into an RGB image."""

    r = abstractproperty()  # ComponentID for red channel
    g = abstractproperty()  # ComponentID for green channel
    b = abstractproperty()  # ComponentID for blue channel
    rnorm = abstractproperty()  # Normalize instance for red channel
    gnorm = abstractproperty()  # Normalize instance for green channel
    bnorm = abstractproperty()  # Normalize instance for blue channel
    contrast_layer = abstractproperty()  # 'red' | 'green' | 'blue'. Which norm to adjust during set_norm
    layer_visible = abstractproperty()  # dict (str->bool). Whether to show 'red', 'green', 'blue' layers

    @property
    def color_visible(self):
        """
        Return layer visibility as a list of [red_visible, green_visible, blue_visible]
        """
        return [self.layer_visible['red'], self.layer_visible['green'],
                self.layer_visible['blue']]

    @color_visible.setter
    def color_visible(self, value):
        # value: any 3-sequence of booleans in (red, green, blue) order.
        self.layer_visible['red'] = value[0]
        self.layer_visible['green'] = value[1]
        self.layer_visible['blue'] = value[2]
@six.add_metaclass(ABCMeta)
class ImageLayerBase(object):
    """Interface for artists that render a single data layer as an image."""

    norm = abstractproperty()  # Normalization instance to scale intensities
    cmap = abstractproperty()  # colormap

    @abstractmethod
    def set_norm(self, **kwargs):
        """
        Adjust the normalization instance parameters.
        See :class:`glue.viewers.image.ds9norm.DS9Normalize` attributes for
        valid kwargs for this function
        """
        pass

    @abstractmethod
    # BUG FIX: the stub was declared as ``def clear_norm():`` (missing
    # ``self``), which would raise TypeError if ever invoked via super().
    def clear_norm(self):
        """
        Reset the norm to the default
        """
        pass

    @abstractmethod
    def override_image(self, image):
        """
        Temporarily display another image instead of a view into the data

        The new image has the same shape as the view into the data
        """
        pass

    @abstractmethod
    def clear_override(self):
        """
        Remove the override image, and display the data again
        """
        pass
@six.add_metaclass(ABCMeta)
class SubsetImageLayerBase(object):
    # Marker base class for subset-image layer artists; no abstract API yet.
    pass
class ImageLayerArtist(MatplotlibLayerArtist, ImageLayerBase):
    """Matplotlib artist that renders a single data layer as an image."""

    _property_set = MatplotlibLayerArtist._property_set + ['norm']

    def __init__(self, layer, ax):
        super(ImageLayerArtist, self).__init__(layer, ax)
        self._norm = None
        self._cmap = gray
        self._override_image = None
        self._clip_cache = None
        self.aspect = 'equal'

    @property
    def norm(self):
        return self._norm

    @norm.setter
    def norm(self, value):
        self._norm = value

    @property
    def cmap(self):
        return self._cmap

    @cmap.setter
    def cmap(self, value):
        # Propagate the new colormap to every existing matplotlib artist.
        self._cmap = value
        for a in self.artists:
            a.set_cmap(value)

    def _default_norm(self, layer):
        """Build an arcsinh-stretched DS9Normalize clipped to the
        1st/99th percentiles of the finite values."""
        vals = np.sort(layer.ravel())
        vals = vals[np.isfinite(vals)]
        result = DS9Normalize()
        result.stretch = 'arcsinh'
        result.clip = True
        if vals.size > 0:
            result.vmin = vals[np.intp(.01 * vals.size)]
            result.vmax = vals[np.intp(.99 * vals.size)]
        return result

    def override_image(self, image):
        """Temporarily show a different image"""
        self._override_image = image

    def clear_override(self):
        self._override_image = None

    def _extract_view(self, view, transpose):
        if self._override_image is None:
            result = self.layer[view]
            if transpose:
                result = result.T
            return result
        else:
            # For override images only the slice entries of the view apply
            # (the first entry of a data view is the attribute).
            v = [v for v in view if isinstance(v, slice)]
            if transpose:
                v = v[::-1]
            result = self._override_image[v]
            return result

    def _update_clip(self, att):
        # Memoized on (attribute, override image, clip percentiles) so clip
        # limits are only recomputed when one of them changes.
        key = (att, id(self._override_image),
               self.norm.clip_lo, self.norm.clip_hi)
        if self._clip_cache == key:
            return
        self._clip_cache = key

        if self._override_image is None:
            data = small_view(self.layer, att)
        else:
            data = small_view_array(self._override_image)
        self.norm.update_clip(data)

    def update(self, view, transpose=False, aspect=None):
        """Redraw the image for the given data view."""
        if aspect is not None:
            self.aspect = aspect

        self.clear()

        views = view_cascade(self.layer, view)
        artists = []

        lr0 = self._extract_view(views[0], transpose)
        # BUG FIX: this statement appeared twice in the original source;
        # the duplicate was a no-op and has been removed.
        self.norm = self.norm or self._default_norm(lr0)
        self._update_clip(views[0][0])

        for v in views:
            image = self._extract_view(v, transpose)
            extent = get_extent(v, transpose)
            artists.append(self._axes.imshow(image, cmap=self.cmap,
                                             norm=self.norm,
                                             interpolation='nearest',
                                             origin='lower',
                                             extent=extent, zorder=0))
        self._axes.set_aspect(self.aspect, adjustable='datalim')
        self.artists = artists
        self._sync_style()

    def set_norm(self, vmin=None, vmax=None,
                 bias=None, contrast=None, stretch=None, norm=None,
                 clip_lo=None, clip_hi=None):
        """Update (or replace) the DS9Normalize; returns the active norm."""
        if norm is not None:
            self.norm = norm  # XXX Should wrap ala DS9Normalize(norm)
            return norm
        if self.norm is None:
            self.norm = DS9Normalize()
        if vmin is not None:
            self.norm.vmin = vmin
        if vmax is not None:
            self.norm.vmax = vmax
        if bias is not None:
            self.norm.bias = bias
        if contrast is not None:
            self.norm.contrast = contrast
        if clip_lo is not None:
            self.norm.clip_lo = clip_lo
        if clip_hi is not None:
            self.norm.clip_hi = clip_hi
        if stretch is not None:
            self.norm.stretch = stretch
        return self.norm

    def clear_norm(self):
        self.norm = None

    def _sync_style(self):
        for artist in self.artists:
            artist.set_zorder(self.zorder)
            artist.set_visible(self.visible and self.enabled)
class RGBImageLayerArtist(ImageLayerArtist, RGBImageLayerBase):
    """Artist that composites three ComponentIDs (r, g, b) into one RGB image."""

    _property_set = ImageLayerArtist._property_set + \
        ['r', 'g', 'b', 'rnorm', 'gnorm', 'bnorm', 'color_visible']

    r = ChangedTrigger()  # ComponentID for the red channel
    g = ChangedTrigger()  # ComponentID for the green channel
    b = ChangedTrigger()  # ComponentID for the blue channel
    rnorm = Pointer('_rnorm')  # DS9Normalize for red
    gnorm = Pointer('_gnorm')  # DS9Normalize for green
    bnorm = Pointer('_bnorm')  # DS9Normalize for blue

    # dummy class-level variables will be masked
    # at instance level, needed for ABC to be happy
    layer_visible = None
    contrast_layer = None

    def __init__(self, layer, ax, last_view=None):
        super(RGBImageLayerArtist, self).__init__(layer, ax)
        self.contrast_layer = 'green'
        self.aspect = 'equal'
        self.layer_visible = dict(red=True, green=True, blue=True)
        self.last_view = last_view

    def set_norm(self, *args, **kwargs):
        # Delegate to the parent implementation, swapping the active norm so
        # only the channel selected by `contrast_layer` is adjusted.
        spr = super(RGBImageLayerArtist, self).set_norm
        if self.contrast_layer == 'red':
            self.norm = self.rnorm
            self.rnorm = spr(*args, **kwargs)
        if self.contrast_layer == 'green':
            self.norm = self.gnorm
            self.gnorm = spr(*args, **kwargs)
        if self.contrast_layer == 'blue':
            self.norm = self.bnorm
            self.bnorm = spr(*args, **kwargs)

    def update(self, view=None, transpose=False, aspect=None):
        """Redraw the RGB composite; falls back to the last view if none given."""
        self.clear()
        if aspect is not None:
            self.aspect = aspect
        if self.r is None or self.g is None or self.b is None:
            return
        if view is None:
            view = self.last_view
        if view is None:
            return
        self.last_view = view

        views = view_cascade(self.layer, view)
        artists = []
        for v in views:
            extent = get_extent(v, transpose)
            # first argument = component. swap
            r = tuple([self.r] + list(v[1:]))
            g = tuple([self.g] + list(v[1:]))
            b = tuple([self.b] + list(v[1:]))
            r = self.layer[r]
            g = self.layer[g]
            b = self.layer[b]
            if transpose:
                r = r.T
                g = g.T
                b = b.T
            self.rnorm = self.rnorm or self._default_norm(r)
            self.gnorm = self.gnorm or self._default_norm(g)
            self.bnorm = self.bnorm or self._default_norm(b)
            if v is views[0]:
                # Recompute clip limits only once, on the first view.
                self.rnorm.update_clip(small_view(self.layer, self.r))
                self.gnorm.update_clip(small_view(self.layer, self.g))
                self.bnorm.update_clip(small_view(self.layer, self.b))

            image = np.dstack((self.rnorm(r),
                               self.gnorm(g),
                               self.bnorm(b)))

            # Zero out the channels toggled off in layer_visible.
            if not self.layer_visible['red']:
                image[:, :, 0] *= 0
            if not self.layer_visible['green']:
                image[:, :, 1] *= 0
            if not self.layer_visible['blue']:
                image[:, :, 2] *= 0

            artists.append(self._axes.imshow(image,
                                             interpolation='nearest',
                                             origin='lower',
                                             extent=extent, zorder=0))
        self._axes.set_aspect(self.aspect, adjustable='datalim')
        self.artists = artists
        self._sync_style()
class SubsetImageLayerArtist(MatplotlibLayerArtist, SubsetImageLayerBase):
    """Artist that draws a subset as a semi-transparent colored mask overlay."""

    def __init__(self, *args, **kwargs):
        super(SubsetImageLayerArtist, self).__init__(*args, **kwargs)
        self.aspect = 'equal'

    def update(self, view, transpose=False, aspect=None):
        self.clear()

        if aspect is not None:
            self.aspect = aspect

        subset = self.layer
        logging.debug("View into subset %s is %s", self.layer, view)

        try:
            # view[0] is the attribute; only the slices describe the region
            mask = subset.to_mask(view[1:])
        except IncompatibleAttribute as exc:
            self.disable_invalid_attributes(*exc.args)
            return False
        logging.debug("View mask has shape %s", mask.shape)

        # shortcut for empty subsets
        if not mask.any():
            return

        if transpose:
            mask = mask.T

        extent = get_extent(view, transpose)
        r, g, b = color2rgb(self.layer.style.color)
        # Build an RGBA image: subset color at 50% alpha where mask is True.
        mask = np.dstack((r * mask, g * mask, b * mask, mask * .5))
        mask = (255 * mask).astype(np.uint8)
        self.artists = [self._axes.imshow(mask, extent=extent,
                                          interpolation='nearest',
                                          origin='lower',
                                          zorder=5, visible=self.visible)]
        self._axes.set_aspect(self.aspect, adjustable='datalim')
| bsd-3-clause |
danielSbastos/face-recognition-scienceFair | detect_webcam.py | 1 | 6003 | #libraries to plot face features
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
#libraries for apiconnection
import requests
import json
import sys
#Libraries to take photo with webcam
import pygame
import pygame.camera
from pygame.locals import *
''' TAKE PHOTO WITH WEBCAM AND COMPARE WITH GROUP PERSON TO FIND SIMILAR PERSON '''
#info to save webcam photo
global FILENAME  # NOTE(review): `global` at module level is a no-op; kept as-is.
FILENAME = '{FILENAME}.jpg' #file taken by webcam will be saved with this name and later be used for analysis
DEVICE = '/dev/video0' #webcam port
SIZE = (640, 480) #size of pygame webcam window size
key = '{Ocp-Apim-Subscription-Key}' #YOUR key. Not gonna give you mine, smartypants
targetFace = []  # face rectangle [left, top, width, height], filled by detect()
faceLandmarks = []  # 27 landmark coordinate dicts, filled by detect()
personId = []  # [personId, name] pairs, filled by get_ids()
'''get all personIds in certain personGroup'''
def get_ids(personId, key):
    """Fetch every person in the person group and append [personId, name]
    pairs to the supplied ``personId`` list."""
    response = requests.get(
        "https://westus.api.cognitive.microsoft.com/face/v1.0/persongroups/people/{personGroupId}?",
        params={'top': '100'},
        headers={'Ocp-Apim-Subscription-Key': key})
    for entry in json.loads(response.text):
        # e.g. ['124534nbh424523n5', 'Gabriel']
        personId.append([entry["personId"], entry["name"]])
'''open webcam, take picture and save it as FILENAME'''
def camstream():
    """Open the webcam, preview frames, and save a snapshot as FILENAME.

    Press the 's' key to save the current frame and exit; closing the
    window exits without saving anything.
    """
    #initialize camera and pygame
    pygame.init()
    pygame.camera.init()
    #initialize a window or screen for display
    display = pygame.display.set_mode(SIZE, 0)
    #load a camera and initialize it
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    #call surface to represent display
    screen = pygame.surface.Surface(SIZE, 0, display)
    capture = True
    while capture:
        #captures screen as a Surface
        screen = camera.get_image(screen)
        #update the full display Surface to the screen
        display.blit(screen, (0,0))
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == QUIT: #if "x" button is pressed, finish capture and don't save the image
                capture = False
            elif event.type == KEYDOWN and event.key == K_s: #if "s" key is pressed, save photo and finish capture
                pygame.image.save(screen, FILENAME) #save photo as FILENAME
                capture = False #break loop and exit it
    camera.stop()
    pygame.quit()
    return
'''for the webcam photo, detect face coordinates and face landmarks, respectively appending them to targetFace and faceLandmarks'''
def detect(key, targetFace, faceLandmarks):
    """Run Face API detection on the saved webcam photo (FILENAME).

    Appends the face rectangle (left, top, width, height) to ``targetFace``
    and the 27 landmark coordinate dicts to ``faceLandmarks``; returns the
    detected faceId string. Only the first detected face is used.
    """
    headers_octet = {
        'Content-Type': 'application/octet-stream',
        'Ocp-Apim-Subscription-Key': key}
    params = {'returnFaceId': 'true', #return faceId and face coordinates
              'returnFaceLandmarks' : 'true'} #return 27 coordinates of face landmarks
    # NOTE(review): the trailing "?%" in the URL looks like a typo; the query
    # string is supplied via `params` anyway -- confirm before changing.
    resp_detect = requests.post("https://westus.api.cognitive.microsoft.com/face/v1.0/detect?%",
                                params = params,
                                data = open(FILENAME, 'rb'),
                                headers = headers_octet)
    data = json.loads(resp_detect.text)
    faceId = str(data[0]["faceId"])
    #append face rectangle coordinates
    targetFace.append(data[0]['faceRectangle']["left"])
    targetFace.append(data[0]['faceRectangle']["top"])
    targetFace.append(data[0]['faceRectangle']["width"])
    targetFace.append(data[0]['faceRectangle']["height"])
    #append each face landmark, e.g. right and left pupil coordinates
    face_landmarks = data[0]['faceLandmarks']
    for sub_landmarks in face_landmarks:
        faceLandmarks.append(face_landmarks[sub_landmarks])
    return faceId
'''With the photo already taken and targetFace/faceLandmarks populated, check
the confidence that the face matches each person in the personGroup.'''
def verify(personId, key):
    """Verify the webcam face against one person and print the verdict.

    ``personId`` is a [id, name] pair produced by get_ids().
    NOTE(review): detect() is re-run (and re-appends to the module-level
    lists) on every call; callers loop over all people, so the same photo is
    re-uploaded once per person -- confirm this is intended.
    """
    headers_json = {'Content-Type': 'application/json',
                    'Ocp-Apim-Subscription-Key': key}
    faceId = detect(key, targetFace, faceLandmarks) #calling the detect() function to return faceId of the webcam picture
    body = "{'faceId': '%s', 'personGroupId': 'people', 'personId': '%s'}" % (faceId, personId[0])
    resp_verify = requests.post('https://westus.api.cognitive.microsoft.com/face/v1.0/verify?',
                                data = body,
                                headers = headers_json)
    #with response, transform it to json and check the confidence value
    data = json.loads(resp_verify.text)
    if data["confidence"] >= 0.70: #if higher or equal than 70% of accuracy -> it is that person
        print("This is " + personId[1], data['confidence'])
    else: #if less than 70% of accuracy -> it is not that person
        print("This is not " + personId[1], data['confidence'])
''' plot the coordinates defined in faceLandmarks as circles and targetFace as rectangle in taken photo'''
def draw():
    """Overlay the detected face rectangle and landmark points on the photo."""
    image = np.array(Image.open(FILENAME), dtype=np.uint8)
    # One figure/axes pair showing the captured photo.
    fig, axis = plt.subplots(1)
    axis.imshow(image)
    # Face bounding box from detect(): left, top, width, height.
    left, top, width, height = targetFace[:4]
    axis.add_patch(patches.Rectangle((left, top), width, height,
                                     linewidth=2, edgecolor='r',
                                     facecolor='none'))
    # One small filled circle per face landmark.
    for landmark in faceLandmarks:
        axis.add_patch(patches.Circle((landmark['x'], landmark['y']), 5,
                                      color='r', fill=True))
    # Show the annotated image in a window.
    plt.show()
if __name__ == '__main__':
    get_ids(personId,key) #get all present ids from person group
    camstream() #take picture with camera to be analysed
    for i in personId: #for each personId, e.g. for each person in the person group, check confidence
        verify(i, key) #detect() is called inside this function
    draw() #draw 27 points and rectangle on image taken by webcam
| mit |
marcocaccin/scikit-learn | sklearn/mixture/gmm.py | 6 | 31222 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds to a
        single data point.

    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components Gaussians.
        Each row corresponds to a single mean vector.

    covars : array_like
        List of n_components covariance parameters for each Gaussian. The shape
        depends on `covariance_type`:
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'

    covariance_type : string
        Type of the covariance parameters.  Must be one of
        'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian distributions.
    """
    # Dispatch table: covariance type -> specialised density routine.
    density_by_type = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    density_func = density_by_type[covariance_type]
    return density_func(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Generate random samples from a Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.

    covar : array_like, optional
        Covariance of the distribution. The shape depends on `covariance_type`:
            scalar                       if 'spherical',
            (n_features)                 if 'diag',
            (n_features, n_features)     if 'tied', or 'full'

    covariance_type : string, optional
        Type of the covariance parameters.  Must be one of
        'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    # Start from standard-normal draws, then color them with the covariance.
    samples = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        samples.shape = (n_dim,)

    if covariance_type == 'spherical':
        samples *= np.sqrt(covar)
    elif covariance_type == 'diag':
        samples = np.dot(np.diag(np.sqrt(covar)), samples)
    else:
        # 'tied' / 'full': transform by the matrix square root of covar
        # obtained from its eigendecomposition.
        eigvals, eigvecs = linalg.eigh(covar)
        eigvals.clip(0, out=eigvals)  # get rid of tiny negatives
        np.sqrt(eigvals, out=eigvals)
        eigvecs *= eigvals
        samples = np.dot(eigvecs, samples)

    return (samples.T + mean).T
class GMM(BaseEstimator):
    """Gaussian Mixture Model
    Representation of a Gaussian mixture model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a GMM distribution.
    Initializes parameters such that every mixture component has zero
    mean and identity covariance.
    Read more in the :ref:`User Guide <gmm>`.
    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use.  Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting.  Defaults to 1e-3.
    tol : float, optional
        Convergence threshold. EM iterations will stop when average
        gain in log-likelihood is below this threshold.  Defaults to 1e-3.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. the best results is kept
    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.  Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.  Defaults to 'wmc'.
    verbose : int, default: 0
        Enable verbose output. If 1 then it always prints the current
        initialization and iteration step. If greater than 1 then
        it prints additionally the change and time needed for each step.
    Attributes
    ----------
    weights_ : array, shape (`n_components`,)
        This attribute stores the mixing weights for each mixture component.
    means_ : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.
    covars_ : array
        Covariance parameters for each mixture component.  The shape
        depends on `covariance_type`::
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    converged_ : bool
        True when convergence was reached in fit(), False otherwise.
    See Also
    --------
    DPGMM : Infinite gaussian mixture model, using the dirichlet
        process, fit with a variational algorithm
    VBGMM : Finite gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import mixture
    >>> np.random.seed(1)
    >>> g = mixture.GMM(n_components=2)
    >>> # Generate random observations with two modes centered on 0
    >>> # and 10 to use for training.
    >>> obs = np.concatenate((np.random.randn(100, 1),
    ...                       10 + np.random.randn(300, 1)))
    >>> g.fit(obs)  # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, thresh=None, tol=0.001, verbose=0)
    >>> np.round(g.weights_, 2)
    array([ 0.75,  0.25])
    >>> np.round(g.means_, 2)
    array([[ 10.05],
           [  0.06]])
    >>> np.round(g.covars_, 2) #doctest: +SKIP
    array([[[ 1.02]],
           [[ 0.96]]])
    >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
    array([1, 1, 0, 0]...)
    >>> np.round(g.score([[0], [2], [9], [10]]), 2)
    array([-2.19, -4.58, -1.75, -1.21])
    >>> # Refit the model on new data (initial parameters remain the
    >>> # same), this time with an even split between the two modes.
    >>> g.fit(20 * [[0]] + 20 * [[10]])  # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, thresh=None, tol=0.001, verbose=0)
    >>> np.round(g.weights_, 2)
    array([ 0.5,  0.5])
    """
    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc',
                 verbose=0):
        if thresh is not None:
            warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
                          " and will be removed in 0.18.",
                          DeprecationWarning)
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.thresh = thresh
        self.tol = tol
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.verbose = verbose
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)
        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')
        # start from uniform mixing weights; fit() re-estimates them
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False
    def _get_covars(self):
        """Covariance parameters for each mixture component.
        The shape depends on ``cvtype``::
            (n_states, n_features)                if 'spherical',
            (n_features, n_features)              if 'tied',
            (n_states, n_features)                if 'diag',
            (n_states, n_features, n_features)    if 'full'
        """
        # always return a list of full (n_features, n_features) matrices
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]
    def _set_covars(self, covars):
        """Provide values for covariance"""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars
    def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.
        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.
        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.
        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'means_')
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X  is not compatible with self')
        # per-sample, per-component joint log-likelihood: log p(x|k) + log w_k
        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type) +
               np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
    def score(self, X, y=None):
        """Compute the log probability under the model.
        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.score_samples(X)
        return logprob
    def predict(self, X):
        """Predict label for data.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = (n_samples,) component memberships
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities.argmax(axis=1)
    def predict_proba(self, X):
        """Predict posterior probability of data under each Gaussian
        in the model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        check_is_fitted(self, 'means_')
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        # inverse-CDF sampling over the mixture weights
        weight_cdf = np.cumsum(self.weights_)
        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X
    def fit_predict(self, X, y=None):
        """Fit and then predict labels for data.
        Warning: due to the final maximization step in the EM algorithm,
        with low iterations the prediction may not be 100% accurate
        .. versionadded:: 0.17
           *fit_predict* method in Gaussian Mixture Model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = (n_samples,) component memberships
        """
        return self._fit(X, y).argmax(axis=1)
    def _fit(self, X, y=None, do_prediction=False):
        """Estimate model parameters with the EM algorithm.
        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.
        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        # initialization step
        X = check_array(X, dtype=np.float64, ensure_min_samples=2,
                        estimator=self)
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))
        # track the best restart over the n_init random initializations
        max_log_prob = -np.infty
        if self.verbose > 0:
            print('Expectation-maximization algorithm started.')
        for init in range(self.n_init):
            if self.verbose > 0:
                print('Initialization ' + str(init + 1))
                start_init_time = time()
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    n_clusters=self.n_components,
                    random_state=self.random_state).fit(X).cluster_centers_
                if self.verbose > 1:
                    print('\tMeans have been initialized.')
            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)
                if self.verbose > 1:
                    print('\tWeights have been initialized.')
            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self.covariance_type, self.n_components)
                if self.verbose > 1:
                    print('\tCovariance matrices have been initialized.')
            # EM algorithms
            current_log_likelihood = None
            # reset self.converged_ to False
            self.converged_ = False
            # this line should be removed when 'thresh' is removed in v0.18
            tol = (self.tol if self.thresh is None
                   else self.thresh / float(X.shape[0]))
            for i in range(self.n_iter):
                if self.verbose > 0:
                    print('\tEM iteration ' + str(i + 1))
                    start_iter_time = time()
                prev_log_likelihood = current_log_likelihood
                # Expectation step
                log_likelihoods, responsibilities = self.score_samples(X)
                current_log_likelihood = log_likelihoods.mean()
                # Check for convergence.
                # (should compare to self.tol when deprecated 'thresh' is
                # removed in v0.18)
                if prev_log_likelihood is not None:
                    change = abs(current_log_likelihood - prev_log_likelihood)
                    if self.verbose > 1:
                        print('\t\tChange: ' + str(change))
                    if change < tol:
                        self.converged_ = True
                        if self.verbose > 0:
                            print('\t\tEM algorithm converged.')
                        break
                # Maximization step
                self._do_mstep(X, responsibilities, self.params,
                               self.min_covar)
                if self.verbose > 1:
                    print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
                        time() - start_iter_time))
            # if the results are better, keep it
            if self.n_iter:
                if current_log_likelihood > max_log_prob:
                    max_log_prob = current_log_likelihood
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
                    if self.verbose > 1:
                        print('\tBetter parameters were found.')
            if self.verbose > 1:
                print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
                    time() - start_init_time))
        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data.")
        if self.n_iter:
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        else:  # self.n_iter == 0 occurs when using GMM within HMM
            # Need to make sure that there are responsibilities to output
            # Output zeros because it was just a quick initialization
            responsibilities = np.zeros((X.shape[0], self.n_components))
        return responsibilities
    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.
        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.
        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        Returns
        -------
        self
        """
        self._fit(X, y)
        return self
    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """ Perform the Mstep of the EM algorithm and return the class weights
        """
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        # EPS term guards against division by zero for empty components
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights
    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        ndim = self.means_.shape[1]
        if self.covariance_type == 'full':
            cov_params = self.n_components * ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * ndim
        elif self.covariance_type == 'tied':
            cov_params = ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = ndim * self.n_components
        return int(cov_params + mean_params + self.n_components - 1)
    def bic(self, X):
        """Bayesian information criterion for the current model fit
        and the proposed data
        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)
        Returns
        -------
        bic: float (the lower the better)
        """
        return (-2 * self.score(X).sum() +
                self._n_parameters() * np.log(X.shape[0]))
    def aic(self, X):
        """Akaike information criterion for the current model fit
        and the proposed data
        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)
        Returns
        -------
        aic: float (the lower the better)
        """
        return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model.

    `covars` may be a 1-D array (one variance per component) or a 2-D array
    whose single column holds that variance; either form is broadcast to the
    (n_components, n_features) diagonal representation before delegating.
    """
    cv = covars.copy()
    if covars.ndim == 1:
        cv = cv[:, np.newaxis]
    # Bug fix: inspect the reshaped `cv`, not the original `covars` -- for
    # 1-D input `covars.shape[1]` raised IndexError before the tile ran.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model"""
    # Replicate the single shared covariance once per component and
    # delegate to the full-covariance implementation.
    n_components = means.shape[0]
    stacked_cv = np.tile(covars, (n_components, 1, 1))
    return _log_multivariate_normal_density_full(X, means, stacked_cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template"""
    # Early-return per type; the unknown-type error stays last.
    if covariance_type == 'tied':
        return tied_cv
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    if covariance_type == 'spherical':
        # share one isotropic variance (the template's mean) per component
        mean_variance = tied_cv.mean() * np.ones(tied_cv.shape[1])
        return np.tile(mean_variance, (n_components, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases"""
    # Average the per-feature diagonal variances into one shared value
    # per component, replicated across all features.
    diag_cv = _covar_mstep_diag(*args)
    n_features = diag_cv.shape[1]
    return np.tile(diag_cv.mean(axis=1)[:, np.newaxis], (1, n_features))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    for k in range(gmm.n_components):
        post = responsibilities[:, k]
        diff = X - gmm.means_[k]
        with np.errstate(under='ignore'):
            # Underflow Errors in doing post * X.T are not important
            avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
        # regularize the diagonal to keep the matrix invertible
        cv[k] = avg_cv + min_covar * np.eye(n_features)
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
# Dispatch table mapping each covariance_type to its M-step implementation;
# consumed by GMM._do_mstep.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
| bsd-3-clause |
litaotao/mpld3 | doc/sphinxext/plot_generator.py | 19 | 10614 | import sys
import os
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg') # don't display plots
import mpld3
from matplotlib import image
from matplotlib.figure import Figure
class disable_mpld3(object):
    """Context manager to temporarily disable mpld3.show() command"""
    def __enter__(self):
        def _noop_show(*args, **kwargs):
            return None
        # Stash the real implementation and swap in a no-op.
        self.show = mpld3.show
        mpld3.show = _noop_show
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Restore the original implementation on exit.
        mpld3.show = self.show
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. raw:: html
{img_html}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
display: inline;
width: 170px;
height: 170px;
opacity:0.4;
filter:alpha(opacity=40); /* For IE8 and earlier */
}}
.figure img:hover
{{
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure .caption {{
width: 180px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example Gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
BANNER_JS_TEMPLATE = """
var banner_data = {banner_data};
banner_data.forEach(function(d, i) {{
d.i = i;
}});
var height = 150,
width = 900,
imageHeight = 150,
imageWidth = 150,
zoomfactor = 0.1;
var banner = d3.select(".example-banner");
banner.style("height", height + "px")
.style("width", width + "px")
.style("margin-left", "auto")
.style("margin-right", "auto");
var svg = banner.append("svg")
.attr("width", width + "px")
.attr("height", height + "px");
var anchor = svg.append("g")
.attr("class", "example-anchor")
.selectAll("a")
.data(banner_data.slice(0, 7));
anchor.exit().remove();
var anchor_elements = anchor.enter().append("a")
.attr("xlink:href", function(d) {{ return d.url; }})
.attr("xlink:title", function(d) {{ return d.title; }});
anchor_elements.append("svg:image")
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * imageHeight)
.attr("xlink:href", function(d){{ return d.thumb; }})
.attr("xroot", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("x", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("y", d3.round(0.5 * zoomfactor * imageHeight))
.attr("i", function(d){{return d.i;}})
.on("mouseover", function() {{
var img = d3.select(this);
img.transition()
.attr("width", imageWidth)
.attr("height", height)
.attr("x", img.attr("xroot")
- d3.round(0.5 * zoomfactor * imageWidth))
.attr("y", 0);
}})
.on("mouseout", function() {{
var img = d3.select(this);
img.transition()
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * height)
.attr("x", img.attr("xroot"))
.attr("y", d3.round(0.5 * zoomfactor * imageHeight));
}});
"""
def create_thumbnail(infile, thumbfile,
                     width=300, height=300,
                     cx=0.5, cy=0.6, border=4):
    """Crop a (width x height) window out of *infile* and save *thumbfile*.

    The crop is centred at the fractional position (cx, cy) of the source
    image and framed with a black border ``border`` pixels wide.  The output
    format is picked from the extension of *thumbfile* ('.png', '.pdf' or
    '.svg').  Returns the matplotlib Figure used for rendering.
    """
    # this doesn't really matter, it will cancel in the end, but we
    # need it for the mpl API
    dpi = 100
    # only the extension is needed (the unused base name was dropped)
    extout = os.path.splitext(thumbfile)[1]
    im = image.imread(infile)
    rows, cols = im.shape[:2]
    x0 = int(cx * cols - 0.5 * width)
    y0 = int(cy * rows - 0.5 * height)
    thumb = im[y0: y0 + height,
               x0: x0 + width]
    # paint a black frame on the RGB channels (alpha left untouched)
    thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
    thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
    extension = extout.lower()
    if extension == '.png':
        from matplotlib.backends.backend_agg \
            import FigureCanvasAgg as FigureCanvas
    elif extension == '.pdf':
        from matplotlib.backends.backend_pdf \
            import FigureCanvasPDF as FigureCanvas
    elif extension == '.svg':
        from matplotlib.backends.backend_svg \
            import FigureCanvasSVG as FigureCanvas
    else:
        raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
    fig = Figure(figsize=(float(width) / dpi, float(height) / dpi),
                 dpi=dpi)
    # constructing the canvas attaches it to fig, which savefig needs
    canvas = FigureCanvas(fig)
    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])
    ax.imshow(thumb, aspect='auto', resample=True,
              interpolation='bilinear')
    fig.savefig(thumbfile, dpi=dpi)
    return fig
def indent(s, N=4):
    """Indent every line after the first of *s* by N spaces."""
    pad = ' ' * N
    return s.replace('\n', '\n' + pad)
class ExampleGenerator(object):
    """Tools for generating an example page from a file"""
    def __init__(self, filename, target_dir):
        # Parsing and execution happen eagerly at construction time.
        self.filename = filename
        self.target_dir = target_dir
        self.extract_docstring()
        self.exec_file()
    @property
    def dirname(self):
        # directory part of the source path
        return os.path.split(self.filename)[0]
    @property
    def fname(self):
        # bare file name of the source path
        return os.path.split(self.filename)[1]
    @property
    def modulename(self):
        return os.path.splitext(self.fname)[0]
    @property
    def pyfilename(self):
        return self.modulename + '.py'
    @property
    def rstfilename(self):
        return self.modulename + ".rst"
    @property
    def htmlfilename(self):
        return self.modulename + '.html'
    @property
    def pngfilename(self):
        return self.modulename + '.png'
    @property
    def thumbfilename(self):
        # TODO: don't hard-code image path
        return "_images/" + self.pngfilename
    @property
    def sphinxtag(self):
        return self.modulename
    @property
    def pagetitle(self):
        # first non-blank line of the module docstring
        return self.docstring.strip().split('\n')[0].strip()
    def extract_docstring(self):
        """ Extract a module-level docstring
        """
        lines = open(self.filename).readlines()
        start_row = 0
        if lines[0].startswith('#!'):
            # skip a shebang line before tokenizing
            lines.pop(0)
            start_row = 1
        docstring = ''
        first_par = ''
        # NOTE: Python 2 API -- generate_tokens takes a `.next`-style readline
        tokens = tokenize.generate_tokens(lines.__iter__().next)
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
                docstring = eval(tok_content)
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break
        self.docstring = docstring
        self.short_desc = first_par
        # line on which the example code (after the docstring) starts
        self.end_line = erow + 1 + start_row
    def exec_file(self):
        # Run the example (with mpld3.show suppressed), convert the resulting
        # figure to HTML, and save a cropped thumbnail next to it.
        print("running {0}".format(self.filename))
        with disable_mpld3():
            import matplotlib.pyplot as plt
            plt.close('all')
            my_globals = {'pl': plt,
                          'plt': plt}
            execfile(self.filename, my_globals)
        fig = plt.gcf()
        self.html = mpld3.fig_to_html(fig)
        thumbfile = os.path.join(self.target_dir,
                                 self.pngfilename)
        fig.savefig(thumbfile)
        create_thumbnail(thumbfile, thumbfile)
    def toctree_entry(self):
        return "   ./%s\n\n" % os.path.splitext(self.htmlfilename)[0]
    def contents_entry(self):
        return (".. figure:: ./{0}\n"
                "    :target: ./{1}\n"
                "    :align: center\n\n"
                "    :ref:`{2}`\n\n".format(self.pngfilename,
                                            self.htmlfilename,
                                            self.sphinxtag))
def main(app):
    # Build the example gallery: run every example script, write its .rst
    # page and thumbnail, then emit the gallery index and the front-page
    # banner data.  Invoked by Sphinx via the 'builder-inited' event.
    static_dir = os.path.join(app.builder.srcdir, '_static')
    target_dir = os.path.join(app.builder.srcdir, 'examples')
    source_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                              '..', 'examples'))
    if not os.path.exists(static_dir):
        os.makedirs(static_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    banner_data = []
    toctree = ("\n\n"
               ".. toctree::\n"
               "   :hidden:\n\n")
    contents = "\n\n"
    # Write individual example files
    for filename in glob.glob(os.path.join(source_dir, "*.py")):
        ex = ExampleGenerator(filename, target_dir)
        banner_data.append({"title": ex.pagetitle,
                            "url": os.path.join('examples', ex.htmlfilename),
                            "thumb": os.path.join(ex.thumbfilename)})
        shutil.copyfile(filename, os.path.join(target_dir, ex.pyfilename))
        output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
                                     docstring=ex.docstring,
                                     end_line=ex.end_line,
                                     fname=ex.pyfilename,
                                     img_html=indent(ex.html, 4))
        with open(os.path.join(target_dir, ex.rstfilename), 'w') as f:
            f.write(output)
        toctree += ex.toctree_entry()
        contents += ex.contents_entry()
    # pad the banner so the front page always has 10 entries to cycle
    if len(banner_data) < 10:
        banner_data = (4 * banner_data)[:10]
    # write index file
    index_file = os.path.join(target_dir, 'index.rst')
    with open(index_file, 'w') as index:
        index.write(INDEX_TEMPLATE.format(sphinx_tag="example-gallery",
                                          toctree=toctree,
                                          contents=contents))
    # write javascript include for front page
    js_file = os.path.join(static_dir, 'banner_data.js')
    with open(js_file, 'w') as js:
        js.write(BANNER_JS_TEMPLATE.format(
            banner_data=json.dumps(banner_data)))
def setup(app):
    """Sphinx extension entry point: run the gallery build when the builder starts."""
    app.connect('builder-inited', main)
| bsd-3-clause |
jordanopensource/data-science-bootcamp | MachineLearning/Session3/k_means_cluster.py | 1 | 2249 | import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
    """Scatter-plot the clustered points, optionally starring POIs, and save the figure to *name*."""
    # one color per cluster label -- extend this list to draw more than
    # five clusters
    colors = ["b", "c", "k", "m", "g"]
    for idx, cluster in enumerate(pred):
        point = features[idx]
        plt.scatter(point[0], point[1], color=colors[cluster])
    if mark_poi:
        # overlay a red star on every point flagged as a person of interest
        for idx in range(len(pred)):
            if poi[idx]:
                plt.scatter(features[idx][0], features[idx][1], color="r", marker="*")
    plt.xlabel(f1_name)
    plt.ylabel(f2_name)
    plt.savefig(name)
    plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi  = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list )
# split the feature matrix into the boolean POI target and the numeric inputs
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2 in finance_features:
    plt.scatter( f1, f2 )
plt.show()
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
### rename the "name" parameter when you change the number of features
### so that the figure gets saved to a different file
try:
    Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
    # `pred` is intentionally undefined until the student adds clustering code
    print "no predictions object named pred found, no clusters to plot"
| mit |
ChanChiChoi/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

import numpy as np
from sklearn import linear_model

###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features actually impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)

# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]

###############################################################################
# Compute train and test errors
# Sweep the regularization strength over 6 decades; `score` returns R^2,
# so higher is better.
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
    enet.set_params(alpha=alpha)
    enet.fit(X_train, y_train)
    train_errors.append(enet.score(X_train, y_train))
    test_errors.append(enet.score(X_test, y_test))

# Best alpha = the one maximizing held-out R^2.
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)

# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_

###############################################################################
# Plot results functions

import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')

# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
neutrons/FastGR | addie/processing/idl/table_handler.py | 1 | 22403 | from __future__ import (absolute_import, division, print_function)
#import re
import glob
import os
import numpy as np
from qtpy.QtCore import (Qt)
from qtpy.QtGui import (QCursor)
from qtpy.QtWidgets import (QFileDialog, QMenu, QMessageBox, QTableWidgetSelectionRange)
import addie.processing.idl.populate_master_table
from addie.processing.idl.export_table import ExportTable
from addie.processing.idl.import_table import ImportTable
from addie.utilities.file_handler import FileHandler
from addie.processing.idl.populate_background_widgets import PopulateBackgroundWidgets
from addie.processing.idl.sample_environment_handler import SampleEnvironmentHandler
import addie.processing.idl.step2_gui_handler
from addie.widgets.filedialog import get_save_file
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
class TableHandler(object):
    """Helpers around the step-2 post-processing QTableWidget of the main
    window: row selection/metadata collection, context menu, copy/paste,
    plotting of S(Q)/temperature data, and import/export of the table.

    All widget access goes through ``parent.postprocessing_ui``.
    """

    # Cached result of retrieve_list_of_selected_rows(): list of per-row
    # metadata dicts.
    list_selected_row = None

    def __init__(self, parent=None):
        # `parent` is the application's main window.
        self.parent = parent

    def retrieve_list_of_selected_rows(self):
        """Fill ``self.list_selected_row`` with the metadata of every row
        whose first-column checkbox is checked."""
        self.list_selected_row = []
        for _row_index in range(self.parent.postprocessing_ui.table.rowCount()):
            _widgets = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()
            if len(_widgets) > 0:
                # children()[1] is the QCheckBox inside the cell's container
                _selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row_index, 0).children()[1]
                if _selected_widget.checkState() == Qt.Checked:
                    _entry = self._collect_metadata(row_index=_row_index)
                    self.list_selected_row.append(_entry)

    def _collect_metadata(self, row_index=-1):
        """Return a dict of the sample metadata stored in the given row,
        or an empty list when no row is given (sentinel -1)."""
        if row_index == -1:
            return []
        _name = self.retrieve_item_text(row_index, 1)
        _runs = self.retrieve_item_text(row_index, 2)
        _sample_formula = self.retrieve_item_text(row_index, 3)
        _mass_density = self.retrieve_item_text(row_index, 4)
        _radius = self.retrieve_item_text(row_index, 5)
        _packing_fraction = self.retrieve_item_text(row_index, 6)
        _sample_shape = self._retrieve_sample_shape(row_index)
        _do_abs_correction = self._retrieve_do_abs_correction(row_index)
        _metadata = {'name': _name,
                     'runs': _runs,
                     'sample_formula': _sample_formula,
                     'mass_density': _mass_density,
                     'radius': _radius,
                     'packing_fraction': _packing_fraction,
                     'sample_shape': _sample_shape,
                     'do_abs_correction': _do_abs_correction}
        return _metadata

    def _retrieve_sample_shape(self, row_index):
        """Return the text of the currently selected entry of the
        sample-shape combo box (column 7)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
        _selected_index = _widget.currentIndex()
        _sample_shape = _widget.itemText(_selected_index)
        return _sample_shape

    def _retrieve_do_abs_correction(self, row_index):
        """Return 'go'/'nogo' depending on the absorption-correction
        checkbox in column 8."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 8).children()[1]
        if (_widget.checkState() == Qt.Checked):
            return 'go'
        else:
            return 'nogo'

    def current_row(self):
        """Return the index of the table's current row (-1 when none).

        NOTE(review): right_click() assigns the *result* of this call to the
        instance attribute ``self.current_row``, shadowing this method; other
        methods then read the attribute, not the method.
        """
        _row = self.parent.postprocessing_ui.table.currentRow()
        return _row
def right_click(self, position=None):
_duplicate_row = -1
_plot_sofq = -1
_remove_row = -1
_new_row = -1
_copy = -1
_paste = -1
_cut = -1
_refresh_table = -1
_clear_table = -1
# _import = -1
# _export = -1 _check_all = -1
_uncheck_all = -1
_undo = -1
_redo = -1
_plot_sofq_diff_first_run_row = -1
_plot_sofq_diff_average_row = -1
_plot_cryostat = -1
_plot_furnace = -1
_invert_selection = -1
menu = QMenu(self.parent)
if self.parent.table_selection_buffer == {}:
paste_status = False
else:
paste_status = True
if (self.parent.postprocessing_ui.table.rowCount() > 0):
_undo = menu.addAction("Undo")
_undo.setEnabled(self.parent.undo_button_enabled)
_redo = menu.addAction("Redo")
_redo.setEnabled(self.parent.redo_button_enabled)
menu.addSeparator()
_copy = menu.addAction("Copy")
_paste = menu.addAction("Paste")
self._paste_menu = _paste
_paste.setEnabled(paste_status)
_cut = menu.addAction("Clear")
menu.addSeparator()
_check_all = menu.addAction("Check All")
_uncheck_all = menu.addAction("Unchecked All")
menu.addSeparator()
_invert_selection = menu.addAction("Inverse Selection")
menu.addSeparator()
_new_row = menu.addAction("Insert Blank Row")
if (self.parent.postprocessing_ui.table.rowCount() > 0):
_duplicate_row = menu.addAction("Duplicate Row")
_remove_row = menu.addAction("Remove Row(s)")
menu.addSeparator()
_plot_menu = menu.addMenu('Plot')
_plot_sofq = _plot_menu.addAction("S(Q) ...")
_plot_sofq_diff_first_run_row = _plot_menu.addAction("S(Q) Diff (1st run)...")
_plot_sofq_diff_average_row = _plot_menu.addAction("S(Q) Diff (Avg.)...")
_temp_menu = _plot_menu.addMenu("Temperature")
_plot_cryostat = _temp_menu.addAction("Cyrostat...")
_plot_furnace = _temp_menu.addAction("Furnace...")
menu.addSeparator()
_refresh_table = menu.addAction("Refresh/Reset Table")
_clear_table = menu.addAction("Clear Table")
action = menu.exec_(QCursor.pos())
self.current_row = self.current_row()
if action == _undo:
self.parent.action_undo_clicked()
elif action == _redo:
self.parent.action_redo_clicked()
elif action == _copy:
self._copy()
elif action == _paste:
self._paste()
elif action == _cut:
self._cut()
elif action == _duplicate_row:
self._duplicate_row()
elif action == _plot_sofq:
self._plot_sofq()
elif action == _plot_sofq_diff_first_run_row:
self._plot_sofq_diff_first_run_row()
elif action == _plot_sofq_diff_average_row:
self._plot_sofq_diff_average_row()
elif action == _plot_cryostat:
self._plot_temperature(samp_env_choice='cryostat')
elif action == _plot_furnace:
self._plot_temperature(samp_env_choice='furnace')
elif action == _invert_selection:
self._inverse_selection()
elif action == _new_row:
self._new_row()
elif action == _remove_row:
self._remove_selected_rows()
elif action == _refresh_table:
self._refresh_table()
elif action == _clear_table:
self._clear_table()
elif action == _check_all:
self.check_all()
elif action == _uncheck_all:
self.uncheck_all()
    def _import(self):
        """Prompt for a table text file, clear the table and load the file,
        then refresh the background widgets and GUI state."""
        _current_folder = self.parent.current_folder
        [_table_file, _] = QFileDialog.getOpenFileName(parent=self.parent,
                                                       caption="Select File",
                                                       directory=_current_folder,
                                                       filter=("text (*.txt);; All Files (*.*)"))
        if not _table_file:
            return
        if isinstance(_table_file, tuple):
            _table_file = _table_file[0]
        # remember the folder for the next dialog
        new_path = os.path.dirname(_table_file)
        self.parent.current_folder = new_path
        self._clear_table()
        _import_handler = ImportTable(filename=_table_file, parent=self.parent)
        _import_handler.run()
        _pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
        _pop_back_wdg.run()
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _export(self):
        """Prompt for a destination and write the table to a .txt file
        (extension enforced by FileHandler)."""
        _current_folder = self.parent.current_folder
        _table_file, _ = get_save_file(parent=self.parent,
                                       caption="Select File",
                                       directory=_current_folder,
                                       filter={'text (*.txt)':'txt', 'All Files (*.*)':''})
        if not _table_file:
            return
        if isinstance(_table_file, tuple):
            _table_file = _table_file[0]
        _file_handler = FileHandler(filename=_table_file)
        _file_handler.check_file_extension(ext_requested='txt')
        _table_file = _file_handler.filename
        _export_handler = ExportTable(parent=self.parent,
                                      filename=_table_file)
        _export_handler.run()

    def _copy(self):
        """Remember the first selected range in ``table_selection_buffer``
        and enable the Paste menu entry."""
        _selection = self.parent.postprocessing_ui.table.selectedRanges()
        _selection = _selection[0]
        left_column = _selection.leftColumn()
        right_column = _selection.rightColumn()
        top_row = _selection.topRow()
        bottom_row = _selection.bottomRow()
        self.parent.table_selection_buffer = {'left_column': left_column,
                                              'right_column': right_column,
                                              'top_row': top_row,
                                              'bottom_row': bottom_row}
        self._paste_menu.setEnabled(True)

    def _paste(self, _cut=False):
        """Paste (or clear, when ``_cut``) the buffered selection at the
        current selection.  Requires both selections to start at the same
        column; columns 1-6 are plain text, 7 is the shape combo, 8 the
        absorption checkbox."""
        _copy_selection = self.parent.table_selection_buffer
        _copy_left_column = _copy_selection['left_column']
        # make sure selection start at the same column
        _paste_selection = self.parent.postprocessing_ui.table.selectedRanges()
        _paste_left_column = _paste_selection[0].leftColumn()
        if not (_copy_left_column == _paste_left_column):
            QMessageBox.warning(self.parent,
                                "Check copy/paste selection!",
                                "Check your selection! ")
            return
        _copy_right_column = _copy_selection["right_column"]
        _copy_top_row = _copy_selection["top_row"]
        _copy_bottom_row = _copy_selection["bottom_row"]
        _paste_top_row = _paste_selection[0].topRow()
        index = 0
        for _row in range(_copy_top_row, _copy_bottom_row+1):
            _paste_row = _paste_top_row + index
            for _column in range(_copy_left_column, _copy_right_column + 1):
                if _column in np.arange(1, 7):
                    if _cut:
                        _item_text = ''
                    else:
                        _item_text = self.retrieve_item_text(_row, _column)
                    self.paste_item_text(_paste_row, _column, _item_text)
                if _column == 7:
                    if _cut:
                        _widget_index = 0
                    else:
                        _widget_index = self.retrieve_sample_shape_index(_row)
                    self.set_widget_index(_widget_index, _paste_row)
                if _column == 8:
                    if _cut:
                        _widget_state = Qt.Unchecked
                    else:
                        _widget_state = self.retrieve_do_abs_correction_state(_row)
                    self.set_widget_state(_widget_state, _paste_row)
            index += 1

    def _inverse_selection(self):
        """Select everything, then deselect the ranges that were selected.

        NOTE(review): assigning to ``_range.leftColumn`` / ``rightColun``
        (typo included) sets plain attributes on the range object -- it does
        not widen the range via the Qt API; confirm intended behavior.
        """
        selected_range = self.parent.postprocessing_ui.table.selectedRanges()
        nbr_column = self.parent.postprocessing_ui.table.columnCount()
        self.select_all(status=True)
        # inverse selected rows
        for _range in selected_range:
            _range.leftColumn = 0
            _range.rightColun = nbr_column-1
            self.parent.postprocessing_ui.table.setRangeSelected(_range, False)

    def select_all(self, status=True):
        """Select (or deselect) the whole table."""
        nbr_row = self.parent.postprocessing_ui.table.rowCount()
        nbr_column = self.parent.postprocessing_ui.table.columnCount()
        _full_range = QTableWidgetSelectionRange(0, 0, nbr_row-1, nbr_column-1)
        self.parent.postprocessing_ui.table.setRangeSelected(_full_range, status)

    def check_all(self):
        """Check every row's first-column checkbox."""
        self.select_first_column(status=True)

    def uncheck_all(self):
        """Uncheck every row's first-column checkbox."""
        self.select_first_column(status=False)

    def select_row(self, row=-1, status=True):
        """Select (or deselect) one full row."""
        nbr_column = self.parent.postprocessing_ui.table.columnCount()
        _range = QTableWidgetSelectionRange(row, 0, row, nbr_column-1)
        self.parent.postprocessing_ui.table.setRangeSelected(_range, status)

    def check_row(self, row=-1, status=True):
        """Set the checked state of one row's first-column checkbox."""
        _widgets = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()
        if len(_widgets) > 0:
            _selected_widget = self.parent.postprocessing_ui.table.cellWidget(row, 0).children()[1]
            _selected_widget.setChecked(status)

    def select_first_column(self, status=True):
        """Set every row's checkbox to ``status`` and refresh GUI state."""
        for _row in range(self.parent.postprocessing_ui.table.rowCount()):
            _widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
            if len(_widgets) > 0:
                _selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
                _selected_widget.setChecked(status)
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def check_selection_status(self, state, row):
        """Apply ``state`` to the checkbox of every row inside the current
        selection ranges, then refresh GUI state."""
        list_ranges = self.parent.postprocessing_ui.table.selectedRanges()
        for _range in list_ranges:
            bottom_row = _range.bottomRow()
            top_row = _range.topRow()
            range_row = list(range(top_row, bottom_row + 1))
            for _row in range_row:
                _widgets = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()
                if len(_widgets) > 0:
                    _selected_widget = self.parent.postprocessing_ui.table.cellWidget(_row, 0).children()[1]
                    _selected_widget.setChecked(state)
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _cut(self):
        """Copy the selection, then paste it in cut mode (clearing cells)."""
        self._copy()
        self._paste(_cut=True)

    def _duplicate_row(self):
        """Insert a copy of the current row's metadata as a new row."""
        _row = self.current_row
        metadata_to_copy = self._collect_metadata(row_index=_row)
        o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
        o_populate.add_new_row(metadata_to_copy, row=_row)
    def _plot_fetch_files(self, file_type='SofQ'):
        """Return [{'file': path, 'run': run}, ...] for the runs listed in
        the current row, looking for S(Q) .dat files or NeXus .nxs.h5 files
        depending on ``file_type``."""
        if file_type == 'SofQ':
            search_dir = './SofQ'
            prefix = 'NOM_'
            suffix = 'SQ.dat'
        elif file_type == 'nexus':
            cwd = os.getcwd()
            search_dir = cwd[:cwd.find('shared')]+'/nexus'
            prefix = 'NOM_'
            suffix = '.nxs.h5'
        #ipts = int(re.search(r"IPTS-(\d*)\/", os.getcwd()).group(1))
        # NOTE: self.current_row is the int attribute set by right_click()
        _row = self.current_row
        _row_runs = self._collect_metadata(row_index=_row)['runs'].split(',')
        output_list = list()
        file_list = [a_file for a_file in glob.glob(search_dir+'/'+prefix+'*')]
        for run in _row_runs:
            the_file = search_dir+'/'+prefix+str(run)+suffix
            if the_file in file_list:
                output_list.append({'file': the_file, 'run': run})
        return output_list

    def _plot_fetch_data(self):
        """Load x/y columns of every S(Q) file of the current row; returns
        the file list with 'x' and 'y' arrays added to each entry."""
        file_list = self._plot_fetch_files(file_type='SofQ')
        for data in file_list:
            with open(data['file'], 'r') as handle:
                # files are 3-column (x, y, e); the error column is unused
                x, y, e = np.loadtxt(handle, unpack=True)
                data['x'] = x
                data['y'] = y
        return file_list

    def _plot_datasets(self, datasets, shift_value=1.0, cmap_choice='inferno', title=None):
        """Plot datasets vertically shifted by ``shift_value`` each, colored
        along a colormap, with the legend outside the axes.  Blocks on
        plt.show().  NOTE: mutates each dataset's 'y' in place."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # configure plot
        cmap = plt.get_cmap(cmap_choice)
        cNorm = colors.Normalize(vmin=0, vmax=len(datasets))
        scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)
        mrks = [0, -1]
        # plot data
        shifter = 0.0
        for idx, data in enumerate(datasets):
            data['y'] += shifter
            colorVal = scalarMap.to_rgba(idx)
            if 'linestyle' in data:
                ax.plot(data['x'], data['y'], data['linestyle']+'o', label=data['run'], color=colorVal, markevery=mrks,)
            else:
                ax.plot(data['x'], data['y'], label=data['run'], color=colorVal, markevery=mrks)
            shifter += shift_value
        # shrink the axes to leave room for the legend on the right
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1], title='Runs', loc='center left', bbox_to_anchor=(1, 0.5))
        if title:
            fig.suptitle(title)
        plt.show()

    def _plot_sofq(self):
        """Plot S(Q) of every run in the current row, sorted by run number."""
        sofq_datasets = self._plot_fetch_data()
        self._plot_datasets(sorted(sofq_datasets, key=lambda k: int(k['run'])), title='S(Q)')

    def _plot_sofq_diff_first_run_row(self):
        """Plot S(Q) differences relative to the first fetched run."""
        sofq_datasets = self._plot_fetch_data()
        # shallow copy so the baseline keeps its original 'y'
        sofq_base = dict(sofq_datasets[0])
        for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
            sofq['y'] = sofq['y'] - sofq_base['y']
        self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - S(Q) for run '+sofq_base['run'])

    def _plot_sofq_diff_average_row(self):
        """Plot S(Q) differences relative to the average over all runs."""
        sofq_datasets = self._plot_fetch_data()
        sofq_data = [sofq['y'] for sofq in sofq_datasets]
        sofq_avg = np.average(sofq_data, axis=0)
        for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
            sofq['y'] = sofq['y'] - sofq_avg
        self._plot_datasets(sofq_datasets, shift_value=0.2, title='S(Q) - <S(Q)>')

    def _plot_temperature(self, samp_env_choice=None):
        """Plot sample and environment temperature logs ('cryostat' or
        'furnace') read from the row's NeXus files."""
        file_list = self._plot_fetch_files(file_type='nexus')
        samp_env = SampleEnvironmentHandler(samp_env_choice)
        datasets = list()
        for data in file_list:
            samp_x, samp_y = samp_env.getDataFromFile(data['file'], 'samp')
            envi_x, envi_y = samp_env.getDataFromFile(data['file'], 'envi')
            print(data['file'])
            datasets.append({'run': data['run'] + '_samp', 'x': samp_x, 'y': samp_y, 'linestyle': '-'})
            # NOTE(review): 'run': None makes the sorted(..., key=k['run'])
            # below compare None with str -- TypeError on Python 3; confirm.
            datasets.append({'run': None, 'x': envi_x, 'y': envi_y, 'linestyle': '--'})
        self._plot_datasets(sorted(datasets, key=lambda k: k['run']),
                            shift_value=0.0, title='Temperature: '+samp_env_choice)
    def _new_row(self):
        """Insert a blank row at the current position (top when no row is
        selected)."""
        _row = self.current_row
        if _row == -1:
            _row = 0
        o_populate = addie.processing.idl.populate_master_table.PopulateMasterTable(main_window=self.parent)
        _metadata = o_populate.empty_metadata()
        o_populate.add_new_row(_metadata, row=_row)

    def _remove_selected_rows(self):
        """Remove every selected row, compensating indices for rows already
        removed, then refresh the background widgets."""
        selected_range = self.parent.postprocessing_ui.table.selectedRanges()
        _nbr_row_removed = 0
        _local_nbr_row_removed = 0
        for _range in selected_range:
            _top_row = _range.topRow()
            _bottom_row = _range.bottomRow()
            nbr_row = _bottom_row - _top_row + 1
            for i in np.arange(nbr_row):
                # rows shift up as we delete, hence the offset
                self._remove_row(row=_top_row - _nbr_row_removed)
                _local_nbr_row_removed += 1
            _nbr_row_removed = _local_nbr_row_removed
        _pop_back_wdg = PopulateBackgroundWidgets(main_window=self.parent)
        _pop_back_wdg.run()

    def _remove_row(self, row=-1):
        """Remove one row (current row by default) and refresh GUI state."""
        if row == -1:
            row = self.current_row
        self.parent.postprocessing_ui.table.removeRow(row)
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _refresh_table(self):
        """Repopulate the table from scratch and refresh GUI state."""
        self.parent.populate_table_clicked()
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def _clear_table(self):
        """Remove all rows, clear the background widgets and refresh GUI
        state."""
        _number_of_row = self.parent.postprocessing_ui.table.rowCount()
        self.parent.postprocessing_ui.table.setSortingEnabled(False)
        for _row in np.arange(_number_of_row):
            # always remove row 0: remaining rows shift up after each removal
            self.parent.postprocessing_ui.table.removeRow(0)
        self.parent.postprocessing_ui.background_line_edit.setText("")
        self.parent.postprocessing_ui.background_comboBox.clear()
        _o_gui = addie.processing.idl.step2_gui_handler.Step2GuiHandler(main_window=self.parent)
        _o_gui.check_gui()

    def set_widget_state(self, _widget_state, _row):
        """Set the absorption-correction checkbox state (column 8)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
        _widget.setCheckState(_widget_state)

    def retrieve_do_abs_correction_state(self, _row):
        """Return the absorption-correction checkbox state (column 8)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(_row, 8).children()[1]
        return _widget.checkState()

    def set_widget_index(self, _widget_index, _row):
        """Set the sample-shape combo box selection (column 7)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(_row, 7)
        _widget.setCurrentIndex(_widget_index)

    def paste_item_text(self, _row, _column, _item_text):
        """Write text into an existing table item."""
        _item = self.parent.postprocessing_ui.table.item(_row, _column)
        _item.setText(_item_text)

    def retrieve_sample_shape_index(self, row_index):
        """Return the selected index of the sample-shape combo (column 7)."""
        _widget = self.parent.postprocessing_ui.table.cellWidget(row_index, 7)
        _selected_index = _widget.currentIndex()
        return _selected_index

    def retrieve_item_text(self, row, column):
        """Return the cell text, or '' when the cell has no item."""
        _item = self.parent.postprocessing_ui.table.item(row, column)
        if _item is None:
            return ''
        else:
            return str(_item.text())

    def name_search(self):
        """Select every row whose name (column 1) contains the search text;
        clear the selection when the search box is empty."""
        nbr_row = self.parent.postprocessing_ui.table.rowCount()
        if nbr_row == 0:
            return
        _string = str(self.parent.postprocessing_ui.name_search.text()).lower()
        if _string == '':
            self.select_all(status=False)
        else:
            for _row in range(nbr_row):
                _text_row = str(self.parent.postprocessing_ui.table.item(_row, 1).text()).lower()
                if _string in _text_row:
                    self.select_row(row=_row, status=True)
| mit |
yyjiang/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA

###############################################################################
# Dataset based latent variables model

n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)

latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))

# Split each dataset in half: first half train, second half test.
# FIX: use floor division -- ``n / 2`` is a float in Python 3 and floats
# are not valid slice indices (TypeError).
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]

print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS

# Transform data
# ~~~~~~~~~~~~~~
# Fit on the training half; project both halves onto the 2 PLS components.
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)

# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")

plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")

# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
          % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())

plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
          % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2

n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
# True coefficient matrix: only the first two features contribute.
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5

pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
# NOTE(review): `coefs` is the attribute name in old scikit-learn releases;
# modern versions expose `coef_` -- confirm against the pinned version.
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1

n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of compements exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)

cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# FIX: transform with the fitted CCA model -- the original called
# ``plsca.transform`` here, so the CCA fit above was never actually used.
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
RichardTMR/homework | week1/class1_linear_regression.py | 1 | 3290 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
def plot_line(x, y, y_hat, line_color='blue'):
    """Scatter the observations (x, y) in black and overlay the fitted
    line y_hat; blocks until the matplotlib window is closed."""
    # Plot outputs
    plt.scatter(x, y, color='black')
    plt.plot(x, y_hat, color=line_color,
             linewidth=3)
    plt.xticks(())
    plt.yticks(())
    plt.show()
def linear_grad_func(theta, x, y):
    """Gradient of the least-squares cost with respect to theta.

    Parameters
    ----------
    theta : ndarray, shape (1, n_features + 1)
        Parameters; theta[0, 0] multiplies the bias column.
    x : ndarray, shape (m, n_features)
        Design matrix without the bias column.
    y : ndarray, shape (m, 1)
        Targets.

    Returns
    -------
    ndarray, shape (1, n_features + 1)
        Gradient averaged over the m samples.
    """
    m = y.size
    # Prepend the bias column with np.c_ -- consistent with linear_val_func
    # and, unlike the previous hard-coded `it[:, 1] = x[:, 0]`, it works for
    # any number of features, not just one.
    design = np.c_[np.ones(x.shape[0]), x]
    prediction = design.dot(theta.transpose())
    grad = ((prediction - y).transpose().dot(design)) / m * 1.0
    return grad
def linear_val_func(theta, x):
    """Evaluate the linear model at x: returns (m, 1) predictions with a
    bias column prepended to x."""
    bias = np.ones((x.shape[0], 1))
    design = np.hstack((bias, x))
    return design.dot(theta.T)
def linear_cost_func(theta, x, y):
    """Least-squares cost J(theta) = sum((X.theta - y)^2) / (2m).

    Parameters mirror linear_grad_func; returns a scalar float.
    """
    m = len(y)
    # np.c_ keeps this consistent with linear_val_func/linear_grad_func and
    # generalizes the previous single-feature-only design matrix.
    design = np.c_[np.ones(x.shape[0]), x]
    predictions = design.dot(theta.transpose())
    sqerrors = (predictions - y) ** 2
    cost = (1.0 / (2 * m)) * sqerrors.sum()
    return cost
def linear_grad_desc(theta, X_train, Y_train, lr=0.1, max_iter=10000, converge_change=.001):
    """Batch gradient descent on the least-squares cost.

    Stops when the absolute cost change drops below ``converge_change`` or
    after ``max_iter`` iterations.  Returns (theta, history) where history
    is a list of [iteration, cost] pairs (entry 0 is the initial cost).
    """
    history = []
    current_cost = linear_cost_func(theta, X_train, Y_train)
    history.append([0, current_cost])
    delta = 1
    step = 1
    while delta > converge_change and step < max_iter:
        previous_cost = current_cost
        # one gradient step
        gradient = linear_grad_func(theta, X_train, Y_train)
        theta = theta - lr * gradient
        current_cost = linear_cost_func(theta, X_train, Y_train)
        history.append([step, current_cost])
        delta = abs(current_cost - previous_cost)
        step += 1
    return theta, history
def linear_regression():
    """Fit the diabetes dataset (single feature) with the hand-rolled
    gradient descent, print coefficients/MSE and plot the fit."""
    # load dataset
    dataset = datasets.load_diabetes()
    # Select only 2 dims
    # (actually selects the single feature at column 2 of the data matrix)
    X = dataset.data[:, 2]
    Y = dataset.target
    # split dataset into training and testing: last 20 samples are the test set
    X_train = X[:-20, None]
    X_test = X[-20:, None]
    Y_train = Y[:-20, None]
    Y_test = Y[-20:, None]
    # Linear regression
    theta = np.random.rand(1, X_train.shape[1]+1)
    fitted_theta, cost_iter = linear_grad_desc(theta, X_train, Y_train, lr=0.1, max_iter=50000)
    # theta layout: column -2 is the intercept, column -1 the slope
    print('Coefficients: {}'.format(fitted_theta[0,-1]))
    print('Intercept: {}'.format(fitted_theta[0,-2]))
    print('MSE: {}'.format(np.sum((linear_val_func(fitted_theta, X_test) - Y_test)**2) / Y_test.shape[0]))
    plot_line(X_test, Y_test, linear_val_func(fitted_theta, X_test))
def sklearn_linear_regression():
    """Fit the same diabetes split with sklearn's LinearRegression for
    comparison; print coefficients/MSE and plot the fit in red."""
    # load dataset
    dataset = datasets.load_diabetes()
    # Select only 2 dims
    # (actually selects the single feature at column 2 of the data matrix)
    X = dataset.data[:, 2]
    Y = dataset.target
    # split dataset into training and testing: last 20 samples are the test set
    X_train = X[:-20, None]
    X_test = X[-20:, None]
    Y_train = Y[:-20, None]
    Y_test = Y[-20:, None]
    # Linear regression
    regressor = linear_model.LinearRegression()
    regressor.fit(X_train, Y_train)
    print('Coefficients: {}'.format(regressor.coef_))
    print('Intercept: {}'.format(regressor.intercept_))
    print('MSE:{}'.format(np.mean((regressor.predict(X_test) - Y_test) ** 2)))
    plot_line(X_test, Y_test, regressor.predict(X_test),line_color='red')
def main():
    """Run the hand-rolled and the sklearn regression demos in turn."""
    demos = [('Class 1 Linear Regression Example', linear_regression),
             ('sklearn Linear Regression Example', sklearn_linear_regression)]
    for position, (banner, demo) in enumerate(demos):
        if position:
            print('')
        print(banner)
        demo()


if __name__ == "__main__":
    main()
| apache-2.0 |
jyhmiinlin/cineFSE | GeRaw/phantom.py | 2 | 5548 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 17:22:39 2011
Copied from Alex Opie (see below)
@author: Eric
"""
## Copyright (C) 2010 Alex Opie <lx_op@orcon.net.nz>
##
## This program is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or (at
## your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; see the file COPYING. If not, see
## <http://www.gnu.org/licenses/>.
#import numpy as np
from numpy import size, zeros, mgrid, flipud
from math import pi, cos, sin
def phantom(n=256, p_type='Modified Shepp-Logan', ellipses=None):
    """Create an n-by-n Shepp-Logan (or custom) phantom image.

    Parameters
    ----------
    n : edge length of the square image.
    p_type : built-in phantom name, 'Shepp-Logan' or 'Modified Shepp-Logan';
        ignored when `ellipses` is given.
    ellipses : optional custom ellipse table; each row is
        [I, a, b, x0, y0, phi] with I the additive intensity, a/b the major/
        minor semi-axes, (x0, y0) the centre and phi the counterclockwise
        rotation in degrees.  The image bounding box is [-1, 1] x [-1, 1].

    Returns
    -------
    An (n, n) array: the sum of the intensities of all ellipses covering
    each pixel.

    Raises
    ------
    AssertionError if a custom ellipse table does not have 6 columns.
    ValueError (via _select_phantom) for an unknown `p_type`.

    References: Shepp & Logan (1974); Toft (1996).
    """
    if ellipses is None:
        ellipses = _select_phantom(p_type)
    elif size(ellipses, 1) != 6:
        raise AssertionError("Wrong number of columns in user phantom")

    # Pixel grid over the [-1, 1] x [-1, 1] bounding box.
    ygrid, xgrid = mgrid[-1:1:(1j * n), -1:1:(1j * n)]
    img = zeros((n, n))

    for intensity, major, minor, cx, cy, angle_deg in ellipses:
        theta = angle_deg * pi / 180  # rotation in radians
        ct = cos(theta)
        st = sin(theta)
        # Offset coordinates relative to the ellipse centre.
        dx = xgrid - cx
        dy = ygrid - cy
        # Pixels inside the rotated ellipse.
        inside = (((dx * ct + dy * st) ** 2) / major ** 2
                  + ((dy * ct - dx * st) ** 2) / minor ** 2) <= 1
        img[inside] += intensity

    return flipud(img)
def _select_phantom (name):
if (name.lower () == 'shepp-logan'):
e = _shepp_logan ()
elif (name.lower () == 'modified shepp-logan'):
e = _mod_shepp_logan ()
else:
raise ValueError ("Unknown phantom type: %s" % name)
return e
def _shepp_logan ():
# Standard head phantom, taken from Shepp & Logan
return [[ 2, .69, .92, 0, 0, 0],
[-.98, .6624, .8740, 0, -.0184, 0],
[-.02, .1100, .3100, .22, 0, -18],
[-.02, .1600, .4100, -.22, 0, 18],
[ .01, .2100, .2500, 0, .35, 0],
[ .01, .0460, .0460, 0, .1, 0],
[ .02, .0460, .0460, 0, -.1, 0],
[ .01, .0460, .0230, -.08, -.605, 0],
[ .01, .0230, .0230, 0, -.606, 0],
[ .01, .0230, .0460, .06, -.605, 0]]
def _mod_shepp_logan ():
# Modified version of Shepp & Logan's head phantom,
# adjusted to improve contrast. Taken from Toft.
return [[ 1, .69, .92, 0, 0, 0],
[-.80, .6624, .8740, 0, -.0184, 0],
[-.20, .1100, .3100, .22, 0, -18],
[-.20, .1600, .4100, -.22, 0, 18],
[ .10, .2100, .2500, 0, .35, 0],
[ .10, .0460, .0460, 0, .1, 0],
[ .10, .0460, .0460, 0, -.1, 0],
[ .10, .0460, .0230, -.08, -.605, 0],
[ .10, .0230, .0230, 0, -.606, 0],
[ .10, .0230, .0460, .06, -.605, 0]]
#def ?? ():
# # Add any further phantoms of interest here
# return np.array (
# [[ 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0]])
| gpl-3.0 |
nilmtk/nilmtk | nilmtk/disaggregate/fhmm_exact.py | 1 | 10107 | import itertools
from copy import deepcopy
from collections import OrderedDict
from warnings import warn
import pickle
import nilmtk
import pandas as pd
import numpy as np
from hmmlearn import hmm
from nilmtk.feature_detectors import cluster
from nilmtk.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
import datetime
import matplotlib.pyplot as plt
def sort_startprob(mapping, startprob):
    """Reorder the initial state probabilities.

    Parameters
    ----------
    mapping : dict
        new index -> old index, as produced by `return_sorting_mapping`.
    startprob : array-like
        Initial state probabilities in the original state order.

    Returns
    -------
    numpy.ndarray
        Probabilities permuted into increasing-power-mean order.
    """
    n_states = len(startprob)
    reordered = np.array([startprob[mapping[new_idx]]
                          for new_idx in range(n_states)])
    return reordered
def sort_covars(mapping, covars):
    """Reorder per-state covariances along the first axis.

    *mapping* maps new index -> old index (see `return_sorting_mapping`).
    """
    sorted_covars = np.empty_like(covars)
    for new_idx in range(len(covars)):
        sorted_covars[new_idx] = covars[mapping[new_idx]]
    return sorted_covars
def sort_transition_matrix(mapping, A):
    """Permute a transition matrix into increasing order of power means.

    Parameters
    ----------
    mapping : dict
        new index -> old index, as produced by `return_sorting_mapping`.
    A : numpy.array of shape (k, k)
        transition matrix

    Returns
    -------
    numpy.ndarray of shape (k, k)
        The matrix with both rows and columns permuted (float dtype).
    """
    n_states = len(A)
    order = [mapping[idx] for idx in range(n_states)]
    A_sorted = np.zeros((n_states, n_states))
    for row in range(n_states):
        for col in range(n_states):
            A_sorted[row, col] = A[order[row], order[col]]
    return A_sorted
def sort_learnt_parameters(startprob, means, covars, transmat):
    """Sort every HMM parameter set into increasing order of state means.

    All four parameter arrays are permuted with the same mapping so they
    stay mutually consistent.  Returns
    [startprob, means, covars, transmat] in the new order.
    """
    order = return_sorting_mapping(means)
    means_sorted = np.sort(means, axis=0)
    startprob_sorted = sort_startprob(order, startprob)
    covars_sorted = sort_covars(order, covars)
    transmat_sorted = sort_transition_matrix(order, transmat)
    # A permutation must never change any shapes.
    assert np.shape(means_sorted) == np.shape(means)
    assert np.shape(startprob_sorted) == np.shape(startprob)
    assert np.shape(transmat_sorted) == np.shape(transmat)
    return [startprob_sorted, means_sorted, covars_sorted, transmat_sorted]
def compute_A_fhmm(list_A):
    """Combine per-chain transition matrices into the factorial-HMM one.

    Parameters
    ----------
    list_A : list
        Transition matrices of the individual learnt HMMs.

    Returns
    -------
    numpy.ndarray
        Kronecker product of all matrices, i.e. the transition matrix of
        the combined (factorial) chain.
    """
    combined = list_A[0]
    for A_next in list_A[1:]:
        combined = np.kron(combined, A_next)
    return combined
def compute_means_fhmm(list_means):
    """Combine per-chain state means into factorial-HMM emission params.

    Each combined state emits the sum of the component-chain means; every
    combined state gets the same fixed variance of 5.

    Returns
    -------
    [mu, cov] : list
        mu has shape (n_combinations, 1); cov has shape
        (n_combinations, 1, 1).
    """
    summed = [sum(combo) for combo in itertools.product(*list_means)]
    n_combo = len(summed)
    mu = np.asarray(summed).reshape((n_combo, 1))
    cov = np.tile(5 * np.identity(1), (n_combo, 1, 1))
    return [mu, cov]
def compute_pi_fhmm(list_pi):
    """Combine per-chain initial probabilities into the factorial-HMM one.

    Parameters
    ----------
    list_pi : list
        Initial-state probability vectors of the individual learnt HMMs.

    Returns
    -------
    numpy.ndarray
        Kronecker product of all vectors (combined Pi for the FHMM).
    """
    combined = list_pi[0]
    for pi_next in list_pi[1:]:
        combined = np.kron(combined, pi_next)
    return combined
def create_combined_hmm(model):
    """Build a single GaussianHMM equivalent to the factorial combination
    of the individual appliance HMMs.

    Parameters
    ----------
    model : mapping
        appliance name -> fitted ``hmm.GaussianHMM`` (iteration order
        defines the ordering of the Kronecker products below).
    """
    list_pi = [model[appliance].startprob_ for appliance in model]
    list_A = [model[appliance].transmat_ for appliance in model]
    list_means = [model[appliance].means_.flatten().tolist()
                  for appliance in model]

    # Kronecker-combine the chains, then attach the parameters to a
    # single "flat" HMM over all state combinations.
    pi_combined = compute_pi_fhmm(list_pi)
    A_combined = compute_A_fhmm(list_A)
    [mean_combined, cov_combined] = compute_means_fhmm(list_means)

    combined_model = hmm.GaussianHMM(n_components=len(pi_combined), covariance_type='full')
    combined_model.startprob_ = pi_combined
    combined_model.transmat_ = A_combined
    combined_model.covars_ = cov_combined
    combined_model.means_ = mean_combined
    return combined_model
def return_sorting_mapping(means):
    """Map sorted-state index -> original state index.

    Parameters
    ----------
    means : numpy.ndarray
        State means, shape (n_states, 1).

    Returns
    -------
    dict
        ``mapping[new_index] = old_index`` such that indexing the
        original arrays with the mapping yields increasing-mean order.

    Notes
    -----
    The previous implementation deep-copied *means* before calling
    ``np.sort``; ``np.sort`` already returns a copy, so the copy was
    redundant and has been removed (behavior unchanged).
    """
    sorted_means = np.sort(means, axis=0)
    mapping = {}
    for new_idx, val in enumerate(sorted_means):
        # NOTE: if two states share the same mean, both sorted slots map
        # to the first matching original index (pre-existing behavior).
        mapping[new_idx] = np.where(val == means)[0][0]
    return mapping
def decode_hmm(length_sequence, centroids, appliance_list, states):
    """Decode a combined-FHMM state sequence into per-appliance values.

    Parameters
    ----------
    length_sequence : int
        Number of time steps in *states*.
    centroids : mapping
        appliance name -> sequence of per-state power levels.
    appliance_list : iterable
        Appliance names in the same order used to build the combined HMM.
    states : sequence of int
        Combined state index at each time step (mixed-radix encoding of
        the individual appliance states).

    Returns
    -------
    [hmm_states, hmm_power] : list of two dicts
        Per-appliance decoded state indices (int array) and the
        corresponding power levels (float array).
    """
    hmm_states = {}
    hmm_power = {}
    total_num_combinations = 1
    for appliance in appliance_list:
        total_num_combinations *= len(centroids[appliance])
    for appliance in appliance_list:
        # `np.int` was removed in numpy >= 1.24; the builtin is correct.
        hmm_states[appliance] = np.zeros(length_sequence, dtype=int)
        hmm_power[appliance] = np.zeros(length_sequence)
    for i in range(length_sequence):
        factor = total_num_combinations
        for appliance in appliance_list:
            factor = factor // len(centroids[appliance])
            # Floor division keeps the mixed-radix decode exact on
            # Python 3 ("/" would produce a float here).
            temp = int(states[i]) // factor
            hmm_states[appliance][i] = temp % len(centroids[appliance])
            hmm_power[appliance][i] = centroids[
                appliance][hmm_states[appliance][i]]
    return [hmm_states, hmm_power]
class FHMMExact(Disaggregator):
    """Factorial HMM disaggregator using exact inference: one GaussianHMM
    is learnt per appliance, the chains are Kronecker-combined into a
    single HMM, and decoding maps combined states back to appliances."""

    def __init__(self, params):
        self.model = {}
        self.MODEL_NAME = 'FHMM'  # Add the name for the algorithm
        self.save_model_path = params.get('save-model-path', None)
        self.load_model_path = params.get('pretrained-model-path', None)
        self.chunk_wise_training = params.get('chunk_wise_training', False)
        # num_of_states > 0 forces that many states per appliance;
        # otherwise the count is chosen by clustering in partial_fit.
        self.num_of_states = params.get('num_of_states', 2)
        if self.load_model_path:
            self.load_model(self.load_model_path)
        self.app_names = []

    def partial_fit(self, train_main, train_appliances, **load_kwargs):
        """
        Train using 1d FHMM.

        train_main : list of mains DataFrames (concatenated below).
        train_appliances : list of (appliance_name, list_of_DataFrames).
        """
        print(".........................FHMM partial_fit.................")
        train_main = pd.concat(train_main, axis=0)
        train_app_tmp = []
        for app_name, df_list in train_appliances:
            df_list = pd.concat(df_list, axis=0)
            train_app_tmp.append((app_name, df_list))
            self.app_names.append(app_name)
        print(train_main.shape)
        train_appliances = train_app_tmp
        learnt_model = OrderedDict()
        num_meters = len(train_appliances)
        # Cap cluster search when there are many meters, to bound the
        # size of the combined (Kronecker-product) state space.
        if num_meters > 12:
            max_num_clusters = 2
        else:
            max_num_clusters = 3
        for appliance, meter in train_appliances:
            meter_data = meter.dropna()
            X = meter_data.values.reshape((-1, 1))
            if not len(X):
                print("Submeter '{}' has no samples, skipping...".format(meter))
                continue
            assert X.ndim == 2
            self.X = X
            if self.num_of_states > 0:
                # User has specified the number of states for this appliance
                num_total_states = self.num_of_states
            else:
                # Find the optimum number of states
                states = cluster(meter_data, max_num_clusters)
                num_total_states = len(states)
            print("Training model for submeter '{}'".format(appliance))
            learnt_model[appliance] = hmm.GaussianHMM(num_total_states, "full")
            # Fit
            learnt_model[appliance].fit(X)
            print("Learnt model for : " + appliance)
        # Check to see if there are any more chunks.
        # TODO handle multiple chunks per appliance.

        # Combining to make a AFHMM
        self.meters = []
        new_learnt_models = OrderedDict()
        for meter in learnt_model:
            print(meter)
            # Sort each chain's states by increasing mean power so the
            # combined model's state ordering is well defined.
            startprob, means, covars, transmat = sort_learnt_parameters(
                learnt_model[meter].startprob_, learnt_model[meter].means_,
                learnt_model[meter].covars_, learnt_model[meter].transmat_)
            new_learnt_models[meter] = hmm.GaussianHMM(startprob.size, "full")
            new_learnt_models[meter].startprob_ = startprob
            new_learnt_models[meter].transmat_ = transmat
            new_learnt_models[meter].means_ = means
            new_learnt_models[meter].covars_ = covars
            # UGLY! But works.
            self.meters.append(meter)

        learnt_model_combined = create_combined_hmm(new_learnt_models)
        self.individual = new_learnt_models
        self.model = learnt_model_combined
        print("print ...........", self.model)
        print("FHMM partial_fit end.................")

    def disaggregate_chunk(self, test_mains_list):
        """Disaggregate the test data according to the model learnt previously

        Performs 1D FHMM disaggregation.

        For now assuming there is no missing data at this stage.
        """
        # See v0.1 code
        # for ideas of how to handle missing data in this code if needs be.

        # Array of learnt states
        test_prediction_list = []
        for test_mains in test_mains_list:
            learnt_states_array = []
            if len(test_mains) == 0:
                # Empty chunk: emit an empty frame with the right columns.
                tmp = pd.DataFrame(index=test_mains.index, columns=self.app_names)
                test_prediction_list.append(tmp)
            else:
                length = len(test_mains.index)
                temp = test_mains.values.reshape(length, 1)
                learnt_states_array.append(self.model.predict(temp))

                # Model: rounded, sorted integer power levels per appliance,
                # used as decoding centroids.
                means = OrderedDict()
                for elec_meter, model in self.individual.items():
                    means[elec_meter] = (
                        model.means_.round().astype(int).flatten().tolist())
                    means[elec_meter].sort()

                decoded_power_array = []
                decoded_states_array = []
                for learnt_states in learnt_states_array:
                    [decoded_states, decoded_power] = decode_hmm(
                        len(learnt_states), means, means.keys(), learnt_states)
                    decoded_states_array.append(decoded_states)
                    decoded_power_array.append(decoded_power)

                appliance_powers = pd.DataFrame(decoded_power_array[0], dtype='float32')
                test_prediction_list.append(appliance_powers)
        return test_prediction_list
bzero/statsmodels | statsmodels/sandbox/examples/try_quantile_regression.py | 33 | 1302 | '''Example to illustrate Quantile Regression
Author: Josef Perktold
'''
import numpy as np
from statsmodels.compat.python import zip
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
# Simulation: skewed, non-Gaussian noise so the conditional quantiles
# genuinely differ from the OLS mean fit.
sige = 5
nobs, k_vars = 500, 5
x = np.random.randn(nobs, k_vars)
#x[:,0] = 1
y = x.sum(1) + sige * (np.random.randn(nobs)/2 + 1)**3

p = 0.5
exog = np.column_stack((np.ones(nobs), x))
# Fit the median and outer-quartile quantile regressions, plus OLS.
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.25)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()

##print 'ols ', res_ols.params
##print '0.25', res_qr2
##print '0.5 ', res_qr
##print '0.75', res_qr3

params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75']

import matplotlib.pyplot as plt

# Sort observations by the OLS fitted value so the fitted lines plot
# as monotone curves.
#sortidx = np.argsort(y)
fitted_ols = np.dot(res_ols.model.exog, params[0])
sortidx = np.argsort(fitted_ols)
x_sorted = res_ols.model.exog[sortidx]
fitted_ols = np.dot(x_sorted, params[0])

plt.figure()
plt.plot(y[sortidx], 'o', alpha=0.75)
# BUG FIX: the loop previously re-declared the label list inline,
# leaving the `labels` variable above unused; reuse it instead.
for lab, beta in zip(labels, params):
    print('%-8s' % lab, np.round(beta, 4))
    fitted = np.dot(x_sorted, beta)
    lw = 2 if lab == 'ols' else 1
    plt.plot(fitted, lw=lw, label=lab)
plt.legend()
plt.show()
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the sklearn.linear_model
    subpackage: builds the cd_fast and sgd_fast C extensions against
    the detected BLAS."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('linear_model', parent_package, top_path)

    cblas_libs, blas_info = get_blas_info()

    if os.name == 'posix':
        # Link libm explicitly on POSIX systems.
        cblas_libs.append('m')

    # NOTE(review): blas_info.pop(...) mutates the dict, so the second
    # add_extension call sees those keys already removed and falls back
    # to the [] defaults -- presumably intentional; verify.
    config.add_extension('cd_fast', sources=['cd_fast.c'],
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []), **blas_info)

    config.add_extension('sgd_fast',
                         sources=['sgd_fast.c'],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    # add other directories
    config.add_subpackage('tests')

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
LaurencePeanuts/Music | beatbox/ctu2015.py | 3 | 9335 | import numpy as np
import matplotlib.pylab as plt
import ipdb
plt.ion()
np.random.seed(3) # for reproduceability

# Radius (Mpc) of the sphere carrying the fake CMB data.
# NOTE(review): presumably a scaled-down toy value -- the real comoving
# distance to the CMB is ~14000 Mpc; confirm intent.
r_cmb_mpc = 14.0
cmap = 'gray'  # colormap used by Inference.view_slice
vscale = 15.   # symmetric color-scale limit used by Inference.view_slice
def demo():
    """
    Short demo made at the "Compute the Universe 2015" Hack Week,
    Berkeley. This was originally part of the "universe" class file
    but has been extracted and tidied away here. The "Beatbox_Demo"
    notebook should still work, though.
    """
    # Generate fake CMB data on a Healpix sphere.
    f = FakeHealpixData()
    f.show()

    # Define a 2d slice through our universe.
    s = SliceSurface()

    # Define an Inference object, then infer and visualize
    # the minimum-variance phi field on the slice, given data
    # on the sphere.
    inf = Inference(f, s)
    inf.calculate_mv_phi()
    inf.view_phi_mv_slice()

    # Make a bunch of realizations and analyze/visualize them.
    # (The block below is deliberately disabled: it is a bare string
    # literal, not executed code.)
    '''
    # RK: realizations have lower variance around the CMB ring (good), but
    # have too-high variance in center of ring.  I think it's an artifact
    # of how I've defined the correlation/covariance function, namely as
    # an integral that starts at k_min = 2*pi/(2*r_cmb).  Not sure where
    # to go from here.
    slice_realizations = []
    for i in range(20):
        print i
        this_slice_realization = inf.calculate_phi_realization()
        slice_realizations.append(this_slice_realization)
    slice_realizations = np.array(slice_realizations)
    ipdb.set_trace()
    '''
class CartesianCoordinates(object):
    """Base class for a set of 3d points stored as .x/.y/.z arrays,
    with helpers for pairwise Euclidean distances."""

    def __init__(self):
        pass

    def update_xyz(self):
        """Refresh the stacked (n_points, 3) coordinate array."""
        self.xyz = np.column_stack([self.x, self.y, self.z])

    def make_distance_array(self, other_cart_coord):
        """Pairwise distances to another point set, shape (n_self, n_other)."""
        # Fast pairwise distances, see
        # https://jakevdp.github.io/blog/2013/06/15/numba-vs-cython-take-2/
        from scipy.spatial.distance import cdist
        return cdist(self.xyz, other_cart_coord.xyz)

    def make_auto_distance_array(self):
        """Pairwise distances of this point set with itself."""
        return self.make_distance_array(self)
class SliceSurface(CartesianCoordinates):
    """A flat square grid of points: a 2d slice through the volume.

    Parameters
    ----------
    position : float
        Intended z-offset of the slice (see NOTE below).
    side_mpc : float
        Side length of the square, in Mpc.
    reso_mpc : float
        Grid spacing, in Mpc.
    """
    def __init__(self, position=0., side_mpc=30., reso_mpc=0.8):
        self.side_mpc = side_mpc
        self.reso_mpc = reso_mpc
        # BUG FIX: np.ceil returns a float, and tile/reshape reject
        # non-integer dimensions on modern numpy -- cast to int.
        n_side = int(np.ceil(side_mpc / reso_mpc))
        self.n_side = n_side
        x_2d = self.reso_mpc * np.tile(np.arange(n_side), n_side).reshape(n_side, n_side)
        x_2d -= x_2d.mean()
        y_2d = self.reso_mpc * np.tile(np.arange(n_side), n_side).reshape(n_side, n_side).T
        y_2d -= y_2d.mean()
        z_2d = self.reso_mpc * np.zeros_like(x_2d) + position
        # NOTE(review): subtracting the mean re-centers z to 0, so the
        # `position` argument has no net effect -- pre-existing behavior,
        # preserved here; confirm whether that is intended.
        z_2d -= z_2d.mean()
        self.x = x_2d.ravel()
        self.y = y_2d.ravel()
        self.z = z_2d.ravel()
        self.update_xyz()
class HealpixSphericalSurface(CartesianCoordinates):
    """Points on a sphere of given radius, located at Healpix pixel
    centres."""
    def __init__(self, radius_mpc=r_cmb_mpc, n_side=2**4):
        # FYI: n_side = 2**4 corresponds to
        # 0.064 radians resolution = ~0.9 Mpc at z~1000.
        from healpy import nside2npix, pix2vec
        self.n_pix = nside2npix(n_side)
        # pix2vec returns unit vectors; scale them to the sphere radius.
        x, y, z = pix2vec(n_side, range(self.n_pix))
        self.radius_mpc = radius_mpc
        self.x = self.radius_mpc*x
        self.y = self.radius_mpc*y
        self.z = self.radius_mpc*z
        self.update_xyz()
class FakeHealpixData(HealpixSphericalSurface):
    """Synthetic data on the Healpix sphere: one Gaussian-process draw
    from the large-scale phi covariance, plus white noise of std sigma."""

    def __init__(self, sigma=1e-10):
        HealpixSphericalSurface.__init__(self)
        self.sigma = sigma  # per-pixel white-noise standard deviation
        self.data = np.zeros(self.n_pix)
        self.add_truth()
        self.add_noise()

    def add_truth(self):
        """Add one multivariate-normal realization of the signal."""
        distance = self.make_auto_distance_array()
        # (removed unused local that held the smallest nonzero distance)
        cov = large_scale_phi_covariance(distance)
        from numpy.random import multivariate_normal
        self.data += multivariate_normal(np.zeros(self.n_pix), cov)

    def add_noise(self):
        """Add zero-mean Gaussian noise with std ``self.sigma``."""
        from numpy.random import randn
        self.data += self.sigma * randn(self.n_pix)

    def show(self):
        """Mollweide-projection view of the data."""
        from healpy import mollview
        mollview(self.data)  # , cmap=cmap, min=-vscale, max=+vscale)
class HealpixPlusSlice(CartesianCoordinates):
    """Concatenation of the Healpix-sphere points and the 2d-slice
    points, with index ranges recording which block is which."""
    def __init__(self):
        healpix = HealpixSphericalSurface()
        # NOTE(review): local name 'slice' shadows the builtin.
        slice = SliceSurface()
        self.n_healpix = len(healpix.x)
        self.n_slice = len(slice.x)
        self.n_total = self.n_healpix + self.n_slice
        # Sphere points occupy the first n_healpix slots, slice the rest;
        # these range objects are later used directly as numpy indices.
        self.ind_healpix = range(0, self.n_healpix)
        self.ind_slice = range(self.n_healpix, self.n_total)
        self.x = np.hstack([healpix.x, slice.x])
        self.y = np.hstack([healpix.y, slice.y])
        self.z = np.hstack([healpix.z, slice.z])
        self.update_xyz()
def large_scale_phi_covariance(distance):
    """Covariance of the large-scale phi field, evaluated element-wise
    on the *distance* array (any shape).

    NOTE(review): the integrand actually summed below is
    pk_phi / k * k**3 * J0(k d) = k**(-1) * J0(k d); confirm this against
    the intended cov(r) ~ Int dk sin(kr)/(k**2 r) quoted just below.
    """
    # should be something like
    # cov(r) ~ Int(dk * sin(k*r)/(k**2 * r) )
    # see Equation 9.32 from Dodelson's Cosmology.

    # The integral will diverge unless we put in this k_min.
    k_min = 2.*np.pi / (2. * r_cmb_mpc) # hack
    k_max = 2.*np.pi / (2. * 0.25) # hack

    # Evaluate covariance on a 1d grid of separations first.
    k_vec = np.arange(k_min, k_max, k_min/4.)
    d_vec = np.arange(0., 1.01*distance.max(), 0.1)
    pk_phi = k_vec**(-3.)
    kd_vec = k_vec * d_vec[:,np.newaxis]
    from scipy.special import jv
    cov_vec = np.sum(pk_phi / k_vec * k_vec**3. * jv(0, kd_vec), axis=1)
    #plt.plot(d_vec, cov_vec)

    # Now interpolate onto 2d grid.
    from scipy import interpolate
    f = interpolate.interp1d(d_vec, cov_vec)
    cov = f(distance)

    # Let's force the covariance to be unity along the diagonal.
    # I.e. let's define the variance of each point to be 1.0.
    #cov_diag = cov.diagonal().copy()
    #cov /= np.sqrt(cov_diag)
    #cov /= np.sqrt(cov_diag.T)
    return cov
class Inference(object):
    """Minimum-variance (Wiener-filter-style) inference of phi on a test
    surface, given noisy data on another surface."""

    def __init__(self, data_object, test_object):
        # DATA_OBJECT is e.g. a FakeHealpixData object.
        #   It's where you have data.
        # TEST_OBJECT is e.g. a SliceSurface object.
        #   It's where you want to make inferences.
        self.data = data_object
        self.test = test_object

    def calculate_phi_realization(self):
        """Draw one sample of the posterior phi field on the slice."""
        ###############################################################
        # Coded up from Equation 18 in Roland's note,
        # https://www.dropbox.com/s/hsq44r7cs1rwkuq/MusicofSphere.pdf
        # Is there a faster algorithm than this?
        ###############################################################

        # Ryan's understanding of this:
        # Define a coordinate object that includes points on the sphere
        # and on a 2d slice.
        joint = HealpixPlusSlice()

        # Do some preparatory work.
        # We only do this once when making multiple realizations.
        if not(hasattr(self, 'cov_joint')):
            dist = joint.make_auto_distance_array()
            cov_joint = large_scale_phi_covariance(dist)
            self.cov_joint = cov_joint
        if not(hasattr(self, 'phi_mv')):
            self.calculate_mv_phi()

        # Generate noise-free truth *simultaneously* on Sphere and Slice.
        from numpy.random import multivariate_normal
        realization_truth = multivariate_normal(np.zeros(joint.n_total), self.cov_joint)
        sphere_truth = realization_truth[joint.ind_healpix]
        slice_truth = realization_truth[joint.ind_slice]

        # Add noise to Sphere points.
        noise = self.data.sigma*np.random.randn(joint.n_healpix)
        sphere_data = sphere_truth + noise

        # Generate MV estimate on Slice.
        tmp = np.dot(self.inv_cov_data_data , sphere_data)
        this_phi_mv_slice = np.dot(self.cov_data_test.T, tmp)

        # Get the difference of the MV estimate on Slice and the truth on Slice.
        diff_mv = this_phi_mv_slice - slice_truth

        # Add that difference to your *original* MV estimate on Slice.
        # Now you have a sample/realization of the posterior on the Slice, given original data.
        this_realization = self.phi_mv + diff_mv
        return this_realization

    def calculate_mv_phi(self):
        """Compute the minimum-variance phi estimate on the test surface."""
        self.get_data_data_covariance()
        self.get_data_test_covariance()
        tmp = np.dot(self.inv_cov_data_data , self.data.data)
        self.phi_mv = np.dot(self.cov_data_test.T, tmp)

    def get_data_data_covariance(self):
        # Get phi covariance between data space and data space.
        # NOTE(review): the noise variance sigma**2 is not added to the
        # diagonal here, although the data carry noise -- verify.
        from numpy.linalg import inv
        dist_data_data = self.data.make_auto_distance_array()
        cov_data_data = large_scale_phi_covariance(dist_data_data)
        self.inv_cov_data_data = inv(cov_data_data)

    def get_data_test_covariance(self):
        # Get phi covariance between data space and test space.
        dist_data_test = self.data.make_distance_array(self.test)
        cov_data_test = large_scale_phi_covariance(dist_data_test)
        self.cov_data_test = cov_data_test

    def view_phi_mv_slice(self):
        """Display the MV phi estimate on the 2d slice grid."""
        self.view_slice(self.phi_mv)

    def view_slice(self, slice_1d):
        """Reshape a 1d slice vector to the grid and imshow it."""
        slice_2d = slice_1d.reshape(self.test.n_side, self.test.n_side)
        plt.figure(figsize=(7,7))
        plt.imshow(slice_2d, cmap=cmap, vmin=-vscale, vmax=+vscale)
| mit |
jooojo/cnnTSR | train.py | 1 | 2490 | #%%
'''Reads traffic sign data for German Traffic Sign Recognition Benchmark. '''
from os.path import join
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.misc import imresize
from sklearn.model_selection import train_test_split
TRAIN_PATH = r".\Final_Training\Images"

images = [] # images
labels = [] # corresponding labels
# loop over all 43 classes
for c in range(0, 43):
    prefix = join(TRAIN_PATH, str(c).zfill(5)) # subdirectory for class
    gtFile = join(prefix, 'GT-' + str(c).zfill(5) + '.csv') # annotations file
    dataFrame = pd.read_csv(gtFile, delimiter=';')
    # loop over all images in current annotations file
    for _, row in dataFrame.iterrows():
        img = plt.imread(join(prefix, row['Filename'])) # the 1th column is the filename
        # Crop to the annotated region of interest, then resize to 32x32.
        img = img[row['Roi.X1']:row['Roi.X2'], row['Roi.Y1']:row['Roi.Y2'], :]
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
        # this script requires an older SciPy -- confirm pinned version.
        img = imresize(img, (32, 32))
        images.append(img)
        labels.append(row.ClassId) # the 8th column is the label
X = np.stack(images)
Y = np.asarray(labels)
#%%
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils

nb_classes = 43
nb_filters = 64
kernel_size = (3, 3)
pool_size = (2, 2)
input_shape = (32, 32, 3)

# Scale pixel values to [0, 1] and one-hot encode the labels.
X = X.astype('float32')
X /= 255
Y = np_utils.to_categorical(Y, nb_classes)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=42)

# Two conv layers -> max-pool -> dense classifier head.
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# Resume training from previously saved weights (epochs 10..16).
model = Model(input=model.inputs, output=model.outputs)
model.load_weights(r'.\cnn.h5')
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=128, nb_epoch=16,
          validation_data=(X_test, Y_test), initial_epoch=10)
model.save(r'.\cnn.h5')
| mit |
RomainBrault/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 70 | 7486 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second time, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    # Pseudo-inverse of the Gram matrix of the relevant features.
    gram_inv = pinvh(np.dot(X_relevant.T, X_relevant))
    # Projection of the irrelevant features onto the relevant subspace.
    projector = np.dot(np.dot(X_irelevant.T, X_relevant), gram_inv)
    return np.max(np.abs(projector).sum(axis=1))
# Run the whole study twice: a well-conditioned design (conditioning=1,
# low mutual incoherence) and an ill-conditioned one (1e-4).
for conditioning in (1, 1e-4):
    ###########################################################################
    # Simulate regression data with a correlated design
    n_features = 501
    n_relevant_features = 3
    noise_level = .2
    coef_min = .2
    # The Donoho-Tanner phase transition is around n_samples=25: below we
    # will completely fail to recover in the well-conditioned case
    n_samples = 25
    block_size = n_relevant_features

    rng = np.random.RandomState(42)

    # The coefficients of our model
    coef = np.zeros(n_features)
    coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)

    # The correlation of our design: variables correlated by blocs of 3
    corr = np.zeros((n_features, n_features))
    for i in range(0, n_features, block_size):
        corr[i:i + block_size, i:i + block_size] = 1 - conditioning
    corr.flat[::n_features + 1] = 1
    corr = linalg.cholesky(corr)

    # Our design
    X = rng.normal(size=(n_samples, n_features))
    X = np.dot(X, corr)
    # Keep [Wainwright2006] (26c) constant
    X[:n_relevant_features] /= np.abs(
        linalg.svdvals(X[:n_relevant_features])).max()
    X = StandardScaler().fit_transform(X.copy())

    # The output variable
    y = np.dot(X, coef)
    y /= np.std(y)
    # We scale the added noise as a function of the average correlation
    # between the design and the output variable
    y += noise_level * rng.normal(size=n_samples)
    mi = mutual_incoherence(X[:, :n_relevant_features],
                            X[:, n_relevant_features:])

    ###########################################################################
    # Plot stability selection path, using a high eps for early stopping
    # of the path, to save computation time
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                   eps=0.05)

    plt.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and enables to
    # see the progression along the path
    hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
    hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
    ymin, ymax = plt.ylim()
    plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
    plt.ylabel('Stability score: proportion of times selected')
    plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
    plt.axis('tight')
    plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
               loc='best')

    ###########################################################################
    # Plot the estimated stability scores for a given alpha

    # Use 6-fold cross-validation rather than the default 3-fold: it leads to
    # a better choice of alpha:
    # Stop the user warnings outputs- they are not necessary for the example
    # as it is specifically set up to be challenging.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        warnings.simplefilter('ignore', ConvergenceWarning)
        lars_cv = LassoLarsCV(cv=6).fit(X, y)

    # Run the RandomizedLasso: we use a paths going down to .1*alpha_max
    # to avoid exploring the regime in which very noisy variables enter
    # the model
    alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
    clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
    trees = ExtraTreesRegressor(100).fit(X, y)
    # Compare with F-score
    F, _ = f_regression(X, y)

    plt.figure()
    for name, score in [('F-test', F),
                        ('Stability selection', clf.scores_),
                        ('Lasso coefs', np.abs(lars_cv.coef_)),
                        ('Trees', trees.feature_importances_),
                        ]:
        precision, recall, thresholds = precision_recall_curve(coef != 0,
                                                               score)
        plt.semilogy(np.maximum(score / np.max(score), 1e-4),
                     label="%s. AUC: %.3f" % (name, auc(recall, precision)))
    plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
             label="Ground truth")
    plt.xlabel("Features")
    plt.ylabel("Score")
    # Plot only the 100 first coefficients
    plt.xlim(0, 100)
    plt.legend(loc='best')
    plt.title('Feature selection scores - Mutual incoherence: %.1f'
              % mi)

plt.show()
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/tsa/stattools.py | 26 | 37127 | """
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types, lzip,
zip, map)
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from .tsatools import lagmat, lagmat2ds, add_trend
from .adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa.arima_model import ARMA
from statsmodels.compat.scipy import _next_regular
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller']
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
    # Dummy container: attributes (including the preformatted summary
    # string `_str`) are attached dynamically by the calling routines.
    def __str__(self):
        return self._str  # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximimizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class.
modargs : tuple
args to pass to model. See notes.
fitargs : tuple
args to pass to fit. See notes.
lagstart : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : str {"aic","bic","t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
             store=False, regresults=False):
    '''
    Augmented Dickey-Fuller unit root test

    The Augmented Dickey-Fuller test can be used to test for a unit root in a
    univariate process in the presence of serial correlation.

    Parameters
    ----------
    x : array_like, 1d
        data series
    maxlag : int
        Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
    regression : str {'c','ct','ctt','nc'}
        Constant and trend order to include in regression
        * 'c' : constant only (default)
        * 'ct' : constant and trend
        * 'ctt' : constant, and linear and quadratic trend
        * 'nc' : no constant, no trend
    autolag : {'AIC', 'BIC', 't-stat', None}
        * if None, then maxlag lags are used
        * if 'AIC' (default) or 'BIC', then the number of lags is chosen
          to minimize the corresponding information criterion
        * 't-stat' based choice of maxlag.  Starts with maxlag and drops a
          lag until the t-statistic on the last lag length is significant at
          the 95 % level.
    store : bool
        If True, then a result instance is returned additionally to
        the adf statistic (default is False)
    regresults : bool
        If True, the full regression results are returned (default is False)

    Returns
    -------
    adf : float
        Test statistic
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994)
    usedlag : int
        Number of lags used.
    nobs : int
        Number of observations used for the ADF regression and calculation of
        the critical values.
    critical values : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels. Based on MacKinnon (2010)
    icbest : float
        The maximized information criterion if autolag is not None.
    resstore : (optional) instance of ResultStore
        an instance of a dummy class with results attached as attributes

    Notes
    -----
    The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
    root, with the alternative that there is no unit root. If the pvalue is
    above a critical size, then we cannot reject that there is a unit root.

    The p-values are obtained through regression surface approximation from
    MacKinnon 1994, but using the updated 2010 tables.
    If the p-value is close to significant, then the critical values should be
    used to judge whether to accept or reject the null.

    The autolag option and maxlag for it are described in Greene.

    References
    ----------
    Greene

    Hamilton

    P-Values (regression surface approximation)
    MacKinnon, J.G. 1994.  "Approximate asymptotic distribution functions for
    unit-root and cointegration tests.  `Journal of Business and Economic
    Statistics` 12, 167-76.

    Critical values
    MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."  Queen's
    University, Dept of Economics, Working Papers.  Available at
    http://ideas.repec.org/p/qed/wpaper/1227.html
    '''
    if regresults:
        store = True

    trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
    if regression is None or isinstance(regression, int):
        regression = trenddict[regression]
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:
        # BUG FIX: the %-formatting must happen inside the ValueError call;
        # the old `raise ValueError("...") % regression` applied `%` to the
        # exception object and raised a confusing TypeError instead.
        raise ValueError("regression option %s not understood" % regression)
    x = np.asarray(x)
    nobs = x.shape[0]

    if maxlag is None:
        #from Greene referencing Schwert 1989
        maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))

    xdiff = np.diff(x)
    # lag matrix of the differences with the lagged level in column 0
    xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
    nobs = xdall.shape[0]  # pylint: disable=E1103

    xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
    xdshort = xdiff[-nobs:]

    if store:
        resstore = ResultsStore()
    if autolag:
        if regression != 'nc':
            fullRHS = add_trend(xdall, regression, prepend=True)
        else:
            fullRHS = xdall
        startlag = fullRHS.shape[1] - xdall.shape[1] + 1  # 1 for level  # pylint: disable=E1103
        #search for lag length with smallest information criteria
        #Note: use the same number of observations to have comparable IC
        #aic and bic: smaller is better

        if not regresults:
            icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
                                       maxlag, autolag)
        else:
            icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
                                              maxlag, autolag,
                                              regresults=regresults)
            resstore.autolag_results = alres

        bestlag -= startlag  # convert to lag not column index

        #rerun ols with best autolag
        xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
        nobs = xdall.shape[0]   # pylint: disable=E1103
        xdall[:, 0] = x[-nobs - 1:-1]  # replace 0 xdiff with level of x
        xdshort = xdiff[-nobs:]
        usedlag = bestlag
    else:
        usedlag = maxlag
        icbest = None
    if regression != 'nc':
        resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
                     regression)).fit()
    else:
        resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()

    adfstat = resols.tvalues[0]
#    adfstat = (resols.params[0]-1.0)/resols.bse[0]
    # the "asymptotically correct" z statistic is obtained as
    # nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
    # I think this is the statistic that is used for series that are integrated
    # for orders higher than I(1), ie., not ADF but cointegration tests.

    # Get approx p-value and critical values
    pvalue = mackinnonp(adfstat, regression=regression, N=1)
    critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
    critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
                  "10%" : critvalues[2]}
    if store:
        resstore.resols = resols
        resstore.maxlag = maxlag
        resstore.usedlag = usedlag
        resstore.adfstat = adfstat
        resstore.critvalues = critvalues
        resstore.nobs = nobs
        resstore.H0 = ("The coefficient on the lagged level equals 1 - "
                       "unit root")
        resstore.HA = "The coefficient on the lagged level < 1 - stationary"
        resstore.icbest = icbest
        return adfstat, pvalue, critvalues, resstore
    else:
        if not autolag:
            return adfstat, pvalue, usedlag, nobs, critvalues
        else:
            return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False):
    '''
    Autocovariance for 1D

    Parameters
    ----------
    x : array
        Time series data. Must be 1d.
    unbiased : bool
        If True, denominators are n-k, otherwise n.
    demean : bool
        If True, subtract the mean of x from each element of x.
    fft : bool
        If True, use FFT convolution.  This method should be preferred
        for long time series.

    Returns
    -------
    acovf : array
        autocovariance function
    '''
    x = np.squeeze(np.asarray(x))
    if x.ndim > 1:
        raise ValueError("x must be 1d. Got %d dims." % x.ndim)
    n = len(x)

    centered = x - x.mean() if demean else x

    # denominators for each of the 2n-1 lags of a full correlation
    if unbiased:
        counts = np.arange(1, n + 1)
        denom = np.hstack((counts, counts[:-1][::-1]))
    else:
        denom = n * np.ones(2 * n - 1)

    if not fft:
        # direct (time-domain) computation
        return (np.correlate(centered, centered, 'full') / denom)[n - 1:]

    # frequency-domain computation via the Wiener-Khinchin relation
    spectrum = np.fft.fft(centered, n=2 * n)
    raw = np.fft.ifft(spectrum * np.conjugate(spectrum))[:n] / denom[n - 1:]
    return raw.real
def q_stat(x, nobs, type="ljungbox"):
    """
    Returns the Ljung-Box Q Statistic

    Parameters
    ----------
    x : array-like
        Array of autocorrelation coefficients.  Can be obtained from acf.
    nobs : int
        Number of observations in the entire sample (ie., not just the length
        of the autocorrelation function results).
    type : str
        Only "ljungbox" is supported.

    Returns
    -------
    q-stat : array
        Ljung-Box Q-statistic for autocorrelation parameters
    p-value : array
        P-value of the Q statistic

    Raises
    ------
    ValueError
        If `type` is not "ljungbox".

    Notes
    ------
    Written to be used with acf.
    """
    x = np.asarray(x)
    if type != "ljungbox":
        # BUG FIX: previously an unsupported `type` fell through the if-block
        # and crashed with NameError on the undefined `ret`; fail explicitly.
        raise ValueError("only type='ljungbox' is supported, got %r" % type)
    # cumulative Ljung-Box statistic over lags 1..len(x)
    ret = (nobs * (nobs + 2) *
           np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
    # p-values against chi-square with k degrees of freedom at lag k
    chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
    return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None):
    '''
    Autocorrelation function for 1d arrays.

    Parameters
    ----------
    x : array
       Time series data
    unbiased : bool
       If True, then denominators for autocovariance are n-k, otherwise n
    nlags: int, optional
        Number of lags to return autocorrelation for.
    qstat : bool, optional
        If True, returns the Ljung-Box q statistic for each autocorrelation
        coefficient.  See q_stat for more information.
    fft : bool, optional
        If True, computes the ACF via FFT.
    alpha : scalar, optional
        If a number is given, the confidence intervals for the given level are
        returned. For instance if alpha=.05, 95 % confidence intervals are
        returned where the standard deviation is computed according to
        Bartlett\'s formula.

    Returns
    -------
    acf : array
        autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF. Returned if confint is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic.  Returned if q_stat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics.  Returned if q_stat is
        True.

    Notes
    -----
    The acf at lag 0 (ie., 1) is returned.

    This is based np.correlate which does full convolution. For very long time
    series it is recommended to use fft convolution instead.

    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimtor.
    '''
    nobs = len(x)
    d = nobs  # changes if unbiased; only used by the fft branch below
    if not fft:
        # time-domain path: normalise the autocovariance by its lag-0 value
        avf = acovf(x, unbiased=unbiased, demean=True)
        #acf = np.take(avf/avf[0], range(1,nlags+1))
        acf = avf[:nlags + 1] / avf[0]
    else:
        x = np.squeeze(np.asarray(x))
        #JP: move to acovf
        x0 = x - x.mean()
        # ensure that we always use a power of 2 or 3 for zero-padding,
        # this way we'll ensure O(n log n) runtime of the fft.
        n = _next_regular(2 * nobs + 1)
        Frf = np.fft.fft(x0, n=n)  # zero-pad for separability
        if unbiased:
            d = nobs - np.arange(nobs)
        # Wiener-Khinchin: inverse FFT of the power spectrum is the
        # autocovariance; normalise by acf[0] to get correlations
        acf = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d
        acf /= acf[0]
        #acf = np.take(np.real(acf), range(1,nlags+1))
        acf = np.real(acf[:nlags + 1])  # keep lag 0

    if not (qstat or alpha):
        return acf
    if alpha is not None:
        # variance of the sample ACF per Bartlett's formula
        varacf = np.ones(nlags + 1) / nobs
        varacf[0] = 0
        varacf[1] = 1. / nobs
        varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
        interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
        confint = np.array(lzip(acf - interval, acf + interval))
        if not qstat:
            return acf, confint
    if qstat:
        # NOTE: `qstat` (the bool flag) is deliberately rebound to the
        # Q-statistic array here; lag 0 is dropped before the test
        qstat, pvalue = q_stat(acf[1:], nobs=nobs)  # drop lag 0
        if alpha is not None:
            return acf, confint, qstat, pvalue
        else:
            return acf, qstat, pvalue
def pacf_yw(x, nlags=40, method='unbiased'):
    '''Partial autocorrelation estimated with non-recursive yule_walker

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        largest lag for which pacf is returned
    method : 'unbiased' (default) or 'mle'
        method for the autocovariance calculations in yule walker

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, maxlag+1 elements

    Notes
    -----
    The Yule-Walker equations are solved once per lag, so some work is
    duplicated across lags.
    '''
    # lag-0 partial autocorrelation is 1 by definition; for lag k it is the
    # highest-order AR coefficient of a fitted AR(k) model
    partials = [1.]
    partials.extend(yule_walker(x, lag, method=method)[0][-1]
                    for lag in range(1, nlags + 1))
    return np.array(partials)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
    '''Calculate partial autocorrelations

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        Number of lags for which pacf is returned.  Lag 0 is not returned.

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, maxlag+1 elements

    Notes
    -----
    This solves a separate OLS estimation for each desired lag.
    A preceding source comment flags this implementation as incorrect;
    treat the results with caution.
    '''
    #TODO: add warnings for Yule-Walker
    #NOTE: demeaning and not using a constant gave incorrect answers?
    #JP: demeaning should have a better estimate of the constant
    #maybe we can compare small sample properties with a MonteCarlo
    xlags, x0 = lagmat(x, nlags, original='sep')
    #xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
    xlags = add_constant(xlags)
    pacf = [1.]
    for k in range(1, nlags+1):
        # regress x on its first k lags (plus constant); the last regression
        # coefficient is taken as the partial autocorrelation at lag k.
        # NOTE(review): this assumes a particular column order from
        # add_constant / the :k+1 slice -- confirm against lagmat's layout.
        res = OLS(x0[k:], xlags[k:, :k+1]).fit()
        #np.take(xlags[k:], range(1,k+1)+[-1],
        pacf.append(res.params[-1])
    return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
    '''Partial autocorrelation estimated

    Parameters
    ----------
    x : 1d array
        observations of time series for which pacf is calculated
    nlags : int
        largest lag for which pacf is returned
    method : 'ywunbiased' (default) or 'ywmle' or 'ols'
        specifies which method for the calculations to use:

        - yw or ywunbiased : yule walker with bias correction in denominator
          for acovf
        - ywm or ywmle : yule walker without bias correction
        - ols - regression of time series on lags of it and on constant
        - ld or ldunbiased : Levinson-Durbin recursion with bias correction
        - ldb or ldbiased : Levinson-Durbin recursion without bias correction
    alpha : scalar, optional
        If a number is given, the confidence intervals for the given level are
        returned.  For instance if alpha=.05, 95 % confidence intervals are
        returned where the standard deviation is computed according to
        1/sqrt(len(x))

    Returns
    -------
    pacf : 1d array
        partial autocorrelations, nlags elements, including lag zero
    confint : array, optional
        Confidence intervals for the PACF.  Returned if confint is not None.

    Notes
    -----
    This solves yule_walker equations or ols for each desired lag
    and contains currently duplicate calculations.
    '''
    # dispatch on the (many) accepted method aliases
    if method == 'ols':
        pacf_est = pacf_ols(x, nlags=nlags)
    elif method in ('yw', 'ywu', 'ywunbiased', 'yw_unbiased'):
        pacf_est = pacf_yw(x, nlags=nlags, method='unbiased')
    elif method in ('ywm', 'ywmle', 'yw_mle'):
        pacf_est = pacf_yw(x, nlags=nlags, method='mle')
    elif method in ('ld', 'ldu', 'ldunbiase', 'ld_unbiased'):
        acv = acovf(x, unbiased=True)
        pacf_est = levinson_durbin(acv, nlags=nlags, isacov=True)[2]
    # inconsistent naming with ywmle
    elif method in ('ldb', 'ldbiased', 'ld_biased'):
        acv = acovf(x, unbiased=False)
        pacf_est = levinson_durbin(acv, nlags=nlags, isacov=True)[2]
    else:
        raise ValueError('method not available')

    if alpha is None:
        return pacf_est

    # constant-width interval based on the 1/sqrt(n) standard deviation
    halfwidth = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(1. / len(x))
    confint = np.array(lzip(pacf_est - halfwidth, pacf_est + halfwidth))
    confint[0] = pacf_est[0]  # fix confidence interval for lag 0 to varpacf=0
    return pacf_est, confint
def ccovf(x, y, unbiased=True, demean=True):
    ''' crosscovariance for 1D

    Parameters
    ----------
    x, y : arrays
       time series data
    unbiased : boolean
       if True, then denominators is n-k, otherwise n

    Returns
    -------
    ccovf : array
        autocovariance function

    Notes
    -----
    This uses np.correlate which does full convolution. For very long time
    series it is recommended to use fft convolution instead.
    '''
    n = len(x)
    if demean:
        xc = x - x.mean()
        yc = y - y.mean()
    else:
        xc, yc = x, y

    if unbiased:
        # per-lag sample counts n-k via a self-correlation of ones
        ones = np.ones(n)
        denom = np.correlate(ones, ones, 'full')
    else:
        denom = n

    # keep only the non-negative lags of the full cross-correlation
    return (np.correlate(xc, yc, 'full') / denom)[n - 1:]
def ccf(x, y, unbiased=True):
    '''cross-correlation function for 1d

    Parameters
    ----------
    x, y : arrays
       time series data
    unbiased : boolean
       if True, then denominators for autocovariance is n-k, otherwise n

    Returns
    -------
    ccf : array
        cross-correlation function of x and y

    Notes
    -----
    This is based np.correlate which does full convolution. For very long time
    series it is recommended to use fft convolution instead.

    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimtor.
    '''
    # normalise the cross-covariance by the product of the sample
    # standard deviations to obtain correlations
    cross_cov = ccovf(x, y, unbiased=unbiased, demean=True)
    scale = np.std(x) * np.std(y)
    return cross_cov / scale
def periodogram(X):
    """
    Returns the periodogram for the natural frequency of X

    Parameters
    ----------
    X : array-like
        Array for which the periodogram is desired.

    Returns
    -------
    pgram : array
        1./len(X) * np.abs(np.fft.fft(X))**2

    References
    ----------
    Brockwell and Davis.
    """
    X = np.asarray(X)
    #if kernel == "bartlett":
    #    w = 1 - np.arange(M+1.)/M   #JP removed integer division
    pgram = np.abs(np.fft.fft(X))**2 / len(X)
    # zero out the DC term  # what are the implications of this?
    pgram[0] = 0.
    return pgram
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
    '''Levinson-Durbin recursion for autoregressive processes

    Parameters
    ----------
    s : array_like
        If isacov is False, then this is the time series. If iasacov is true
        then this is interpreted as autocovariance starting with lag 0
    nlags : integer
        largest lag to include in recursion or order of the autoregressive
        process
    isacov : boolean
        flag to indicate whether the first argument, s, contains the
        autocovariances or the data series.

    Returns
    -------
    sigma_v : float
        estimate of the error variance ?
    arcoefs : ndarray
        estimate of the autoregressive coefficients
    pacf : ndarray
        partial autocorrelation function
    sigma : ndarray
        entire sigma array from intermediate result, last value is sigma_v
    phi : ndarray
        entire phi array from intermediate result, last column contains
        autoregressive coefficients for AR(nlags) with a leading 1

    Notes
    -----
    This function returns currently all results, but maybe we drop sigma and
    phi from the returns.

    If this function is called with the time series (isacov=False), then the
    sample autocovariance function is calculated with the default options
    (biased, no fft).
    '''
    s = np.asarray(s)
    order = nlags  # rename compared to nitime

    # autocovariances up to lag `order`, either given or estimated
    if isacov:
        acov = s
    else:
        acov = acovf(s)[:order + 1]  # not tested

    phi = np.zeros((order + 1, order + 1), 'd')
    sig = np.zeros(order + 1)
    # seed the recursion with the AR(1) solution
    phi[1, 1] = acov[1] / acov[0]
    sig[1] = acov[0] - phi[1, 1] * acov[1]

    # Levinson-Durbin update: extend the AR(k-1) solution to AR(k)
    for k in range(2, order + 1):
        innovation = acov[k] - np.dot(phi[1:k, k - 1], acov[1:k][::-1])
        phi[k, k] = innovation / sig[k - 1]
        for j in range(1, k):
            phi[j, k] = phi[j, k - 1] - phi[k, k] * phi[k - j, k - 1]
        sig[k] = sig[k - 1] * (1 - phi[k, k]**2)

    sigma_v = sig[-1]
    arcoefs = phi[1:, -1]
    pacf_ = np.diag(phi).copy()
    pacf_[0] = 1.
    return sigma_v, arcoefs, pacf_, sig, phi  # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
    """four tests for granger non causality of 2 timeseries

    all four tests give similar results
    `params_ftest` and `ssr_ftest` are equivalent based on F test which is
    identical to lmtest:grangertest in R

    Parameters
    ----------
    x : array, 2d, (nobs,2)
        data for test whether the time series in the second column Granger
        causes the time series in the first column
    maxlag : integer
        the Granger causality test results are calculated for all lags up to
        maxlag
    addconst : bool
        include a constant in the regressions (only True is implemented)
    verbose : bool
        print results if true

    Returns
    -------
    results : dictionary
        all test results, dictionary keys are the number of lags. For each
        lag the values are a tuple, with the first element a dictionary with
        teststatistic, pvalues, degrees of freedom, the second element are
        the OLS estimation results for the restricted model, the unrestricted
        model and the restriction (contrast) matrix for the parameter f_test.

    Notes
    -----
    TODO: convert to class and attach results properly

    The Null hypothesis for grangercausalitytests is that the time series in
    the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Grange causality means that past values of x2 have a
    statistically significant effect on the current value of x1, taking past
    values of x1 into account as regressors. We reject the null hypothesis
    that x2 does not Granger cause x1 if the pvalues are below a desired size
    of the test.

    The null hypothesis for all four test is that the coefficients
    corresponding to past values of the second time series are zero.

    'params_ftest', 'ssr_ftest' are based on F distribution

    'ssr_chi2test', 'lrtest' are based on chi-square distribution

    References
    ----------
    http://en.wikipedia.org/wiki/Granger_causality
    Greene: Econometric Analysis

    """
    from scipy import stats

    x = np.asarray(x)

    # need at least 3*maxlag (+ constant) rows to fit the joint model
    if x.shape[0] <= 3 * maxlag + int(addconst):
        raise ValueError("Insufficient observations. Maximum allowable "
                         "lag is {0}".format(int((x.shape[0] - int(addconst)) /
                                                 3) - 1))

    resli = {}

    for mlg in range(1, maxlag + 1):
        result = {}
        if verbose:
            print('\nGranger Causality')
            print('number of lags (no zero)', mlg)
        mxlg = mlg

        # create lagmat of both time series
        dta = lagmat2ds(x, mxlg, trim='both', dropex=1)

        #add constant
        if addconst:
            # restricted model: own lags only; joint model: own + other lags
            dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
            dtajoint = add_constant(dta[:, 1:], prepend=False)
        else:
            raise NotImplementedError('Not Implemented')
            #dtaown = dta[:, 1:mxlg]
            #dtajoint = dta[:, 1:]

        # Run ols on both models without and with lags of second variable
        res2down = OLS(dta[:, 0], dtaown).fit()
        res2djoint = OLS(dta[:, 0], dtajoint).fit()

        #print results
        #for ssr based tests see:
        #http://support.sas.com/rnd/app/examples/ets/granger/index.htm
        #the other tests are made-up

        # Granger Causality test using ssr (F statistic)
        fgc1 = ((res2down.ssr - res2djoint.ssr) /
                res2djoint.ssr / mxlg * res2djoint.df_resid)
        if verbose:
            print('ssr based F test:         F=%-8.4f, p=%-8.4f, df_denom=%d,'
                  ' df_num=%d' % (fgc1,
                                  stats.f.sf(fgc1, mxlg,
                                             res2djoint.df_resid),
                                  res2djoint.df_resid, mxlg))
        result['ssr_ftest'] = (fgc1,
                               stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
                               res2djoint.df_resid, mxlg)

        # Granger Causality test using ssr (ch2 statistic)
        fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
        if verbose:
            print('ssr based chi2 test:   chi2=%-8.4f, p=%-8.4f, '
                  'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
        result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)

        #likelihood ratio test pvalue:
        lr = -2 * (res2down.llf - res2djoint.llf)
        if verbose:
            print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
                  (lr, stats.chi2.sf(lr, mxlg), mxlg))
        result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)

        # F test that all lag coefficients of exog are zero
        # contrast matrix selects the mxlg coefficients of the second series
        # (middle block of columns); own lags and constant are unrestricted
        rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
                                   np.eye(mxlg, mxlg),
                                   np.zeros((mxlg, 1))))
        ftres = res2djoint.f_test(rconstr)
        if verbose:
            print('parameter F test:         F=%-8.4f, p=%-8.4f, df_denom=%d,'
                  ' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
                                  ftres.df_num))
        result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
                                  np.squeeze(ftres.pvalue)[()],
                                  ftres.df_denom, ftres.df_num)

        resli[mxlg] = (result, [res2down, res2djoint, rconstr])

    return resli
def coint(y1, y2, regression="c"):
    """
    This is a simple cointegration test. Uses unit-root test on residuals to
    test for cointegrated relationship

    See Hamilton (1994) 19.2

    Parameters
    ----------
    y1 : array_like, 1d
        first element in cointegrating vector
    y2 : array_like
        remaining elements in cointegrating vector
    regression : str {'c'}
        Included in regression
        * 'c' : Constant

    Returns
    -------
    coint_t : float
        t-statistic of unit-root test on residuals
    pvalue : float
        MacKinnon's approximate p-value based on MacKinnon (1994)
    crit_value : dict
        Critical values for the test statistic at the 1 %, 5 %, and 10 %
        levels.

    Notes
    -----
    The Null hypothesis is that there is no cointegration, the alternative
    hypothesis is that there is cointegrating relationship. If the pvalue is
    small, below a critical size, then we can reject the hypothesis that there
    is no cointegrating relationship.

    P-values are obtained through regression surface approximation from
    MacKinnon 1994.

    References
    ----------
    MacKinnon, J.G. 1994.  "Approximate asymptotic distribution functions for
    unit-root and cointegration tests.  `Journal of Business and Economic
    Statistics` 12, 167-76.

    """
    regression = regression.lower()
    if regression not in ['c', 'nc', 'ct', 'ctt']:
        # BUG FIX: format the message inside the ValueError call; the old
        # `raise ValueError("...") % regression` raised a TypeError instead.
        raise ValueError("regression option %s not understood" % regression)
    y1 = np.asarray(y1)
    y2 = np.asarray(y2)
    if regression == 'c':
        y2 = add_constant(y2, prepend=False)
    st1_resid = OLS(y1, y2).fit().resid  # stage one residuals
    # Dickey-Fuller style regression of the residuals on their own lag
    lgresid_cons = add_constant(st1_resid[0:-1], prepend=False)
    uroot_reg = OLS(st1_resid[1:], lgresid_cons).fit()
    coint_t = (uroot_reg.params[0] - 1) / uroot_reg.bse[0]
    pvalue = mackinnonp(coint_t, regression="c", N=2, lags=None)
    crit_value = mackinnoncrit(N=1, regression="c", nobs=len(y1))
    return coint_t, pvalue, crit_value
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
    """
    Fit an ARMA(p, q) model, returning None instead of raising on failure.

    Helper for `arma_order_select_ic`: a model that cannot be estimated
    simply drops out of the grid.  On a ValueError about starting values it
    retries once with a default start_params vector.
    """
    try:
        return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
                                                    start_params=start_params,
                                                    **fit_kw)
    except LinAlgError:
        # SVD convergence failure on badly misspecified models
        return
    except ValueError as error:
        if start_params is not None:  # don't recurse again
            # user supplied start_params only get one chance
            return
        # try a little harder, should be handled in fit really
        elif ((hasattr(error, 'message') and 'initial' not in error.message)
                or 'initial' in str(error)):  # py2 and py3
            start_params = [.1] * sum(order)
            if trend == 'c':
                start_params = [.1] + start_params
            return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
                                  start_params)
        else:
            return
    except Exception:
        # no idea what happened: stay best-effort, but do not swallow
        # KeyboardInterrupt/SystemExit like the old bare `except:` did
        return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
                         model_kw=None, fit_kw=None):
    """
    Returns information criteria for many ARMA models

    Parameters
    ----------
    y : array-like
        Time-series data
    max_ar : int
        Maximum number of AR lags to use. Default 4.
    max_ma : int
        Maximum number of MA lags to use. Default 2.
    ic : str, list
        Information criteria to report. Either a single string or a list
        of different criteria is possible.
    trend : str
        The trend to use when fitting the ARMA models.
    model_kw : dict, optional
        Keyword arguments to be passed to the ``ARMA`` model.
        Defaults to an empty dict.
    fit_kw : dict, optional
        Keyword arguments to be passed to ``ARMA.fit``.
        Defaults to an empty dict.

    Returns
    -------
    obj : Results object
        Each ic is an attribute with a DataFrame for the results. The AR order
        used is the row index. The ma order used is the column index. The
        minimum orders are available as ``ic_min_order``.

    Examples
    --------

    >>> from statsmodels.tsa.arima_process import arma_generate_sample
    >>> import statsmodels.api as sm
    >>> import numpy as np

    >>> arparams = np.array([.75, -.25])
    >>> maparams = np.array([.65, .35])
    >>> arparams = np.r_[1, -arparams]
    >>> maparam = np.r_[1, maparams]
    >>> nobs = 250
    >>> np.random.seed(2014)
    >>> y = arma_generate_sample(arparams, maparams, nobs)
    >>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
    >>> res.aic_min_order
    >>> res.bic_min_order

    Notes
    -----
    This method can be used to tentatively identify the order of an ARMA
    process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can be,
    therefore a little slow. An implementation using approximate estimates
    will be provided in the future. In the meantime, consider passing
    {method : 'css'} to fit_kw.
    """
    from pandas import DataFrame

    # avoid mutable default arguments; {} defaults are created per call now
    model_kw = {} if model_kw is None else model_kw
    fit_kw = {} if fit_kw is None else fit_kw

    ar_range = lrange(0, max_ar + 1)
    ma_range = lrange(0, max_ma + 1)
    if isinstance(ic, string_types):
        ic = [ic]
    elif not isinstance(ic, (list, tuple)):
        raise ValueError("Need a list or a tuple for ic if not a string.")

    results = np.zeros((len(ic), max_ar + 1, max_ma + 1))

    for ar in ar_range:
        for ma in ma_range:
            # ARMA(0,0) without trend has nothing to estimate
            if ar == 0 and ma == 0 and trend == 'nc':
                results[:, ar, ma] = np.nan
                continue

            mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
            if mod is None:
                # estimation failed; leave NaN in the grid
                results[:, ar, ma] = np.nan
                continue

            for i, criteria in enumerate(ic):
                results[i, ar, ma] = getattr(mod, criteria)

    dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]

    res = dict(zip(ic, dfs))

    # add the minimums to the results dict
    min_res = {}
    for i, result in iteritems(res):
        mins = np.where(result.min().min() == result)
        min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
    res.update(min_res)

    return Bunch(**res)
if __name__ == "__main__":
    # smoke-test driver exercising the estimators above on macro data
    import statsmodels.api as sm
    data = sm.datasets.macrodata.load().data
    x = data['realgdp']
    # adf is tested now.
    adf = adfuller(x, 4, autolag=None)
    adfbic = adfuller(x, autolag="bic")
    adfaic = adfuller(x, autolag="aic")
    adftstat = adfuller(x, autolag="t-stat")

    # acf is tested now
    # BUG FIX: `acf` has no `confint` keyword; confidence intervals are
    # requested via `alpha` (alpha=.05 gives 95 % intervals).  The old
    # `confint=95` calls raised TypeError.
    acf1, ci1, Q, pvalue = acf(x, nlags=40, alpha=.05, qstat=True)
    acf2, ci2, Q2, pvalue2 = acf(x, nlags=40, alpha=.05, fft=True, qstat=True)
    acf3, ci3, Q3, pvalue3 = acf(x, nlags=40, alpha=.05, qstat=True,
                                 unbiased=True)
    acf4, ci4, Q4, pvalue4 = acf(x, nlags=40, alpha=.05, fft=True, qstat=True,
                                 unbiased=True)
    # pacf is tested now
    # pacf1 = pacorr(x)
    # pacfols = pacf_ols(x, nlags=40)
    # pacfyw = pacf_yw(x, nlags=40, method="mle")
    y = np.random.normal(size=(100, 2))
    grangercausalitytests(y, 2)
| bsd-3-clause |
dmitryduev/pypride | src/pypride/classes.py | 1 | 108453 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 17:05:04 2013
Definitions of classes used in pypride
@author: Dmitry A. Duev
"""
import ConfigParser
import datetime
from astropy.time import Time
#import sys
import numpy as np
import scipy as sp
#from matplotlib.cbook import flatten
from math import *
#from math import sin, cos, sqrt, floor
from sklearn.base import BaseEstimator
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import LinearRegression
from astropy.time import Time
import struct
import os
import inspect
from copy import deepcopy
#from pypride.vintflib import lagint
try:
from pypride.vintflib import lagint, pleph#, iau_xys00a_fort, admint2
except:
# compile the Fortran code if necessary
from numpy import f2py
abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))
fid = open(os.path.join(abs_path, 'vintflib.f'))
source = fid.read()
fid.close()
f2py.compile(source, modulename='vintflib')
from pypride.vintflib import lagint, pleph#, iau_xys00a_fort, admint2
#from time import time as _time
#from numba import jit
cheb = np.polynomial.chebyshev
norm = np.linalg.norm
'''
#==============================================================================
# Polynomial regression
#==============================================================================
'''
class PolynomialRegression(BaseEstimator):
    """Least-squares polynomial regression of degree ``deg``.

    scikit-learn style estimator (usable with GridSearchCV in ``optimalFit``);
    the fit goes through a Vandermonde design matrix with no extra intercept
    column (the constant term is part of the polynomial basis).
    """
    def __init__(self, deg=None):
        # polynomial degree; tuned via GridSearchCV's param_grid
        self.deg = deg
        self.model = LinearRegression(fit_intercept=False)

    def fit(self, X, y):
        self.model.fit(np.vander(X, N=self.deg + 1), y)

    def predict(self, x):
        # accept scalars and array-likes alike; only a missing len() marks a
        # scalar, so catch TypeError rather than the old bare `except:` which
        # also swallowed KeyboardInterrupt/SystemExit
        try:
            len(x)
            x = np.array(x)
        except TypeError:
            x = np.array([x])
        return self.model.predict(np.vander(x, N=self.deg + 1))

    @property
    def coef_(self):
        # coefficients in np.vander order (highest power first)
        return self.model.coef_
'''
#==============================================================================
# Chebyshev regression
#==============================================================================
'''
class ChebyshevRegression(BaseEstimator):
    """Least-squares fit of a Chebyshev series of degree ``deg``.

    scikit-learn style estimator so it can be tuned with GridSearchCV
    (see ``optimalFit``); ``deg`` is the only hyper-parameter.
    """
    def __init__(self, deg=None):
        # degree of the Chebyshev series; tuned via GridSearchCV's param_grid
        self.deg = deg
    def fit(self, X, y):
        # coefficients of the fitted series; `cheb` is the module-level
        # alias for np.polynomial.chebyshev
        self.chefit = cheb.chebfit(X, y, self.deg)
    def predict(self, x):
        # evaluate the fitted Chebyshev series at x (scalar or array-like)
        return cheb.chebval(x, self.chefit)
    @property
    def coef_(self):
        # fitted Chebyshev coefficients, sklearn-style accessor
        return self.chefit
'''
#==============================================================================
# Optimal fit to data
#==============================================================================
'''
def optimalFit(x, y, min_order=0, max_order=8, fit_type='poly'):
    """
    Cross-validated search for the best polynomial/Chebyshev fit to (x, y).

    Parameters
    ----------
    x, y : array-like
        Data to fit.
    min_order, max_order : int
        Range of candidate degrees, [min_order, max_order).
    fit_type : str
        'poly' for a plain polynomial fit, 'cheb' for a Chebyshev series.

    Returns
    -------
    cv_model : GridSearchCV
        Fitted grid-search object; evaluate with ``cv_model.predict(x_new)``.

    Raises
    ------
    ValueError
        If `fit_type` is neither 'poly' nor 'cheb'.
    """
    # initialise optimal estimator:
    if fit_type == 'poly':
        estimator = PolynomialRegression()
    elif fit_type == 'cheb':
        estimator = ChebyshevRegression()
    else:
        # a concrete exception type is friendlier to callers than the
        # generic `Exception` raised previously (still caught by old code)
        raise ValueError('unknown fit type')

    degrees = np.arange(min_order, max_order)
    cv_model = GridSearchCV(estimator,
                            param_grid={'deg': degrees},
                            scoring='mean_squared_error')
    cv_model.fit(x, y)
    # use as: cv_model.predict(x_new)
    return cv_model
'''
#==============================================================================
# Interpolate using polyfit
#==============================================================================
'''
def localfit(x, y, xn, points=5, poly=2):
    """
    Interpolate tabulated y(x) at abscissae xn with local polynomial fits.

    For each requested point a window of `points` samples around it is
    selected (shifted inwards near the data edges) and a degree-`poly`
    least-squares polynomial is evaluated at the point.

    Parameters
    ----------
    x, y : array-like
        Tabulated data; x must be sorted ascending (np.searchsorted is used).
    xn : scalar or array-like
        Abscissae at which to interpolate.
    points : int
        Number of samples in the local fitting window.
    poly : int
        Degree of the local polynomial.

    Returns
    -------
    float or ndarray
        A float for a single requested point, an array otherwise
        (historical contract preserved).
    """
    if isinstance(xn, (float, int)):
        # BUG FIX: use the builtin `float` dtype; the `np.float` alias was
        # deprecated and removed in NumPy >= 1.24
        xn = np.array([xn], dtype=float)
    yn = []
    for xi in xn:
        n0 = np.searchsorted(x, xi)
        # number of points to cut from the left-hand side
        nl = int(np.floor(points / 2.0))
        # number of points to cut from the right-hand side
        nr = int(np.ceil(points / 2.0))
        # check/correct bounds: shift the window inwards near the data edges
        if len(x[:n0]) < nl:
            nr += nl - len(x[:n0])
            nl = len(x[:n0])
        if len(x[n0:]) < nr:
            nl += nr - len(x[n0:])
            nr = len(x[n0:])
        # local least-squares fit over the selected window
        yfit = np.polyfit(x[n0 - nl:n0 + nr], y[n0 - nl:n0 + nr], poly)
        yn.append(np.polyval(yfit, xi))
    return np.array(yn, dtype=float) if len(yn) > 1 else float(yn[0])
'''
#==============================================================================
# Input settings
#==============================================================================
'''
class inp_set(object):
    """
    Input settings: catalogues, directories, models and switches.

    Parses an ini-style configuration file and exposes every option as an
    attribute.  Relative paths are resolved against the directory that
    contains this source file.
    """
    # config options that hold filesystem paths and must be made absolute,
    # grouped by the section they live in
    _path_options = (
        ('Catalogues', ('sta_xyz', 'sta_vxvyvz', 'sta_axo', 'oc_load',
                        'atm_load', 'sta_nam', 'cat_eop', 'sta_thermdef',
                        'source_cat', 'source_nam', 'shnames_cat',
                        'shnames_cat_igs', 'meteo_cat', 'ion_cat',
                        'f_ramp', 'f_ramp1w', 'f_gc')),
        ('Ephemerides', ('jpl_eph', 'sc_eph_cat')),
        ('Directories', ('obs_path', 'out_path')),
    )

    def __init__(self, inp_file):
        """Read the configuration file ``inp_file`` and populate attributes."""
        self.inp_file = inp_file
        self.config = ConfigParser.RawConfigParser()
        self.config.read(self.inp_file)
        # directory of this source file -- anchor for relative paths
        self.abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))
        # read every path-like option; resolve relative ones against abs_path
        for section, options in self._path_options:
            for option in options:
                value = self.config.get(section, option)
                if value[0] != '/':
                    value = os.path.join(self.abs_path, value)
                setattr(self, option, value)
        # phase center:
        self.phase_center = self.config.get('Models', 'phase_center')
        # near-field model:
        self.nf_model = self.config.get('Models', 'nf_model')
        # tropo and iono:
        self.do_trp_calc = self.config.getboolean('Switches', 'do_trp_calc')
        self.tropo_model = self.config.get('Models', 'tropo_model')
        self.do_trp_grad_calc = self.config.getboolean('Switches', 'do_trp_grad_calc')
        self.do_ion_calc = self.config.getboolean('Switches', 'do_ion_calc')
        self.iono_model = self.config.get('Models', 'iono_model')
        # calculate delay prediction?
        self.delay_calc = self.config.getboolean('Switches', 'delay_calc')
        # calculate uvws?
        self.uvw_calc = self.config.getboolean('Switches', 'uvw_calc')
        # note that 2nd station is the transmitter if 2(3)-way:
        self.doppler_calc = self.config.getboolean('Switches', 'doppler_calc')
        # Doppler prediction model
        self.dop_model = self.config.get('Models', 'dop_model')
        self.tr = self.config.getfloat('Models', 'tr')
        # Jacobians:
        # generate additional ephs and calc delays for S/C position correction
        self.sc_rhophitheta = self.config.getboolean('Switches', 'sc_rhophitheta')
        self.mas_step = self.config.getfloat('Models', 'mas_step')
        self.m_step = self.config.getfloat('Models', 'm_step')
        # generate additional ephs and calc delays for RA/GNSS XYZ pos corr
        self.sc_xyz = self.config.getboolean('Switches', 'sc_xyz')
        self.m_step_xyz = self.config.getfloat('Models', 'm_step_xyz')
        # force update of the s/c ephemeris
        self.sc_eph_force_update = self.config.getboolean('Switches',
                                                          'sc_eph_force_update')

    def get_section(self, section='all'):
        """Return a dict with one section's options ('all' merges every section).

        Values in path-carrying sections are joined with abs_path; the string
        sentinels 'True'/'False'/'None' produced by RawConfigParser are
        converted to their Python counterparts, and known numeric options
        are converted to float.  The internal '__name__' key is dropped.
        """
        path_sections = ('Catalogues', 'Ephemerides', 'Directories')

        def _grab(sec):
            # one section as a dict, minus the parser-internal '__name__' key
            if sec in path_sections:
                return dict((k, os.path.join(self.abs_path, v))
                            for k, v in self.config._sections[sec].items()
                            if k != '__name__')
            return dict((k, v) for k, v in self.config._sections[sec].items()
                        if k != '__name__')

        if section != 'all':
            out = _grab(section)
        else:
            out = {}
            for sec in self.config._sections.keys():
                out.update(_grab(sec))
        # convert the string sentinels to proper python types:
        for k, v in out.items():
            if v == 'False': out[k] = False
            if v == 'True': out[k] = True
            if v == 'None': out[k] = None
            if k in ('tr', 'mas_step', 'm_step', 'm_step_xyz'):
                out[k] = float(out[k])
        return out
'''
#==============================================================================
# Obs object - contains info about obs to be processed
#==============================================================================
'''
class obs(object):
    """
    A single 'observation': a source observed by a set of stations.

    Holds what used to live in per-baseline .obs-files back in the Matlab
    days: scan boundaries and time stamps, observed frequencies, pointing
    data, and the calculated products (Delays, Uvws, Doppler, Etc.) in
    self.dude.
    """
    def __init__(self, sta, source, sou_type, exp_name='', sou_radec=None, inp=None):
        """
        Args:
            sta: list of station names; in case of 2(3)-way Doppler the
                 2nd station is the transmitter
            source: source name
            sou_type: one of 'C', 'S', 'R', 'G' -- anything else raises
                (NOTE(review): exact letter semantics presumed from usage;
                 confirm against callers)
            exp_name: experiment name
            sou_radec: optional source (RA, Dec)
            inp: dict of input switches, merged over the defaults below
        """
        self.sta = sta
        self.source = source
        if sou_type not in ('C', 'S', 'R', 'G'):
            raise Exception('Unrecognised source type.')
        self.sou_type = sou_type
        self.sou_radec = sou_radec
        self.exp_name = exp_name
        self.upSta = []  # uplink stations in case of n-way Doppler, n>1
        self.tstamps = []  # list of datetime objs [datetime(2013,9,18,14,28),.]
        self.scanStartTimes = []  # might be useful.. integrate the phase?
        self.scanStopTimes = []  # to put trailing zeros in proper places (.del)
        self.freqs = []  # list of obs freqs as defined in $MODE
        self.dude = dude()  # Delays, Uvws, Doppler, Etc. = DUDE
        # pointings
        self.pointingsJ2000 = []
        self.pointingsDate = []
        self.azels = []
        # input switches (what to calculate and what not): defaults first,
        # then overridden by whatever was passed in inp.
        # BUGFIX: previously self.inp was replaced wholesale with inp after
        # the merge, silently dropping defaults for keys missing from the
        # caller's dict.
        self.inp = {'do_trp_calc': False, 'do_ion_calc': False,
                    'delay_calc': False, 'uvw_calc': False,
                    'doppler_calc': False, 'sc_rhophitheta': False,
                    'sc_xyz': False}
        if inp is not None:
            for k, v in inp.items():
                self.inp[k] = v

    def __str__(self):
        """Human-readable summary, depending on which products are enabled."""
        echo = ''  # guards against undefined echo when no scans were added
        if self.inp['delay_calc']:
            echo = 'baseline: {:s}-{:s}, source: {:s}'.\
                format(self.sta[0], self.sta[1], self.source)
            if len(self.scanStartTimes) > 0:
                echo += ', time range: {:s} - {:s}, n_obs: {:d}'.\
                    format(str(self.scanStartTimes[0]),
                           str(self.scanStopTimes[-1]), len(self.tstamps))
            return echo
        elif self.inp['doppler_calc']:
            if len(self.sta) == 2:
                echo = 'RX station: {:s}, TX station: {:s}, source: {:s}'.\
                    format(self.sta[0], self.sta[1], self.source)
            elif len(self.sta) == 1:
                echo = 'RX station: {:s}, source: {:s}'.\
                    format(self.sta[0], self.source)
            if len(self.scanStartTimes) > 0:
                echo += ', time range: {:s} - {:s}, n_obs: {:d}'.\
                    format(str(self.scanStartTimes[0]),
                           str(self.scanStopTimes[-1]), len(self.tstamps))
            return echo
        else:
            if len(self.scanStartTimes) > 0:
                echo = 'time range: {:s} - {:s}, n_obs: {:d}'.\
                    format(str(self.scanStartTimes[0]),
                           str(self.scanStopTimes[-1]), len(self.tstamps))
            return 'probably a fake obs object for one reason or another\n' + echo

    @staticmethod
    def factors(n):
        """Return all divisors of n, sorted ascending.

        n may be a float with an integral value (scan lengths come in as
        total_seconds()); the paired divisors n//i are then floats too,
        matching the historical behaviour.
        """
        facs = set()
        for i in range(1, int(n ** 0.5) + 1):
            if n % i == 0:
                facs.add(i)
                facs.add(n // i)
        return sorted(facs)

    def resample(self, tstep=1):
        """Rebuild self.tstamps on a regular grid with step tstep [s]
        within each scan.  If tstep does not divide a scan length evenly,
        the nearest smaller divisor of that length is used instead.
        """
        tstamps = []
        for start, stop in zip(self.scanStartTimes, self.scanStopTimes):
            span = stop - start
            # allow for non-integer steps: accept tiny float residuals
            if span.total_seconds() % tstep < 1e-9:
                nobs = span.total_seconds() / tstep + 1
            else:
                print('bad t_step: not a multiple of N_sec. set to nearest possible.')
                facs = self.factors(span.total_seconds())
                # use the next smaller divisor; max() guards index 0
                pos = max(0, np.searchsorted(facs, tstep) - 1)
                tstep = facs[pos]
                nobs = span.total_seconds() / tstep + 1
            for ii in range(int(nobs)):
                tstamps.append(start + ii * datetime.timedelta(seconds=tstep))
        self.tstamps = tstamps

    def addScan(self, start, step, stop=None, nobs=None, freq=None,
                force_step=False):
        """
        Append a scan: time stamps from ``start`` every ``step`` seconds.

        Args:
            start: scan start (datetime)
            step: time step in seconds (int or float)
            stop: scan stop (datetime); give either stop or nobs
            nobs: number of time stamps; give either stop or nobs
            freq: observed frequency, recorded for ionospheric delay calc
            force_step: keep the requested step even when it does not
                divide the scan length evenly; the stop time is then
                appended as an extra (shorter-spaced) final time stamp

        Raises:
            Exception: if neither stop nor nobs is given
        """
        if stop is None and nobs is None:
            raise Exception('You should specify either "stop" time '
                            'or number of time stamps "nobs" to add')
        elif stop is None:
            # nobs is set -- derive stop
            self.scanStartTimes.append(start)
            nobs = int(nobs)  # needed for the range() function
            stop = start + (nobs - 1) * datetime.timedelta(seconds=step)
            self.scanStopTimes.append(stop)
            for ii in range(nobs):
                self.tstamps.append(start + ii * datetime.timedelta(seconds=step))
        elif nobs is None:
            # stop is set -- derive nobs
            self.scanStartTimes.append(start)
            self.scanStopTimes.append(stop)
            span = stop - start
            # step should divide the scan length; allow non-integer steps
            if modf(span.total_seconds() / step)[0] < 1e-9:
                nobs = int(span.total_seconds() / step + 1)
            else:
                if not force_step:
                    print('bad t_step: not a multiple of N_sec. set to nearest possible.')
                    facs = self.factors(span.total_seconds())
                    # use the next smaller divisor; max() guards index 0
                    pos = max(0, np.searchsorted(facs, step) - 1)
                    step = facs[pos]
                    nobs = int(span.total_seconds() / step + 1)
                else:
                    # keep the forced step; the loop stops short of the end
                    nobs = int(span.total_seconds() // step + 1)
            for ii in range(int(nobs)):
                self.tstamps.append(start + ii * datetime.timedelta(seconds=step))
            # with a forced step the grid misses the scan end -- append it
            if force_step:
                self.tstamps.append(stop)
        # frequency for ionospheric delay calculation
        if freq is not None:
            self.freqs.append([start, stop, freq, 0, None])

    def splitScans(self, duration=60):
        """
        Split longer scans into subscans of ~``duration`` seconds.

        Useful together with handy.py on a very long scan and smoothDude():
        increase t_step for a faster computation, split into shorter
        subscans, then run self.smoothDude().  If the last subscan would be
        shorter than ``duration``, it is merged into the previous one
        (the last stop time is stretched to the true scan end).  Scans
        already shorter than ``duration`` are dropped.
        """
        scanStartTimes = []
        scanStopTimes = []
        freqs = []
        s = 0  # index of the current scan in self.freqs
        for start, stop in zip(self.scanStartTimes, self.scanStopTimes):
            span = stop - start
            # skip scans already shorter than the requested duration
            if span < datetime.timedelta(seconds=duration):
                # BUGFIX: keep the freqs index aligned with the scan list
                s += 1
                continue
            n_subscans = int(span.total_seconds() // duration)
            for ii in range(n_subscans):
                sub_start = start + ii * datetime.timedelta(seconds=duration)
                sub_stop = start + (ii + 1) * datetime.timedelta(seconds=duration)
                # snap subscan boundaries to existing time stamps
                tstamps_cut = [t for t in self.tstamps
                               if sub_start <= t <= sub_stop]
                scanStartTimes.append(tstamps_cut[0])
                scanStopTimes.append(tstamps_cut[-1])
                if len(self.freqs) > 0:
                    freqs.append([start, stop, self.freqs[s][2], 0, None])
            # stretch the last subscan's end to the true scan end
            scanStopTimes[-1] = stop
            s += 1
        # update self:
        self.scanStartTimes = scanStartTimes
        self.scanStopTimes = scanStopTimes
        self.freqs = freqs

    @staticmethod
    def _optimal_estimator(method):
        """Build a cross-validated optimal-order estimator.

        method: 'poly' (degrees 0..7) or 'cheb' (degrees 0..9); anything
        else raises.  Returns a GridSearchCV instance scored by MSE.
        """
        if method == 'poly':
            estimator = PolynomialRegression()
            degrees = np.arange(0, 8)
        elif method == 'cheb':
            estimator = ChebyshevRegression()
            degrees = np.arange(0, 10)
        else:
            raise Exception('Unknown smoothing method. Use \'poly\' or \'cheb\'')
        return GridSearchCV(estimator, param_grid={'deg': degrees},
                            scoring='mean_squared_error')

    def _scan_time_grid(self, start, stop, tstep):
        """Time scales for one scan.

        Returns (ind, t_scan, t_dense): indices into self.tstamps that fall
        within [start, stop], their time tags, and a dense output grid with
        step tstep [s] -- both renormalised (seconds -> 'hours') to keep
        the fits well-conditioned.
        """
        dd0 = datetime.datetime(start.year, start.month, start.day)
        time = np.array([
            (ii, t.hour * 3600 + t.minute * 60.0 + t.second +
             (t - dd0).days * 86400.0)
            for ii, t in enumerate(self.tstamps)
            if start <= t <= stop])
        t_dense = np.arange(time[0, 1], time[-1, 1] + tstep, tstep)
        # renormalise to keep the polynomial fits well-conditioned
        t_scan = 24.0 * time[:, 1] / 86400.0
        t_dense = 24.0 * t_dense / 86400.0
        ind = list(map(int, time[:, 0]))  # list() for py2/py3 compatibility
        return ind, t_scan, t_dense

    def smoothPointing(self, tstep=1, method='cheb'):
        """
        Scan-based smoothing/densification of pointing data.

        For each scan and each station every pointing coordinate is fitted
        with an optimal-order polynomial ('poly') or Chebyshev series
        ('cheb') and re-evaluated on a regular grid with step tstep [s].
        The first coordinate (RA / azimuth) is unwrapped before fitting and
        wrapped back into [0, 2*pi) afterwards.  self.tstamps is resampled
        to the same grid at the end.
        """
        cv_model = self._optimal_estimator(method)
        pointingsJ2000 = [[] for _ in self.sta]
        pointingsDate = [[] for _ in self.sta]
        azels = [[] for _ in self.sta]
        # iterate over scans, as fits must be scan-based
        for start, stop in zip(self.scanStartTimes, self.scanStopTimes):
            ind, t_scan, t_dense = self._scan_time_grid(start, stop, tstep)
            # iterate over stations:
            for jj, _ in enumerate(self.sta):
                if len(self.pointingsJ2000) > 0:
                    scan_smooth = []
                    # iterate over coordinates; RA (ii == 0) needs wrapping
                    for ii in range(self.pointingsJ2000.shape[2]):
                        if ii == 0:
                            cv_model.fit(t_scan, np.unwrap(self.pointingsJ2000[ind, jj, ii]))
                            # wrap the prediction back into [0, 2*pi)
                            wrap = np.angle(np.exp(1j * cv_model.predict(t_dense)))
                            wrap[wrap < 0] += 2 * np.pi
                            scan_smooth.append(wrap)
                        else:
                            cv_model.fit(t_scan, self.pointingsJ2000[ind, jj, ii])
                            scan_smooth.append(cv_model.predict(t_dense))
                    try:
                        pointingsJ2000[jj] = np.vstack((pointingsJ2000[jj],
                                                        np.array(scan_smooth).T))
                    except ValueError:  # first scan: nothing stacked yet
                        pointingsJ2000[jj] = np.array(scan_smooth).T
                if len(self.pointingsDate) > 0:
                    scan_smooth = []
                    for ii in range(self.pointingsDate.shape[2]):
                        if ii == 0:
                            # fix RA if necessary
                            cv_model.fit(t_scan, np.unwrap(self.pointingsDate[ind, jj, ii]))
                            wrap = np.angle(np.exp(1j * cv_model.predict(t_dense)))
                            wrap[wrap < 0] += 2 * np.pi
                            scan_smooth.append(wrap)
                        else:
                            cv_model.fit(t_scan, self.pointingsDate[ind, jj, ii])
                            scan_smooth.append(cv_model.predict(t_dense))
                    try:
                        pointingsDate[jj] = np.vstack((pointingsDate[jj],
                                                       np.array(scan_smooth).T))
                    except ValueError:
                        pointingsDate[jj] = np.array(scan_smooth).T
                if len(self.azels) > 0:
                    scan_smooth = []
                    for ii in range(self.azels.shape[2]):
                        if ii == 0:
                            # azels are stored in degrees -- fit in radians
                            cv_model.fit(t_scan, np.unwrap(self.azels[ind, jj, ii] * np.pi / 180.0))
                            wrap = np.angle(np.exp(1j * cv_model.predict(t_dense)))
                            wrap[wrap < 0] += 2 * np.pi
                            scan_smooth.append(wrap)
                        else:
                            cv_model.fit(t_scan, self.azels[ind, jj, ii] * np.pi / 180.0)
                            scan_smooth.append(cv_model.predict(t_dense))
                    try:
                        azels[jj] = np.vstack((azels[jj],
                                               np.array(scan_smooth).T * 180.0 / pi))
                    except ValueError:
                        azels[jj] = np.array(scan_smooth).T * 180.0 / pi
        # (n_sta, n_t, n_coord) -> (n_t, n_sta, n_coord)
        self.pointingsJ2000 = np.swapaxes(np.array(pointingsJ2000), 0, 1)
        self.pointingsDate = np.swapaxes(np.array(pointingsDate), 0, 1)
        self.azels = np.swapaxes(np.array(azels), 0, 1)
        # resample tstamps to the dense grid
        self.resample(tstep=tstep)

    def smoothDude(self, tstep=1, method='polyLocal'):
        """
        Scan-based smoothing/densification of the calculated products
        (delays, uvws, Doppler) in self.dude.

        method: 'poly' or 'cheb' for optimal global fits per scan, or
        'polyLocal' for sliding local polynomial fits (localfit, 3 points,
        degree 2).  self.tstamps is resampled to tstep [s] at the end.
        """
        if method == 'polyLocal':
            # window size and degree of the local fits
            points = 3
            poly = 2
            cv_model = None
        else:
            # raises on unknown method, same as before
            cv_model = self._optimal_estimator(method)
        delay_smooth = []
        uvw_smooth = []
        doppler_smooth = []
        # iterate over scans, as fits must be scan-based
        for start, stop in zip(self.scanStartTimes, self.scanStopTimes):
            ind, t_scan, t_dense = self._scan_time_grid(start, stop, tstep)
            if len(self.dude.delay) > 0:
                scan_smooth = []
                # fit each of the delay 'components' separately
                for ii in range(self.dude.delay.shape[1]):
                    if method != 'polyLocal':
                        cv_model.fit(t_scan, self.dude.delay[ind, ii])
                        scan_smooth.append(cv_model.predict(t_dense))
                    else:
                        scan_smooth.append(localfit(t_scan, self.dude.delay[ind, ii],
                                                    t_dense, points=points, poly=poly))
                try:
                    delay_smooth = np.vstack((delay_smooth, np.array(scan_smooth).T))
                except ValueError:  # first scan: nothing stacked yet
                    delay_smooth = np.array(scan_smooth).T
            if len(self.dude.uvw) > 0:
                scan_smooth = []
                for ii in range(self.dude.uvw.shape[1]):
                    if method != 'polyLocal':
                        cv_model.fit(t_scan, self.dude.uvw[ind, ii])
                        scan_smooth.append(cv_model.predict(t_dense))
                    else:
                        scan_smooth.append(localfit(t_scan, self.dude.uvw[ind, ii],
                                                    t_dense, points=points, poly=poly))
                try:
                    uvw_smooth = np.vstack((uvw_smooth, np.array(scan_smooth).T))
                except ValueError:
                    uvw_smooth = np.array(scan_smooth).T
            if len(self.dude.doppler) > 0:
                scan_smooth = []
                for ii in range(self.dude.doppler.shape[1]):
                    if method != 'polyLocal':
                        cv_model.fit(t_scan, self.dude.doppler[ind, ii])
                        scan_smooth.append(cv_model.predict(t_dense))
                    else:
                        scan_smooth.append(localfit(t_scan, self.dude.doppler[ind, ii],
                                                    t_dense, points=points, poly=poly))
                try:
                    doppler_smooth = np.vstack((doppler_smooth, np.array(scan_smooth).T))
                except ValueError:
                    doppler_smooth = np.array(scan_smooth).T
        self.dude.delay = delay_smooth
        self.dude.uvw = uvw_smooth
        self.dude.doppler = doppler_smooth
        # resample tstamps to the dense grid
        self.resample(tstep=tstep)
'''
#==============================================================================
# Container for Delays, Uvws, Doppler and Etc.
#==============================================================================
'''
class dude(object):
    """
    Container for the products calculated for a given obs-object:
    Delays, Uvws, Doppler, Etc. -- hence the name.

    The attributes start out as empty lists and are later replaced with
    numpy arrays by the calculation/smoothing routines.
    """
    def __init__(self):
        # time series of calculated delays (and their components)
        self.delay = []
        # time series of (u, v, w) projections
        self.uvw = []
        # time series of Doppler predictions
        self.doppler = []
'''
#==============================================================================
# Constants
#==============================================================================
'''
class constants(object):
    """
    Physical constants used in the code.

    IMPORTANT!!! TDB->TCB->TT
    to convert to TCB-compatible values, the appropriate TDB-compatible mass
    value has to be multiplied by (1+L_B); then, to get the TT-compatible
    values, the TCB-compatible mass should be multiplied by (1-L_G).

    GM values come from the header of the requested planetary ephemeris
    (jpl_eph may contain '403', '405', '421', '430' or '13c'; default
    '421') and are converted from AU^3/day^2 to m^3/s^2.
    """
    # DE403 and DE405 share the same header values (TDB-compatible):
    # (AU [m], GM_Sun coef, Earth/Moon mass ratio, GM_Earth+Moon coef,
    #  GM coefs of Mercury, Venus, Mars, Jupiter, Saturn, Uranus,
    #  Neptune, Pluto), all GM coefs in AU^3/day^2.
    _DE405_HEADER = (1.49597870691000015e+11, 0.295912208285591095e-03,
                     0.813005600000000044e+02, 0.899701134671249882e-09,
                     (0.491254745145081187e-10, 0.724345248616270270e-09,
                      0.954953510577925806e-10, 0.282534590952422643e-06,
                      0.845971518568065874e-07, 0.129202491678196939e-07,
                      0.152435890078427628e-07, 0.218869976542596968e-11))
    # keep the historical match order: each key is searched for as a
    # substring of jpl_eph; a later match overrides an earlier one
    _eph_headers = (
        ('403', _DE405_HEADER),
        ('405', _DE405_HEADER),
        # DE/LE421 header, TDB-compatible
        ('421', (1.49597870699626200e+11, 0.295912208285591100e-03,
                 0.813005690699153000e+02, 0.899701140826804900e-09,
                 (0.491254957186794000e-10, 0.724345233269844100e-09,
                  0.954954869562239000e-10, 0.282534584085505000e-06,
                  0.845970607330847800e-07, 0.129202482579265000e-07,
                  0.152435910924974000e-07, 0.217844105199052000e-11))),
        # DE/LE430 header, TDB-compatible
        ('430', (1.49597870700000000e+11, 0.295912208285591100e-03,
                 0.813005690741906200e+02, 0.899701139019987100e-09,
                 (0.491248045036476000e-10, 0.724345233264412000e-09,
                  0.954954869555077000e-10, 0.282534584083387000e-06,
                  0.845970607324503000e-07, 0.129202482578296000e-07,
                  0.152435734788511000e-07, 0.217844105197418000e-11))),
        # INPOP13c header, TDB-compatible
        ('13c', (1.495978707000000e+11, 0.2959122082912712e-03,
                 0.8130056945994197e+02, 0.8997011572788968e-09,
                 (0.4912497173300158e-10, 0.7243452327305554e-09,
                  0.9549548697395966e-10, 0.2825345791109909e-06,
                  0.8459705996177680e-07, 0.1292024916910406e-07,
                  0.1524357330444817e-07, 0.2166807318808926e-11))),
    )

    def _set_gm(self, au, k_sun, emrat, k_em, k_planets):
        """Convert ephemeris-header GM values (AU^3/day^2) to SI (m^3/s^2).

        The evaluation order coef * au**3 / 86400.0**2 reproduces the
        historical float-by-float results exactly.
        """
        self.GSUN = k_sun * au ** 3 / 86400.0 ** 2
        self.MU = emrat ** (-1)  # Moon-to-Earth mass ratio
        self.GEARTH = k_em * au ** 3 / 86400.0 ** 2 / (1 + self.MU)
        self.GMOON = self.GEARTH * self.MU
        self.GMPlanet = [k * au ** 3 / 86400.0 ** 2 for k in k_planets]

    def __init__(self, jpl_eph='421'):
        """jpl_eph: ephemeris name/path; the version is matched by substring."""
        # math consts
        self.CDEGRAD = 1.7453292519943296e-02
        self.CARCRAD = 4.8481368110953599e-06
        self.CTIMRAD = 7.2722052166430399e-05
        self.SECDAY = 86400.0
        self.JUL_CENT = 36525.0
        # Julian date of the standard epoch J2000.0
        self.JD2000 = 2451545.0
        # general physical constants
        self.C = 2.99792458e8
        self.C_km = 2.99792458e5
        self.F = 298.25642  # the tide-free value, IERS2010
        self.AE = 6378136.3  # the tide-free value
        self.J_2 = 1.0826e-3
        self.AU = 149597870700.0  # DE430
        self.TAUA = 499.0047838061
        self.G = 6.67428e-11
        # from tn36; also see http://ilrs.gsfc.nasa.gov/docs/2014/196C.pdf
        self.L_B = 1.550519768e-8
        self.L_C = 1.48082686741e-8
        self.L_G = 6.969290134e-10
        # set GM values from the matching ephemeris header(s)
        for key, header in self._eph_headers:
            if key in jpl_eph:
                self._set_gm(*header)
        self.TDB_TCB = (1.0 + self.L_B)  # F^-1
        # G*masses in TCB-frame!
        self.GM_TCB = np.hstack([self.GMPlanet[0:2], self.GEARTH,
                                 self.GMPlanet[2:], self.GMOON, self.GSUN]) * self.TDB_TCB
        # G*masses in TDB-frame!
        self.GM = np.hstack([self.GMPlanet[0:2], self.GEARTH,
                             self.GMPlanet[2:], self.GMOON, self.GSUN])
'''
#==============================================================================
#
#==============================================================================
'''
class site(object):
'''
Class containing site-specific data for a station:
- station name
- position
- velocity
- thermal def coeffs and antenna axis offsets
- ocean loading parameters
- atmospheric loading parameters
- azimuth/elevation angles in case of a S/C obs
'''
    def __init__(self, name):
        """Initialise an empty site record for station ``name``.

        All coordinate, displacement and delay fields start out as zeros /
        empty containers and are filled in later by the geodetic and
        geophysical routines (e.g. self.geodetic()).
        """
        self.name = name.strip()
        # crds in terrestrial RF at J2000.0:
        self.r_GTRS = np.zeros(3)
        self.v_GTRS = np.zeros(3)
        # crds in terrestrial RF at the epoch of obs (acc for tectonic plate motion):
        self.r_GTRS_date = np.zeros(3)
        # crds in celestial geocentric RF at J2000.0:
        self.r_GCRS = np.zeros(3)
        self.v_GCRS = np.zeros(3)
        self.a_GCRS = np.zeros(3)
        # crds in celestial barycentric RF at J2000.0:
        self.r_BCRS = np.zeros(3)
        self.v_BCRS = np.zeros(3)
        self.a_BCRS = np.zeros(3)
        # geodetic data (filled in by self.geodetic()):
        self.sph_rad = 0.0 # site spherical radius [m]
        self.lat_gcen = 0.0 # geocentric latitude [rad]
        self.lon_gcen = 0.0 # geocentric east longitude [rad]
        self.lat_geod = 0.0 # geodetic latitude [rad]
        self.h_geod = 0.0 # geodetic height [m]
        # distance from the Earth spin axis and from the equatorial plane (in km):
        self.u = 0.0
        self.v = 0.0
        # VEN-to-crust-fixed rotation matrix:
        self.vw = np.identity(3)
        self.eta = np.zeros(3)
        self.theta = 0.0
        self.R_E = np.zeros(3) # Pierce point with the Earth ellipsoid
        # antennae Thermal deformations coefficients + some auxiliary info,
        # e.g. mount type:
        self.ivs_name = ''
        self.focus_type = ''
        self.mount_type = ''
        self.radome = ''
        self.meas_type = ''
        self.T0 = 0.0
        self.sin_T = 0.0
        self.cos_T = 0.0
        self.h0 = 0.0
        self.ant_diam = 0.0
        self.hf = 0.0
        self.df = 0.0
        self.gamma_hf = 0.0
        self.hp = 0.0
        self.gamma_hp = 0.0
        self.AO = 0.0
        self.gamma_AO = 0.0
        self.hv = 0.0
        self.gamma_hv = 0.0
        self.hs = 0.0
        self.gamma_hs = 0.0
        # ocean loading coefficients (11 x 3 amplitude/phase tables):
        self.amp_ocean = np.zeros((11,3))
        self.phs_ocean = np.zeros((11,3))
        # Atmospheric loading displacement:
        # !!! not implemented! (deliberately disabled via the 1==0 guard)
        if 1==0:
            self.amp_atm = np.zeros((9,3))
        # azimuth/elevation of a S/C or a source
        # self.azel = np.zeros(2)
        self.azel_interp = []
        # meteo data:
        # Petrov:
        # self.spd = {'tai': [], 'grid': [], 'delay_dry': [], 'delay_wet': []}
        self.spd = {'tai': [], 'elv_cutoff': [], 'delay_dry': [], 'delay_wet': []}
        # Vienna:
        self.met = {'mjd': [], 'ahz': [], 'awz': [], 'zhz': [], 'zwz': [], 'TC': [],
                    'pres': [], 'hum': [], 'gnh': [], 'geh': [], 'gnw': [], 'gew': []}
        # interpolants for meteo data:
        self.fMet = {'fT':[], 'fP':[], 'fH':[],\
                     'fAhz':[], 'fAwz':[], 'fZhz':[], 'fZwz':[],\
                     'fGnh':[], 'fGeh':[], 'fGnw':[], 'fGew':[]}
        ## displacements in GCRS due to different phenomena:
        # solid Earth tides:
        self.dr_tide = np.zeros(3)
        self.dv_tide = np.zeros(3)
        # ocean loading:
        self.dr_oclo = np.zeros(3)
        self.dv_oclo = np.zeros(3)
        # pole tide:
        self.dr_poltide = np.zeros(3)
        self.dv_poltide = np.zeros(3)
        # atmospheric loading:
        self.dr_atlo = np.zeros(3)
        self.dv_atlo = np.zeros(3)
        ## delays due to instrumental and propagation effects
        # thermal deformation of telescope:
        self.dtau_therm = 0.0
        # axis offset:
        self.dtau_ao = 0.0
        # troposphere:
        self.dtau_tropo = 0.0
        # ionosphere:
        self.dtau_iono = 0.0
def geodetic(self, const):
    '''
    Calculate the site position in geodetic-type coordinate systems
    from the crust-fixed Cartesian position self.r_GTRS_date.

    The transformation matrix vw from the local geodetic
    (Vertical, East, North) system to the Earth-fixed system is computed
    as well. The IERS 2010 Reference Ellipsoid (Table 1.1, IERS 2010
    Conventions) is used. The geophysical values are those for the
    "zero-frequency" tide system.
    NOTE !!! The ITRF2000 is "conventional tide free crust" system.

    Input:
        const - physical constants container
                (NOTE(review): not referenced in the body - kept only
                 for interface compatibility with callers)
    Output (stored on self):
        lat_geod - geodetic latitude (rad)
        h_geod   - geodetic height (m)
        lat_gcen - geocentric latitude (rad)
        lon_gcen - geocentric east longitude, 0 <= lon_gcen <= 2*pi (rad)
        sph_rad  - site spherical radius (m)
        u        - distance from the Earth spin axis (km)
        v        - distance from the equatorial plane (km)
        vw       - VEN -> Earth-fixed rotation matrix
        eta, theta, R_E - unit normal, scale factor and pierce point of
                 the site direction with the IERS 2010 ellipsoid
    '''
    # IERS 2010 ellipsoid: semi-major axis [m], inverse flattening
    AE = 6378136.3 # m
    F = 298.25642
    # the geocenter and the orbiting station 'RA' have no geodetic position
    if self.name == 'RA' or self.name == 'GEOCENTR':
        return
    # Compute the site spherical radius
    self.sph_rad = sqrt( sum(x*x for x in self.r_GTRS_date) )
    # Compute geocentric latitude
    self.lat_gcen = asin( self.r_GTRS_date[2] / self.sph_rad )
    # Compute geocentric longitude, wrapped into [0, 2*pi)
    self.lon_gcen = atan2( self.r_GTRS_date[1], self.r_GTRS_date[0] )
    if self.lon_gcen < 0.0:
        self.lon_gcen = self.lon_gcen + 2.0*pi
    # Compute the stations distance from the Earth spin axis and
    # from the equatorial plane (in KM)
    req = sqrt( sum(x*x for x in self.r_GTRS_date[:-1]) )
    self.u = req*1e-3
    self.v = self.r_GTRS_date[2]*1e-3
    # Compute geodetic latitude and height.
    # The geodetic longitude is equal to the geocentric longitude
    self.lat_geod, self.h_geod = self.geoid( req, self.r_GTRS_date[2], \
                                             AE, F )
    # Compute the local VEN-to-crust-fixed rotation matrix by rotating
    # about the geodetic latitude and the longitude:
    # w - rotation matrix by an angle lat_geod around the y axis
    w = self.R_123(2, self.lat_geod)
    # v - rotation matrix by an angle -lon_gcen around the z axis
    v = self.R_123(3, -self.lon_gcen)
    # product of the two matrices:
    self.vw = np.dot(v, w)
    # IERS 2010 Ellipsoid parameters (semi-minor axis, eccentricity):
    a = AE
    b = a*(1-1/F)
    e = np.sqrt(a**2-b**2)/a
    R = self.r_GTRS_date
    # initial approximation of the pierce point with the ellipsoid.
    # Formula (43a), p. 501
    sq = np.sqrt( R[0]**2 + R[1]**2 + (1-e**2)*(R[2]**2) )
    R_E = np.array([a*R[0]/sq, a*R[1]/sq, a*R[2]*(1-e**2)/sq])
    # iterative process to solve for the Earth diameter at the site;
    # stops on 1 nm convergence or after i_max sweeps.
    # (formula/page numbers presumably refer to the same reference as
    #  self.geoid, i.e. Borkowski-type derivation - TODO confirm)
    R_E_old = np.zeros(3) # initialise
    ii = 0
    i_max = 5
    while (norm(R_E-R_E_old) > 1e-9) and (ii < i_max):
        R_E_old = np.copy(R_E) #for comparison
        # (21a), p.498
        r_s_shtrih = np.array([R[0]-R_E[0]*(e**2), R[1]-R_E[1]*(e**2), R[2]])
        # (21a), p.498
        R_s_shtrih = np.array([R_E[0]*(1-e**2), R_E[1]*(1-e**2), R_E[2]])
        # (22a), p.498 - unit normal to the ellipsoid at the pierce point
        eta = r_s_shtrih / norm(r_s_shtrih)
        # (32), p.499
        theta = R_s_shtrih[0]/r_s_shtrih[0]
        # new XeYeZe, (33a), p.499
        R_E = np.array([theta*(R[0] - (e**2)*R_E[0])/(1-e**2),\
                        theta*(R[1] - (e**2)*R_E[1])/(1-e**2), theta*R[2]])
        ii += 1
    # store converged normal, scale factor and ellipsoid pierce point
    self.eta = eta
    self.theta = theta
    self.R_E = R_E
@staticmethod
def geoid(r, z, a, fr):
'''
Transform Cartesian to geodetic coordinates
based on the exact solution (Borkowski,1989)
Input variables :
r, z = equatorial [m] and polar [m] components
Output variables:
fi, h = geodetic coord's (latitude [rad], height [m])
IERS ellipsoid: semimajor axis (a) and inverse flattening (fr)
'''
if z>=0.0:
b = abs(a - a/fr)
else:
b = -abs(a - a/fr)
E = ((z + b)*b/a - a)/r
F = ((z - b)*b/a + a)/r
# Find solution to: t**4 + 2*E*t**3 + 2*F*t - 1 = 0
P = (E*F + 1.0)*4.0/3.0;
Q = (E*E - F*F)*2.0
D = P*P*P + Q*Q
if D >= 0.0:
s = sqrt(D) + Q
if s>=0:
s = abs(exp(log(abs(s))/3.0))
else:
s = -abs(exp(log(abs(s))/3.0))
v = P/s - s
# Improve the accuracy of numeric values of v
v = -(Q + Q + v*v*v)/(3.0*P)
else:
v = 2.0*sqrt(-P)*cos(acos(Q/P/sqrt(-P))/3.0)
G = 0.5*(E + sqrt(E*E + v))
t = sqrt(G*G + (F - v*G)/(G + G - E)) - G
fi = atan((1.0 - t*t)*a/(2.0*b*t))
h = (r - a*t)*cos(fi) + (z - b)*sin(fi)
return fi, h
@staticmethod
def R_123 (i, theta):
'''
function R_123 creates a matrix 'R_i' which describes
a right rotation by an angle 'THETA' about coordinate axis 'I'
Input variables:
1. I - The number which determines the rotation axis.
(I = 1, 2, 3 corresponds X, Y, and Z axes respectfully)
2. THETA - The rotation angle. (RAD)
Output variables:
1. R(3,3) - The 3x3 rotation matrix. (UNITLESS)
'''
r = np.zeros((3,3))
c = cos(theta)
s = sin(theta)
if i==1:
#Rotation around the X-axis:
# ( 1 0 0 )
# R(X) = ( 0 C S )
# ( 0 -S C )
r[0,0] = 1.0
r[1,0] = 0.0
r[2,0] = 0.0
r[0,1] = 0.0
r[1,1] = +c
r[2,1] = -s
r[0,2] = 0.0
r[1,2] = +s
r[2,2] = +c
elif i==2:
#Rotation around the Y-axis:
# ( C 0 -S )
# R(Y) = ( 0 1 0 )
# ( S 0 C )
r[0,0] = +c
r[1,0] = 0.0
r[2,0] = +s
r[0,1] = 0.0
r[1,1] = 1.0
r[2,1] = 0.0
r[0,2] = -s
r[1,2] = 0.0
r[2,2] = +c
elif i==3:
#Rotation around the Z-axis:
# ( C S 0 )
# R(Z) = (-S C 0 )
# ( 0 0 1 )
r[0,0] = +c
r[1,0] = -s
r[2,0] = 0.0
r[0,1] = +s
r[1,1] = +c
r[2,1] = 0.0
r[0,2] = 0.0
r[1,2] = 0.0
r[2,2] = 1.0
return r
def AzEl2(self, gcrs, utc, JD, t_1, r2000, jpl_eph):
    '''
    Calculate topocentric azimuth/elevation of the S/C as seen from this
    station, iterating on the station-centric light-travel time.

    input:
        gcrs - eph.gcrs: s/c GCRS ephemeris table (cols 0..2 - start
               date, cols 6..11 - position/velocity)
        utc  - eph.UT, ephemeris time stamps in decimal days
        JD   - Julian Date of the observation day
        t_1  - obs epoch in decimal days
        r2000 - GCRS -> crust-fixed rotation matrix
        jpl_eph - JPL planetary ephemeris (for relativistic LT terms)
    output:
        az, el [rad] and the converged light-travel time lt_01 [s]
    '''
    const = constants()
    C = const.C # m/s
    GM = const.GM
    # iterate the light-travel time to 1 ps, at most n_max sweeps
    precision = 1e-12
    n_max = 3
    lag_order = 5
    # initial approximation:
    nn = 0
    lt_01_tmp = 0.0
    astropy_t_1 = Time(JD + t_1/86400.0, format='jd', scale='utc', precision=9)
    eph_t_0 = datetime.datetime(*map(int, gcrs[0,:3]))
    # first time stamp negative?
    if utc[0]<0:
        eph_t_0 += datetime.timedelta(days=1)
    # cut eph and it's tomorrow?
    if utc[0]//1 > 0:
        eph_t_0 -= datetime.timedelta(days=utc[0]//1)
    # whole-day offset between the obs epoch and the ephemeris start day
    dd = (astropy_t_1.datetime - eph_t_0).days
    # zeroth-order s/c GCRS position at the obs epoch
    x, _ = lagint(lag_order, utc, gcrs[:,6], t_1+dd)
    y, _ = lagint(lag_order, utc, gcrs[:,7], t_1+dd)
    z, _ = lagint(lag_order, utc, gcrs[:,8], t_1+dd)
    R_0_0 = np.hstack((x,y,z))
    # first guess: straight-line distance / c
    lt_01 = norm(R_0_0 - self.r_GCRS)/C
    mjd = JD - 2400000.5
    astropy_t_0 = Time(mjd + t_1 - lt_01/86400.0, \
                        format='mjd', scale='utc', precision=9)
    t_0_0 = astropy_t_0.tdb.jd2
    ''' BCRS state vectors of celestial bodies at JD+CT, [m, m/s]: '''
    ## Earth:
    rrd = pleph(JD+t_0_0, 3, 12, jpl_eph)
    earth = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
    ## Sun:
    rrd = pleph(JD+t_0_0, 11, 12, jpl_eph)
    sun = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
    ## Moon:
    rrd = pleph(JD+t_0_0, 10, 12, jpl_eph)
    moon = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
    # remaining bodies; Earth/Moon/Sun are placed so the list order
    # matches the GM array indexing used below
    state_ss = []
    for jj in (1,2,4,5,6,7,8,9):
        rrd = pleph(JD+t_0_0, jj, 12, jpl_eph)
        state_ss.append(np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3)
    state_ss.insert(2, earth)
    state_ss.append(moon)
    state_ss.append(sun)
    # refine the light-travel time, adding the relativistic (Shapiro-type)
    # delay RLT from each solar-system body
    while (abs(lt_01 - lt_01_tmp) > precision) and (nn < n_max):
        lt_01_tmp = lt_01
        t_0 = t_1 - lt_01/86400.0
        x, _ = lagint(lag_order, utc, gcrs[:,6], t_0+dd)
        y, _ = lagint(lag_order, utc, gcrs[:,7], t_0+dd)
        z, _ = lagint(lag_order, utc, gcrs[:,8], t_0+dd)
        vx, _ = lagint(lag_order, utc, gcrs[:,9], t_0+dd)
        vy, _ = lagint(lag_order, utc, gcrs[:,10], t_0+dd)
        vz, _ = lagint(lag_order, utc, gcrs[:,11], t_0+dd)
        R_0 = np.hstack((x,y,z))
        V_0 = np.hstack((vx,vy,vz))
        # station-to-s/c vector needed for the RLT calculation
        R_01 = self.r_GCRS - R_0
        RLT = 0.0
        for ii, state in enumerate(state_ss):
            # skip Earth's own term for a geocentric "station"
            if ii==2 and norm(self.r_GCRS)==0.0: continue
            rb = state[:,0]
            vb = state[:,1]
            R_0_B = R_0 - (rb - (t_0-t_0_0)*86400.0*vb)
            R_1_B = self.r_GCRS - rb
            R_01_B = R_1_B - R_0_B
            RLT += (2.0*GM[ii]/C**3) * \
                log( ( norm(R_0_B) + norm(R_1_B) + norm(R_01_B) + \
                    2.0*GM[ii]/C**2 ) / \
                    ( norm(R_0_B) + norm(R_1_B) - norm(R_01_B) + \
                    2.0*GM[ii]/C**2 ) )
        # Newton step on lt_01
        lt_01 = lt_01 - (lt_01 - norm(R_01)/C - RLT) / \
                ( 1.0 - np.dot(R_01, V_0)/(C*norm(R_01)) )
        t_0 = t_1 - lt_01/86400.0
        nn += 1
    # s/c position at the converged retarded epoch
    x, _ = lagint(lag_order, utc, gcrs[:,6], t_0+dd)
    y, _ = lagint(lag_order, utc, gcrs[:,7], t_0+dd)
    z, _ = lagint(lag_order, utc, gcrs[:,8], t_0+dd)
    # it's still station-centric!
    r = -(self.r_GCRS - np.hstack((x,y,z)))
    # S/C position is given at a moment LT seconds ago, which
    # means r is abberated in the far-field case sense
    # normalise aberrated vector:
    K_unit_aber = r / norm(r)
    # Rotate the aberrated vector to the crust fixed system, then to the
    # local VEN system of the site:
    Crust_star = np.dot(r2000.T, K_unit_aber)
    ven_star = np.dot(self.vw.T, Crust_star)
    el = np.arcsin( ven_star[0] )
    az = np.arctan2(ven_star[1], ven_star[2])
    if az < 0.0:
        az += 2.0*np.pi
    return az, el, lt_01
def AzEl(self, gtrs, JD, UTC, t_obs, jpl_eph, interpolants=False):
    '''
    DEPRECATED - unconditionally raises; use AzEl2 instead.

    Calculate topocentric [Elevation, Azimuth] of the S/C
    on the basis of lt corrected GC SC ephemeris
    input:
        gtrs/UT - target ITRF ephemeris
        [t_obs] - if this is set, method returns Az/El at epoch
                  otherwise, constructs Az/El series corresponding
                  to eph.gtrs
                  t_obs should be in decimal days

    NOTE(review): everything below the raise is dead code kept for
    reference. It appears to carry latent bugs: the epoch-loop variable
    jj is clobbered by an inner loop, and the planetary velocity term
    uses (t_0 - t_0_0*86400.0) where AzEl2 uses (t_0 - t_0_0)*86400.0.
    Do not resurrect without review.
    '''
    raise Exception('Depricated. Use AzEl2 instead.')
    if self.name=='GEOCENTR' or self.name=='RA':
        return
    const = constants()
    C = const.C
    GM = const.GM
    R_1 = self.r_GTRS_date
    UT = np.copy(UTC)
    UT *= 86400.0
    # check input, create list with epochs:
    try:
        len(t_obs) # this will fail if t_obs is a number
        epochs = 86400.0*np.array(t_obs)
    except:
        epochs = np.array([86400.0*t_obs])
    # compute LTs from the target to the station,
    # then correct the ITRF eph for these LTs using linear interpolation
    n_max = 3
    lag_order = 5
    r_lt = [] # lt-to-station-corrected s/c positions
    mjd = JD - 2400000.5
    for jj, t_1 in enumerate(epochs):
        # zeroth-order s/c position at the epoch
        x, _ = lagint(3, UT, gtrs[:,6], t_1)
        y, _ = lagint(3, UT, gtrs[:,7], t_1)
        z, _ = lagint(3, UT, gtrs[:,8], t_1)
        R_0_0 = np.hstack((x,y,z))
        lt_01 = norm(R_1 - R_0_0)/C
        astropy_t_0 = Time(mjd + (t_1 - lt_01)/86400.0, \
                           format='mjd', scale='utc', precision=9)
        t_0_0 = astropy_t_0.tdb.jd2
        ''' BCRS state vectors of celestial bodies at JD+CT, [m, m/s]: '''
        ## Earth:
        rrd = pleph(JD+t_0_0, 3, 12, jpl_eph)
        earth = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
        ## Sun:
        rrd = pleph(JD+t_0_0, 11, 12, jpl_eph)
        sun = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
        ## Moon:
        rrd = pleph(JD+t_0_0, 10, 12, jpl_eph)
        moon = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
        # NOTE(review): jj is reused here, clobbering the epoch index
        state_ss = []
        for jj in (1,2,4,5,6,7,8,9):
            rrd = pleph(JD+t_0_0, jj, 12, jpl_eph)
            state_ss.append(np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3)
        state_ss.insert(2, earth)
        state_ss.append(moon)
        state_ss.append(sun)
        # window of ephemeris samples around the epoch, sized by the LT
        numlt = max(15.0, 2.1*lt_01)
        mini = max(np.searchsorted(UT, t_1-numlt)-1, 0)
        maxi = np.searchsorted(UT, t_1+numlt)
        # make n_max iterations
        for nn in range(n_max):
            t_0 = t_1 - lt_01 # in sec
            x, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,6], t_0)
            y, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,7], t_0)
            z, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,8], t_0)
            R_0 = np.hstack((x,y,z))
            vx, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,9], t_0)
            vy, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,10], t_0)
            vz, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,11], t_0)
            V_0 = np.hstack((vx,vy,vz))
            # vector needed for RLT calculation
            R_01 = R_1 - R_0
            RLT = 0.0
            for ii, state in enumerate(state_ss):
                if ii==2 and norm(R_1)==0.0: continue
                rb = state[:,0]
                vb = state[:,1]
                # NOTE(review): bracket placement differs from AzEl2
                R_0_B = R_0 - (rb - (t_0-t_0_0*86400.0)*vb)
                R_1_B = R_1 - rb
                R_01_B = R_1_B - R_0_B
                RLT += (2.0*GM[ii]/C**3) * \
                    log( ( norm(R_0_B) + norm(R_1_B) + norm(R_01_B) + \
                        2.0*GM[ii]/C**2 ) / \
                        ( norm(R_0_B) + norm(R_1_B) - norm(R_01_B) + \
                        2.0*GM[ii]/C**2 ) )
            lt_01 = lt_01 - (lt_01 - norm(R_01)/C - RLT) / \
                    ( 1.0 - np.dot(R_01, V_0)/(C*norm(R_01)) )
            t_0 = t_1 - lt_01
            nn += 1
        # s/c position at the converged retarded epoch
        x, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,6], t_0)
        y, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,7], t_0)
        z, _ = lagint(lag_order, UT[mini:maxi], gtrs[mini:maxi,8], t_0)
        R_0 = np.hstack((x,y,z))
        r_lt.append(R_0)
    r_lt = np.array(r_lt)
    ## iterative eta + (phi,lambda,h) determination
    eta = self.eta
    ## (H, A) PROPER CALCULATION
    # elevation above the local ellipsoid normal eta
    rho = r_lt - R_1
    H = []
    for rh in rho:
        H.append(np.arcsin( np.dot(rh, eta) / ( norm(rh) ) ))
    H = np.array(H).T
    # azimuth from North to East
    Z = np.array([0.0, 0.0, 1.0])
    E = np.cross(Z, R_1) / norm(np.cross(Z, R_1))
    N = np.cross(eta, E) / norm(np.cross(eta, E))
    A = []
    for rh in rho:
        A.append(np.arctan2(np.dot(rh, E), np.dot(rh, N)))
        if A[-1]<0.0: A[-1] += 2.0*np.pi
    A = np.array(A).T
    if not interpolants:
        if len(epochs)==1:
            return A[0], H[0]
        else:
            return A, H
    else:
        # store interpolant of these - for faster access later
        fA = sp.interpolate.interp1d(epochs/86400.0, A)
        fH = sp.interpolate.interp1d(epochs/86400.0, H)
        self.azel_interp = [fA, fH]
def LT_radec_bc(self, bcrs, tdb, JD_UTC, t_1_UTC, jpl_eph):
    """
    Calculate the station-centric light-travel time and the
    LT-corrected (ra, dec) of the S/C from its BCRS ephemeris.

    input:
        bcrs - eph.bcrs: s/c BCRS ephemeris table (cols 0..2 - start
               date, cols 6..11 - position/velocity)
        tdb  - eph.CT, ephemeris time stamps in decimal days (TDB)
        JD_UTC, t_1_UTC - obs epoch as Julian date + day fraction, UTC
    output:
        lt_01 [s], ra [rad], dec [rad]
    side effect:
        sets self.r_BCRS (approximate barycentric station position)
    """
    const = constants()
    C = const.C # m/s
    GM = const.GM
    # iterate the light-travel time to 1 ps, at most n_max sweeps
    precision = 1e-12
    n_max = 3
    lag_order = 5
    # zero point: obs epoch converted to the TDB scale
    astropy_t_1 = Time(JD_UTC, t_1_UTC, format='jd', scale='utc', precision=9).tdb
    mjd_full = astropy_t_1.mjd
    mjd = np.floor(mjd_full)
    JD = mjd + 2400000.5
    astropy_t_1_day_start = Time(JD, format='jd', scale='tdb', precision=9)
    # t_1: fraction of the TDB day, in days
    t_1 = (astropy_t_1-astropy_t_1_day_start).jd
    t_start_day = datetime.datetime(*map(int, bcrs[0, :3]))
    # whole-day offset between the obs epoch and the ephemeris start day
    dd = (astropy_t_1.datetime - t_start_day).total_seconds() // 86400
    # initial approximation:
    nn = 0
    lt_01_tmp = 0.0
    x, _ = lagint(lag_order, tdb, bcrs[:, 6], t_1+dd)
    y, _ = lagint(lag_order, tdb, bcrs[:, 7], t_1+dd)
    z, _ = lagint(lag_order, tdb, bcrs[:, 8], t_1+dd)
    R_0_0 = np.hstack((x, y, z))
    # station barycentric position = Earth BCRS + station GCRS
    ## Earth:
    rrd = pleph(JD + t_1, 3, 12, jpl_eph)
    earth = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
    # low accuracy is good enough for this application:
    self.r_BCRS = earth[:, 0] + self.r_GCRS
    # first guess: straight-line distance / c
    lt_01 = norm(R_0_0 - self.r_BCRS)/C
    ''' BCRS state vectors of celestial bodies at JD+CT, [m, m/s]: '''
    state_ss = []
    for jj in (1, 2, 4, 5, 6, 7, 8, 9, 10, 11):
        rrd = pleph(astropy_t_1.jd, jj, 12, jpl_eph)
        state_ss.append(np.reshape(np.asarray(rrd), (3, 2), 'F') * 1e3)
    # refine the light-travel time, adding the relativistic (Shapiro-type)
    # delay RLT from each solar-system body
    while (abs(lt_01 - lt_01_tmp) > precision) and (nn < n_max):
        lt_01_tmp = lt_01
        t_0 = t_1 - lt_01/86400.0
        x, _ = lagint(lag_order, tdb, bcrs[:, 6], t_0+dd)
        y, _ = lagint(lag_order, tdb, bcrs[:, 7], t_0+dd)
        z, _ = lagint(lag_order, tdb, bcrs[:, 8], t_0+dd)
        vx, _ = lagint(lag_order, tdb, bcrs[:, 9], t_0+dd)
        vy, _ = lagint(lag_order, tdb, bcrs[:, 10], t_0+dd)
        vz, _ = lagint(lag_order, tdb, bcrs[:, 11], t_0+dd)
        R_0 = np.hstack((x, y, z))
        V_0 = np.hstack((vx, vy, vz))
        # station-to-s/c vector needed for the RLT calculation
        R_01 = self.r_BCRS - R_0
        RLT = 0.0
        for j, ii in enumerate((1, 2, 4, 5, 6, 7, 8, 9, 10, 11)):
            # body state at the retarded epoch
            rrd = pleph(JD + t_0, ii, 12, jpl_eph)
            state = np.reshape(np.asarray(rrd), (3, 2), 'F') * 1e3
            R_B = state[:,0]
            R_0_B = R_B - R_0
            R_1_B = state_ss[j][:,0] - self.r_BCRS
            R_01_B = R_1_B - R_0_B
            RLT += (2.0*GM[ii-1]/C**3) * \
                log( ( norm(R_0_B) + norm(R_1_B) + norm(R_01_B) + 2.0*GM[ii-1]/C**2 ) /
                     ( norm(R_0_B) + norm(R_1_B) - norm(R_01_B) + 2.0*GM[ii-1]/C**2 ) )
        # Newton step on lt_01
        lt_01 = lt_01 - (lt_01 - norm(R_01)/C - RLT) / \
                ( 1.0 - np.dot(R_01, V_0)/(C*norm(R_01)) )
        t_0 = t_1 - lt_01/86400.0
        nn += 1
    # s/c position at the converged retarded epoch
    x, _ = lagint(lag_order, tdb, bcrs[:, 6], t_0+dd)
    y, _ = lagint(lag_order, tdb, bcrs[:, 7], t_0+dd)
    z, _ = lagint(lag_order, tdb, bcrs[:, 8], t_0+dd)
    # it's still station-centric!
    r = -(self.r_BCRS - np.hstack((x, y, z)))
    ra = np.arctan2(r[1], r[0]) # right ascension
    dec = np.arctan(r[2]/np.sqrt(r[0]**2+r[1]**2)) # declination
    if ra < 0:
        ra += 2.0*np.pi
    return lt_01, ra, dec
def addMet(self, date_start, date_stop, inp):
    """
    Load site meteorological data covering [date_start, date_stop].

    Two sources are tried for each day:
      - Petrov 'spd' slant path delay files (3-hourly) -> self.spd;
      - Vienna VMF1 site files -> self.met, falling back to VMF1
        gridded files interpolated to the site position.
    Tropospheric gradients are read from 'lhg' site files.
    Finally, 1-d interpolants over MJD are built in self.fMet for
    fast later access.

    Files with these should be pre-downloaded with doup() function
    into inp['meteo_cat'].

    input:
        date_start - datetime object with start date
        date_stop  - datetime object with stop date
        inp        - input settings (cats etc.)
    """
    # no meteo for the geocenter or the orbiting station
    if self.name == 'GEOCENTR' or self.name == 'RA':
        return
    day_start = datetime.datetime(date_start.year, date_start.month, date_start.day)
    day_stop = datetime.datetime(date_stop.year, date_stop.month, date_stop.day) + datetime.timedelta(days=1)
    dd = (day_stop - day_start).days
    # make list with datetime objects ranging from 1st day to last+1
    dates = [day_start]
    for d in range(1, dd+1):
        dates.append(day_start + datetime.timedelta(days=d))
    for di, day in enumerate(dates):
        year = day.year
        doy = day.timetuple().tm_yday # day of year
        ''' try Petrov first: '''
        try:
            # one spd file every 3 hours
            for hh in range(0, 24, 3):
                spd_file = 'spd_geosfpit_{:s}_{:02d}00.spd'.format(day.strftime('%Y%m%d'), hh)
                with open(os.path.join(inp['meteo_cat'], spd_file), 'r') as f:
                    f_lines = f.readlines()
                # get stations:
                spd_stations = {int(ss.split()[1]): ss.split()[2] for ss in f_lines if
                                ss[0] == 'S' and ss.split()[0] == 'S'}
                if self.name not in spd_stations.values():
                    raise Exception('Station {:s} not in spd-file'.format(self.name))
                # get grid:
                spd_elv = {int(ss.split()[1]): float(ss.split()[2]) for ss in f_lines if
                           ss[0] == 'E' and ss.split()[0] == 'E'}
                spd_azi = {int(ss.split()[1]): float(ss.split()[2]) for ss in f_lines if
                           ss[0] == 'A' and ss.split()[0] == 'A'}
                # make 2d grid for interpolation:
                spd_elv_grid, spd_azi_grid = sorted(spd_elv.values())[::-1], sorted(spd_azi.values())
                # get relevant entries (Fortran 'D' exponents -> 'e'):
                d_f_lines = [map(int, l.split()[2:4]) + map(float, l.replace('D-', 'e-').split()[4:])
                             for l in f_lines if l[0] == 'D' and l.split()[0] == 'D' and
                             spd_stations[int(l.split()[1])] == self.name]
                # different grid:
                spd_grid = [[spd_elv_grid[entry[0] - 1], spd_azi_grid[entry[1] - 1]] for entry in d_f_lines]
                spd_grid_delay_dry = [entry[2] for entry in d_f_lines]
                spd_grid_delay_wet = [entry[3] for entry in d_f_lines]
                # load into self:
                self.spd['tai'].append(Time(str(day + datetime.timedelta(hours=hh)),
                                            format='iso', scale='tai').mjd)
                self.spd['elv_cutoff'].append(np.min(np.array(spd_grid)[:, 0]))
                # piecewise cubic 2d interpolation:
                dry_interp = sp.interpolate.CloughTocher2DInterpolator(spd_grid, spd_grid_delay_dry)
                wet_interp = sp.interpolate.CloughTocher2DInterpolator(spd_grid, spd_grid_delay_wet)
                self.spd['delay_dry'].append(dry_interp)
                self.spd['delay_wet'].append(wet_interp)
                # for the last 'day', get the first file only:
                if di == len(dates) - 1:
                    break
        except Exception as _e:
            # best-effort: a missing spd file/station is not fatal
            print(_e)
        ''' VMF1: '''
        try:
            vmf_file = '{:4d}{:03d}.vmf1_r'.format(year, doy)
            # vmf1 site files:
            with open(os.path.join(inp['meteo_cat'], vmf_file), 'r') as f:
                f_lines = f.readlines()
            # if met data are present in the vmf1 site file, load 'em
            if any(self.name in s.split() for s in f_lines):
                noSiteData = False
                for f_line in f_lines:
                    if self.name in f_line.split():
                        tmp = [float(i) for i in f_line[10:].split()]
                        self.met['mjd'].append(tmp[0])
                        self.met['ahz'].append(tmp[1])
                        self.met['awz'].append(tmp[2])
                        self.met['zhz'].append(tmp[3])
                        self.met['zwz'].append(tmp[4])
                        self.met['pres'].append(tmp[6])
                        self.met['TC'].append(tmp[7])
                        # relative humidity from water vapour pressure via
                        # a saturation-pressure formula (Goff-Gratch-type -
                        # TODO confirm the exact reference)
                        wvp = tmp[8]
                        TK = tmp[7] + 273.15
                        ew = 10**(10.79574*(1-273.16/TK) -
                                  5.028*np.log10(TK/273.16) +
                                  1.50475e-4*(1-10**(8.2969*(1-TK/273.16))) +
                                  0.42873e-3*(10**(4.76955*(1-TK/273.16))-1) + 0.78614)
                        self.met['hum'].append(100.0*wvp/ew)
            # if met data are not there, load gridded data
            else:
                noSiteData = True
        except Exception as _e:
            # NOTE(review): a read failure sets noSiteData = False, which
            # also suppresses the gridded-data fallback below - confirm
            # that this is intended
            noSiteData = False
            print(_e)
    if noSiteData: # if met data are not there, load gridded data
        try:
            print 'Meteo data for '+self.name+\
                  ' not found. Using VMF1 grids instead.'
            # VMF1 grid: 91 latitudes x 144 longitudes
            lat = np.arange(90,-92,-2)
            lon = np.arange(0,359,2.5)
            # for each day load data
            for day in dates:
                # mjd via the standard Julian-date formula:
                yy = day.year
                mm = day.month
                dd = day.day
                if mm <= 2: #January & February
                    yy = yy - 1.0
                    mm = mm + 12.0
                jd = floor( 365.25*(yy + 4716.0)) + \
                     floor( 30.6001*( mm + 1.0)) + 2.0 - \
                     floor( yy/100.0 ) + \
                     floor( floor( yy/100.0 )/4.0 ) + dd - 1524.5
                mjd = jd - 2400000.5
                for hh in (0.0, 6.0, 12.0, 18.0):
                    self.met['mjd'].append(mjd + hh/24.0)
                vmf_grid_file = 'VMFG_{:4d}{:02d}{:02d}.H'.\
                                format(day.year, day.month, day.day)
                # load gridded data
                for hh in ('00', '06', '12', '18'):
                    # vmf1 grid files:
                    with open(os.path.join(inp['meteo_cat'], vmf_grid_file+hh), 'r') as f:
                        f_lines = f.readlines()
                    # drop '!' comment lines
                    f_lines = [line for line in f_lines if line[0]!='!']
                    ahz_grid = []
                    awz_grid = []
                    zhz_grid = []
                    zwz_grid = []
                    grid = []
                    for f_line in f_lines:
                        tmp = [float(i) for i in f_line.split()]
                        grid.append([tmp[0], tmp[1]])
                        ahz_grid.append(tmp[2])
                        awz_grid.append(tmp[3])
                        zhz_grid.append(tmp[4])
                        zwz_grid.append(tmp[5])
                    ahz_grid = np.array(ahz_grid)
                    awz_grid = np.array(awz_grid)
                    zhz_grid = np.array(zhz_grid)
                    zwz_grid = np.array(zwz_grid)
                    # bilinear interpolation of each field to the site
                    f = sp.interpolate.interp2d(lon, lat, \
                                                ahz_grid.reshape((91,144)))
                    ahz = f(self.lon_gcen, self.lat_geod)[0]
                    f = sp.interpolate.interp2d(lon, lat, \
                                                awz_grid.reshape((91,144)))
                    awz = f(self.lon_gcen, self.lat_geod)[0]
                    f = sp.interpolate.interp2d(lon, lat, \
                                                zhz_grid.reshape((91,144)))
                    zhz = f(self.lon_gcen, self.lat_geod)[0]
                    f = sp.interpolate.interp2d(lon, lat, \
                                                zwz_grid.reshape((91,144)))
                    zwz = f(self.lon_gcen, self.lat_geod)[0]
                    self.met['ahz'].append(ahz)
                    self.met['awz'].append(awz)
                    self.met['zhz'].append(zhz)
                    self.met['zwz'].append(zwz)
            # convert lists of one-valued array into arrays
            self.met['ahz'] = np.array(self.met['ahz'])
            self.met['awz'] = np.array(self.met['awz'])
            self.met['zhz'] = np.array(self.met['zhz'])
            self.met['zwz'] = np.array(self.met['zwz'])
        except Exception as _e:
            print(_e)
    ## lhg files with precomputed site-specific data (tropo gradients):
    for day in dates:
        year = day.year
        doy = day.timetuple().tm_yday # day of year
        try:
            # lhg site files:
            lhg_file = '{:4d}{:03d}.lhg_r'.format(year, doy)
            with open(inp['meteo_cat']+'/'+lhg_file,'r') as f:
                f_lines = f.readlines()
            # if met data are present in the lhg site file, load 'em
            if any(self.name in s for s in f_lines):
                for f_line in f_lines:
                    if self.name in f_line.split():
                        tmp = [float(i) for i in f_line[10:].split()]
                        self.met['gnh'].append(tmp[1])
                        self.met['geh'].append(tmp[2])
                        self.met['gnw'].append(tmp[3])
                        self.met['gew'].append(tmp[4])
        except Exception as _e:
            print(_e)
    ## now make interpolants for a faster access:
    try:
        self.fMet['fAhz'] = sp.interpolate.interp1d(self.met['mjd'],
                                                    self.met['ahz'], kind='cubic')
        self.fMet['fAwz'] = sp.interpolate.interp1d(self.met['mjd'],
                                                    self.met['awz'], kind='cubic')
        self.fMet['fZhz'] = sp.interpolate.interp1d(self.met['mjd'],
                                                    self.met['zhz'], kind='cubic')
        self.fMet['fZwz'] = sp.interpolate.interp1d(self.met['mjd'],
                                                    self.met['zwz'], kind='cubic')
        # T/P/humidity only exist when site data were found:
        if len(self.met['TC'])>0:
            self.fMet['fT'] = sp.interpolate.interp1d(self.met['mjd'],
                                                      self.met['TC'])
            self.fMet['fP'] = sp.interpolate.interp1d(self.met['mjd'],
                                                      self.met['pres'])
            self.fMet['fH'] = sp.interpolate.interp1d(self.met['mjd'],
                                                      self.met['hum'])
        # gradients only exist when lhg data were found:
        if len(self.met['gnh'])>0:
            self.fMet['fGnh'] = sp.interpolate.interp1d(self.met['mjd'],
                                                        self.met['gnh'], kind='cubic')
            self.fMet['fGeh'] = sp.interpolate.interp1d(self.met['mjd'],
                                                        self.met['geh'], kind='cubic')
            self.fMet['fGnw'] = sp.interpolate.interp1d(self.met['mjd'],
                                                        self.met['gnw'], kind='cubic')
            self.fMet['fGew'] = sp.interpolate.interp1d(self.met['mjd'],
                                                        self.met['gew'], kind='cubic')
    except Exception as _e:
        print(_e)
def j2000gp(self, r2000, gcrs=None, utc=None, t=None):
    '''
    Compute the station GCRS state (position/velocity/acceleration).

    For a ground station, the GTRS position self.r_GTRS_date is rotated
    into the GCRS with the (3,3,3) matrix stack r2000 (last index 0/1/2
    selects the position/velocity/acceleration transform), then the
    geophysical site displacements computed elsewhere (solid Earth
    tides, ocean loading, pole tide, atmospheric loading) are added.
    For the orbiting station 'RA' the state is instead interpolated
    from its GCRS ephemeris; for 'GEOCENTR' the state is left at zero.

    Input:
        r2000 - (3,3,3) stack of GTRS->GCRS transformation matrices
        gcrs  - s/c GCRS ephemeris table (used only when name == 'RA')
        utc   - ephemeris time stamps, decimal days (only for 'RA')
        t     - epoch of interest, decimal days (only for 'RA')
    Output (set on self): r_GCRS, v_GCRS, a_GCRS
    '''
    if self.name == 'GEOCENTR':
        # keep the zeros
        return
    elif self.name == 'RA':
        # interpolate RA's GCRS orbit to the requested epoch
        lag_order = 9
        x, _ = lagint(lag_order, utc, gcrs[:, 6], t)
        y, _ = lagint(lag_order, utc, gcrs[:, 7], t)
        z, _ = lagint(lag_order, utc, gcrs[:, 8], t)
        vx, _ = lagint(lag_order, utc, gcrs[:, 9], t)
        vy, _ = lagint(lag_order, utc, gcrs[:, 10], t)
        vz, _ = lagint(lag_order, utc, gcrs[:, 11], t)
        self.r_GCRS = np.hstack((x, y, z))
        self.v_GCRS = np.hstack((vx, vy, vz))
        try:
            ax, _ = lagint(lag_order, utc, gcrs[:, 12], t)
            ay, _ = lagint(lag_order, utc, gcrs[:, 13], t)
            az, _ = lagint(lag_order, utc, gcrs[:, 14], t)
            self.a_GCRS = np.hstack((ax, ay, az))
        except Exception:
            # the ephemeris presumably lacks acceleration columns
            # (was a bare except) - fall back to zero acceleration
            self.a_GCRS = np.zeros(3)
    else:
        # transformation GTRS -> GCRS
        self.r_GCRS = np.dot(r2000[:, :, 0], self.r_GTRS_date)
        self.v_GCRS = np.dot(r2000[:, :, 1], self.r_GTRS_date)
        self.a_GCRS = np.dot(r2000[:, :, 2], self.r_GTRS_date)
        # add geophysical site displacements (computed elsewhere on self)
        self.r_GCRS += self.dr_tide + self.dr_oclo +\
                       self.dr_poltide + self.dr_atlo
        self.v_GCRS += self.dv_tide + self.dv_oclo +\
                       self.dv_poltide + self.dv_atlo
# def thermal_def(self, alpha, delta, elv, T, C):
# '''
# thermal_def computes delta_tau due to the
# thermal deformation effect of the telescope
#
# input:
# self - site object
# alpha, delta, elv, T - right ascention, declination, elevation,
# air temperature in C
# const
# output delay:
# dt_thermal
# '''
#
# dl = -0.12*np.pi/180.0
# phi0 = 39.06*np.pi/180.0
#
# # Antenna focus factor
# if self.focus_type == 'FO_PRIM':
# Fa = 0.9
# else:
# Fa = 1.8
#
# # Alt-azimuth
# if self.mount_type == 'MO_AZEL':
# dt_thermal = ( self.gamma_hf * (T - self.T0) * (self.hf * sin(elv)) + \
# self.gamma_hp * (T - self.T0) * (self.hp * sin(elv) + \
# self.AO * cos(elv) + self.hv - Fa * self.hs) ) / C
# # Equatorial
# elif self.mount_type == 'MO_EQUA':
# dt_thermal = ( self.gamma_hf * (T - self.T0) * (self.hf * sin(elv)) + \
# self.gamma_hp * (T - self.T0) * (self.hp * sin(elv) + \
# self.AO * cos(delta) + self.hv - Fa * self.hs) ) / C
# # XY north
# elif self.mount_type == 'MO_XYNO':
# dt_thermal = ( self.gamma_hf * (T - self.T0) * (self.hf * sin(elv)) + \
# self.gamma_hp * (T - self.T0) * (self.hp * sin(elv) + \
# self.AO * sqrt( 1.0 - cos(elv)*cos(elv)*cos(alpha)*cos(alpha) ) + \
# self.hv - Fa * self.hs) ) / C
# # XY east
# elif self.mount_type == 'MO_XYEA':
# dt_thermal = ( self.gamma_hf * (T - self.T0) * (self.hf * sin(elv)) + \
# self.gamma_hp * (T - self.T0) * (self.hp * sin(elv) + \
# self.AO * sqrt( 1.0 - cos(elv)*cos(elv)*cos(alpha)*cos(alpha) ) + \
# self.hv - Fa * self.hs) ) / C
# # misplaced equatorial RICHMOND
# elif self.mount_type == 'MO_RICH':
# dt_thermal = ( self.gamma_hf * (T - self.T0) * (self.hf * sin(elv)) + \
# self.gamma_hp * (T - self.T0) * (self.hp * sin(elv) + \
# self.AO * sqrt( 1.0 - ( sin(elv)*sin(phi0) + \
# cos(elv)*cos(phi0)*(cos(alpha)*cos(dl) + sin(alpha)*sin(dl)) )**2 ) + \
# self.hv - Fa * self.hs) ) / C
#
# self.dtau_therm = dt_thermal
'''
#==============================================================================
#
#==============================================================================
'''
class source(object):
    '''
    Container for the data describing an observed source.

    Attributes:
        name     - source designation
        ivsname  - IVS designation (initialised equal to ``name``)
        sou_type - source type:
                     C - calibrator, far-field
                     R - RadioAstron, near-field
                     G - GNSS, near-field
                     S - ESA's deep space s/c, near-field
        radec    - position as [h, m, s] (filled in by the caller)
        ra, dec  - position in radians
        K_s      - J2000.0 unit vector towards the source
    '''
    def __init__(self, name, sou_type):
        self.sou_type = sou_type
        # both designations start out equal; ivsname may be remapped later
        self.name = name
        self.ivsname = name
        # astrometry, to be filled in by the caller:
        self.radec = []           # in h m s
        self.ra = 0.0             # in rad
        self.dec = 0.0            # in rad
        self.K_s = np.zeros(3)    # J2000.0 source unit vector
'''
#==============================================================================
#
#==============================================================================
'''
class ephem(object):
    """
    Spacecraft ephemerides in the GTRS, GCRS and BCRS frames, with
    their time stamps and (optional) interpolants.
    """
    def __init__(self, sc_name):
        # spacecraft identifier
        self.sc_name = sc_name
        self.gtrs = np.array([]) # empty numpy array
        self.gcrs = np.array([]) # empty numpy array
        self.bcrs = [] # empty list
        self.fGtrs = [] # spline interpolant
        self.fGcrs = [] # spline interpolant
        self.fBcrs = [] # spline interpolant, it will be a list of lists
        self.UT = np.array([]) # decimal time stamps for gtrs and gcrs
        self.CT = np.array([]) # decimal time stamps for bcrs
        self.CT_sec = np.array([]) # [s] time stamps for bcrs
    def RaDec_bc_sec(self, jd, T_1, jpl_eph, return_R10=False):
        """
        Calculate geocentric [Ra, Dec] of the S/C, corrected for the
        geocentric light-travel time.
        input:
            jd  - Julian Date of the observation day
            T_1 - obs epoch in SECONDS of the TDB day (values >= 86400
                  are folded into the next day below)
            jpl_eph - JPL planetary ephemeris
            return_R10 - also return the vector R_SC - R_GC in the BCRS
        output:
            ra, dec [rad]
            lt_01 [seconds]
            (+ r [m] if return_R10 is set)
        """
        # work on copies so the caller's values are not modified
        JD = deepcopy(jd)
        t_1 = deepcopy(T_1)
        const = constants()
        C = const.C # m/s
        GM = const.GM
        # must go below 1 ps to stop iterating:
        precision = 1e-13
        # but should do so in no more than n_max iterations:
        n_max = 3
        # lagrange poly order for interpolation
        lag_order = 5
        # initial approximation:
        nn = 0
        lt_01_tmp = 0.0
        bcrs = self.bcrs[0]
        # ephemeris time stamps, converted from decimal days to seconds
        tdb = self.CT*86400.0
        # correct 'overnighter'
        if t_1 >= 86400.0:
            JD += 1
            t_1 -= 86400.0
        astropy_t_1 = Time(JD + t_1/86400.0, format='jd', scale='tdb', precision=9)
        eph_t_0 = datetime.datetime(*map(int, bcrs[0, :3]))
        # first time stamp negative?
        if tdb[0] < 0:
            eph_t_0 += datetime.timedelta(days=1)
        # cut eph and it's tomorrow?
        if tdb[0]//86400 > 0:
            eph_t_0 -= datetime.timedelta(days=tdb[0]//86400)
        # whole-day offset between obs epoch and the ephemeris start day
        dd = (astropy_t_1.datetime - eph_t_0).days
        # zeroth-order s/c BCRS position at the obs epoch
        x, _ = lagint(lag_order, tdb, bcrs[:, 6], t_1+dd*86400)
        y, _ = lagint(lag_order, tdb, bcrs[:, 7], t_1+dd*86400)
        z, _ = lagint(lag_order, tdb, bcrs[:, 8], t_1+dd*86400)
        R_0 = np.hstack((x,y,z))
        ## Earth:
        rrd = pleph(astropy_t_1.jd, 3, 12, jpl_eph)
        earth = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
        R_1 = earth[:,0]
        # first guess: straight-line geocentric distance / c
        R_0_0 = R_0 - R_1
        lt_01 = norm(R_0_0)/C
        mjd = JD - 2400000.5
        astropy_t_0 = Time(mjd + (t_1 - lt_01)/86400.0, format='mjd', scale='tdb', precision=9)
        ''' BCRS! state vectors of celestial bodies at JD+CT, [m, m/s]: '''
        state_ss = []
        for jj in (1,2,4,5,6,7,8,9,10,11):
            rrd = pleph(astropy_t_1.jd, jj, 12, jpl_eph)
            state_ss.append(np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3)
        # refine the light-travel time, adding the relativistic
        # (Shapiro-type) delay RLT from each solar-system body
        while (abs(lt_01 - lt_01_tmp) > precision) and (nn < n_max):
            lt_01_tmp = lt_01
            t_0 = t_1 - lt_01
            astropy_t_0 = Time(mjd + t_0/86400.0, format='mjd', scale='tdb', precision=9)
            x, _ = lagint(lag_order, tdb, bcrs[:,6], t_0+dd*86400)
            y, _ = lagint(lag_order, tdb, bcrs[:,7], t_0+dd*86400)
            z, _ = lagint(lag_order, tdb, bcrs[:,8], t_0+dd*86400)
            vx, _ = lagint(lag_order, tdb, bcrs[:,9], t_0+dd*86400)
            vy, _ = lagint(lag_order, tdb, bcrs[:,10], t_0+dd*86400)
            vz, _ = lagint(lag_order, tdb, bcrs[:,11], t_0+dd*86400)
            R_0 = np.hstack((x,y,z))
            V_0 = np.hstack((vx,vy,vz))
            # vector needed for RLT calculation
            R_01 = R_1 - R_0
            RLT = 0.0
            for j, ii in enumerate((1,2,4,5,6,7,8,9,10,11)):
                # body state at the retarded epoch
                rrd = pleph(astropy_t_0.jd, ii, 12, jpl_eph)
                state = np.reshape(np.asarray(rrd), (3,2), 'F') * 1e3
                R_B = state[:,0]
                R_0_B = R_B - R_0
                R_1_B = state_ss[j][:,0] - R_1
                R_01_B = R_1_B - R_0_B
                RLT += (2.0*GM[ii-1]/C**3) * \
                    log( ( norm(R_0_B) + norm(R_1_B) + norm(R_01_B) + \
                        2.0*GM[ii-1]/C**2 ) / \
                        ( norm(R_0_B) + norm(R_1_B) - norm(R_01_B) + \
                        2.0*GM[ii-1]/C**2 ) )
            # Newton step on lt_01
            lt_01 = lt_01 - (lt_01 - norm(R_01)/C - RLT) / \
                    ( 1.0 - np.dot(R_01, V_0)/(C*norm(R_01)) )
            t_0 = t_1 - lt_01
            nn += 1
        # s/c position at the converged retarded epoch
        x, _ = lagint(lag_order, tdb, bcrs[:, 6], t_0+dd*86400)
        y, _ = lagint(lag_order, tdb, bcrs[:, 7], t_0+dd*86400)
        z, _ = lagint(lag_order, tdb, bcrs[:, 8], t_0+dd*86400)
        R_0 = np.hstack((x, y, z))
        r = R_0 - R_1
        # S/C position is given at a moment LT seconds ago, which
        # means r is abberated in the far-field case sense
        ra = np.arctan2(r[1], r[0]) # right ascension
        dec = np.arctan(r[2]/np.sqrt(r[0]**2+r[1]**2)) # declination
        if ra < 0:
            ra += 2.0*np.pi
        if not return_R10:
            return ra, dec, lt_01
        else:
            return ra, dec, lt_01, r
'''
#==============================================================================
#
#==============================================================================
'''
class ion(object):
    """
    Class containing ionospheric TEC-maps

    Loads IONEX vertical-TEC maps covering [date_start, date_stop] from the
    catalogue directory inp['ion_cat'] and builds, for each map epoch, a 2D
    interpolant (longitude, latitude) -> vTEC.

    Attributes
    ----------
    lat, lon : fixed IONEX grid axes (2.5 deg lat x 5.0 deg lon spacing)
    date_tec : datetime of each loaded TEC map
    UT_tec   : decimal time of each map, in days since 0h of the first day
    fVTEC    : list of scipy interp2d interpolants, one per map epoch
    """
    def __init__(self, date_start, date_stop, inp):
        self.lat = np.arange(87.5, -90.0, -2.5) # len = 71
        self.lon = np.arange(-180.0, 185.0, 5.0) # len = 73
        self.date_tec = []
        vTEC_grid = []
        # set up dates: midnight of the first day through midnight AFTER the
        # last day, so every observation epoch is bracketed by TEC maps
        day_start = datetime.datetime(date_start.year,date_start.month,\
                                      date_start.day,0,0,0)
        day_stop = datetime.datetime(date_stop.year,date_stop.month,\
                                     date_stop.day,0,0,0) + datetime.timedelta(days=1)
        dd = (day_stop - day_start).days
        # make list with datetime objects ranging from 1st day to last+1
        dates = [day_start]
        for d in range(1,dd+1):
            dates.append(day_start+ datetime.timedelta(days=d))
        for day in dates:
            yy = str(day.year)[2:]
            doy = day.timetuple().tm_yday # day of year
            # IONEX file naming convention: <model>g<DOY>0.<YY>i
            ionex = inp['iono_model'] + 'g{:03d}0.'.format(doy) + yy + 'i'
            try:
                with open(inp['ion_cat']+'/'+ionex,'r') as f:
                    f_lines = f.readlines()
            except Exception, err:
                print str(err)
                # print 'Failed to load TEC data, no iono delay will be computed.'
                raise Exception('Failed to load TEC data, no iono delay will be computed.')
            for jj in range(len(f_lines)):
                if 'START OF TEC MAP' in f_lines[jj]:
                    # line after the marker holds the epoch (Y M D h m s)
                    tmp = [int(i) for i in f_lines[jj+1][0:50].split()]
                    # end of day1==start of day2 => don't include duplicate:
                    if len(self.date_tec)==0 or \
                          datetime.datetime(*tmp)!=self.date_tec[-1]:
                        self.date_tec.append(datetime.datetime(*tmp))
                    else:
                        continue
                    grid = []
                    # each of the 71 latitude bands occupies 6 lines:
                    # 1 header line + 5 lines of TEC values
                    for ii in range(jj+2,(jj+2)+71*6,6):
                        # lat = float(f_lines[ii][0:8])
                        # you can't transpose a 1D-array, have to first
                        # convert it to a matrix:
                        # tmp1 = np.array(np.matrix(lat*np.ones(73)).T)
                        # tmp2 = np.array(np.matrix(np.arange(-180.0,185.0,5.0)).T)
                        # tmp = np.hstack((tmp1,tmp2))
                        tecs = []
                        for kk in range(1,6):
                            tmpflt = [float(i) for i in f_lines[ii+kk].split()]
                            tecs.append(tmpflt)
                        # flatten the 5 value-lines into one 73-element band
                        tecs = [item for sublist in tecs for item in sublist]
                        # tecs = np.array(np.matrix(tecs).T)
                        tecs = np.array(tecs)
                        # tmp = np.hstack((tmp,tecs))
                        if len(grid)==0:
                            # grid = tmp
                            grid = tecs
                        else:
                            # grid = np.vstack((grid,tmp))
                            grid = np.hstack((grid,tecs))
                    # reshape for interpolation:
                    vTEC_grid.append(grid.reshape((71,73)))
                    # NOTE(review): rebinding the loop variable has no effect
                    # on a Python for-loop over range(); the scan continues
                    # from jj+1, re-testing lines already consumed above.
                    jj = ii # shift current index
        vTEC_grid = np.array(vTEC_grid)
        # the resulting array has a shape (dd*12+1,71x73): dd*12+1 epochs
        # each containing TEC values on a 2D-grid
        # import matplotlib.pyplot as plt
        # import seaborn as sns
        # sns.set_style('whitegrid') # plot em niice!
        # plt.close('all')
        # print dd
        # for i in range(vTEC_grid.shape[0]):
        #     plt.figure()
        #     plt.imshow(vTEC_grid[i,:,:])
        #     plt.show()
        # build a decimal time scale for a more convenient interpolation
        # in the future
        self.UT_tec = []
        for t in self.date_tec:
            self.UT_tec.append( (t.hour+t.minute/60.0+t.second/3600.0)/24.0 + \
                                (t-day_start).days )
        # print self.UT_tec
        # produce interpolants (one 2D cubic interpolant per map epoch):
        self.fVTEC = []
        for vTEC in vTEC_grid:
            f = sp.interpolate.interp2d(self.lon, self.lat, vTEC, kind='cubic')
            self.fVTEC.append(f)
#%%
'''
#==============================================================================
# Load binary delays
#==============================================================================
'''
class bindel(object):
    '''
    Parse binary delay files in SFXC format

    A .del file is a little-endian stream: a short header (header size +
    2-char station name), followed by scans. Each scan is an 81-byte
    source name, a 4-byte MJD, then repeated 7-double records
    (t, u, v, w, delay, phase, amplitude) terminated by an all-zero record.
    '''
    def __init__(self, fname, fdir='.'):
        # fname - delay file name; fdir - directory containing it
        self.fname = fname
        self.fdir = fdir
        self.parse()
    def parse(self):
        '''
        Read the binary file into self.scans: {scan_number: dict} with keys
        source, mjd, time, uvw, delay, phase, amp (arrays per scan).
        '''
        self.scans = {}
        with open(os.path.join(self.fdir, self.fname),'rb') as f_del:
#            content = f_del.read()
#            line = struct.unpack('<i2sx', content[:7])
            # binary header (header_size, sta_name):
            self.header_size = struct.unpack('<i', f_del.read(4))[0]
            # (unpack always returns a tuple)
            self.sta = struct.unpack('<2sx', f_del.read(self.header_size))[0]
            #print self.sta
            # unpack scan by scan
            sn = 1 # scan number
            while True:
                try:
                    source = struct.unpack('<80sx', f_del.read(81))[0]
                    mjd = struct.unpack('<i', f_del.read(4))[0]
                    # init dict for scan
                    self.scans[sn] = {'source':None, 'mjd':None, 'time':None, \
                                      'uvw':None, 'delay':None, \
                                      'phase':None, 'amp':None}
                    self.scans[sn]['source'] = source
                    self.scans[sn]['mjd'] = mjd
                    time, uvw, delay, phase, amp = [], [], [], [], []
                    while True:
                        t,u,v,w,d,p,a = struct.unpack('<7d', f_del.read(8*7))
                        # an all-zero (t, delay) record marks the scan end
                        if t==0 and d==0:
                            break
                        else:
                            time.append(t)
                            uvw.append((u,v,w))
                            delay.append(d)
                            phase.append(p)
                            amp.append(a)
                    #print np.array(time)
                    self.scans[sn]['time'] = np.array(time)
                    self.scans[sn]['uvw'] = np.array(uvw)
                    self.scans[sn]['delay'] = np.array(delay)
                    self.scans[sn]['phase'] = np.array(phase)
                    self.scans[sn]['amp'] = np.array(amp)
                    sn += 1
                except:
                    # any failed unpack (normally end of file) stops parsing.
                    # NOTE(review): if the file is truncated mid-scan, the
                    # last scan may remain in self.scans with None fields.
                    break
    def getSources(self):
        '''
        Return list of sources
        '''
        return list(set([self.scans[sn]['source'].strip() \
                        for sn in self.scans.keys()]))
    def phaseCor(self, source, day, t, f):
        '''
        Integrate f_gc to get a phase correction for a S/C
        input:
            source - source name
            day - datetime of the first day of experiment (time origin for t)
            t - seconds from beginning of the first day of experiment
            f - freqs in Hz

        For every scan of the given source, the frequency offset (f - f[0])
        is polynomial-fitted over the scan interval and integrated to a
        phase, which overwrites self.scans[sn]['phase'].
        '''
        # set f_0 - will integrate difference (f-f_0) to reduce phase dyn.range
        f_0 = f[0]
        for sn in self.scans.keys():
            if self.scans[sn]['source'].strip() == source:
                at = Time(self.scans[sn]['mjd'], format='mjd', scale='utc')
                dt = at.datetime
                # same time scale as t:
                ts = 86400.0*(dt-day).days + self.scans[sn]['time']
                # cut proper piece of (f-f_0) to integrate:
                bounds = np.searchsorted(t, (ts[0]+1, ts[-1]))
#                print bounds
                # pad by one sample on each side so the fit brackets the scan
                ti = t[bounds[0]-1 : bounds[1]+1]
                fi = f[bounds[0]-1 : bounds[1]+1] - f_0
#                if sn==191:
#                    print ti
#                    print fi
#                    raw_input('lala')
#                print len(fi)
                try:
#                    # make an optimal polyfit:
#                    ffit = optimalFit(ti, fi, min_order=3, \
#                                  max_order=7, fit_type='poly')
#                    # integrate it to a phase poly
#                    pint = np.polyint(ffit.best_estimator_.coef_)
#                    # evaluate it at scan start
#                    i0 = np.polyval(pint, ts[0])
#                    # the phase is then int(ts[i])-int(ts[0])
#                    phase = np.array([2.0*np.pi*(np.polyval(pint, to) - i0) \
#                                        for to in ts])
#                    print phase
                    # scale ti for a more robust fit:
                    mu_ti = np.array([np.mean(ti), np.std(ti)])
                    ti = (ti-mu_ti[0])/mu_ti[1]
                    # make an optimal polyfit:
                    ffit = optimalFit(ti, fi, min_order=3, \
                                  max_order=7, fit_type='poly')
#                    print len(ffit.best_estimator_.coef_)
                    # integrate it to a phase poly
                    pint = np.polyint(ffit.best_estimator_.coef_)
                    # evaluate it at scan start
                    i0 = np.polyval(pint, (ts[0] - mu_ti[0]) / mu_ti[1])
                    # the phase is then int(ts[i])-int(ts[0]);
                    # the trailing *mu_ti[1] undoes the time rescaling
                    phase = np.array([2.0*np.pi*(np.polyval(pint, to) - i0) \
                                  for to in (ts-mu_ti[0])/mu_ti[1]])*mu_ti[1]
#                    print phase
#                    raw_input('ololo?')
                    # store it:
                    self.scans[sn]['phase'] = phase
                except:
                    # too few points to make a fit - skip this scan then
                    continue
    def dump(self, binary=True, txt=False, out_name=None, out_dir=None):
        '''
        Dump parsed (and processed) data back to a binary .del-file

        binary - write the SFXC binary file (default name: <stem>i.del)
        txt - additionally write a human-readable text version
        out_name, out_dir - override output file name / directory

        Note: the phase column is written NEGATED relative to what is
        stored in self.scans.
        '''
        if out_name is None:
            dot = self.fname.index('.')
            out_name = self.fname[:dot] + 'i.del'
#            out_name = self.fname
        if txt:
            dot = self.fname.index('.')
            out_name_txt = self.fname[:dot] + '.txt'
        if out_dir is None:
            out_dir = self.fdir
        # create output dir if it doesn't exist:
        if not os.path.isdir(out_dir):
            os.mkdir(out_dir)
        if binary:
            with open(os.path.join(out_dir, out_name),'wb') as f_del:
                # binary header (header_size, sta_name):
                line = struct.pack('<i2sx', self.header_size, self.sta)
                f_del.write(line)
                for sn in self.scans.keys():
                    # source name and mjd
                    line = struct.pack('<80sxi', self.scans[sn]['source'],
                                       self.scans[sn]['mjd'])
                    f_del.write(line)
                    for t, (u,v,w), d, p, a in zip(self.scans[sn]['time'],
                                self.scans[sn]['uvw'], self.scans[sn]['delay'],
                                -self.scans[sn]['phase'], self.scans[sn]['amp']):
#                np.zeros_like(self.scans[sn]['delay']), self.scans[sn]['amp']):
                        line = struct.pack('<7d', t,u,v,w,d,p,a)
                        f_del.write(line)
                    # trailing zeros at scan end:
                    line = struct.pack('<7d', *list(np.zeros(7)))
                    f_del.write(line)
        # dump txt:
        if txt:
            with open(os.path.join(out_dir, out_name_txt),'w') as f_txt:
                # binary header (header_size, sta_name):
                line = '{:s}\n'.format(self.sta)
                f_txt.write(line)
                for sn in self.scans.keys():
                    # source name and mjd
                    line = '{:s} {:f}\n'.format(self.scans[sn]['source'].strip(),
                                                self.scans[sn]['mjd'])
                    f_txt.write(line)
                    for t, (u,v,w), d, p, a in zip(self.scans[sn]['time'],
                                self.scans[sn]['uvw'], self.scans[sn]['delay'],
                                -self.scans[sn]['phase'], self.scans[sn]['amp']):
#                np.zeros_like(self.scans[sn]['delay']), self.scans[sn]['amp'])
                        line = '{:f} {:f} {:f} {:f} {:.15e} {:.15e} {:.15e}\n'.format(t,
                                u,v,w,d,p,a)
                        f_txt.write(line)
| gpl-2.0 |
nhejazi/scikit-learn | sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
# Shared fixture: a deterministic sparse coded signal used by all tests below.
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
                                       n_nonzero_coefs, random_state=0)
# Precomputed Gram matrix G = X'X and covariance Xy = X'y for the *_gram tests.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
    # Solutions must have shape (n_features,) for a single target and
    # (n_features, n_targets) for multiple targets.
    coef_single = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
    assert_equal(coef_single.shape, (n_features,))
    coef_multi = orthogonal_mp(X, y, n_nonzero_coefs=5)
    assert_equal(coef_multi.shape, (n_features, 3))
def test_correct_shapes_gram():
    # Same shape contract as test_correct_shapes, for the Gram-based solver.
    coef_single = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5)
    assert_equal(coef_single.shape, (n_features,))
    coef_multi = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5)
    assert_equal(coef_multi.shape, (n_features, 3))
def test_n_nonzero_coefs():
    # The solver may never select more atoms than requested.
    coef_plain = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
    assert_true(np.count_nonzero(coef_plain) <= 5)
    coef_gram = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5, precompute=True)
    assert_true(np.count_nonzero(coef_gram) <= 5)
def test_tol():
    # When a tolerance is given, the squared residual of the solution must
    # not exceed it, with or without a precomputed Gram matrix.
    tol = 0.5
    target = y[:, 0]
    coef = orthogonal_mp(X, target, tol=tol)
    coef_gram = orthogonal_mp(X, target, tol=tol, precompute=True)
    assert_true(np.sum((target - np.dot(X, coef)) ** 2) <= tol)
    assert_true(np.sum((target - np.dot(X, coef_gram)) ** 2) <= tol)
def test_with_without_gram():
    # Precomputing the Gram matrix must not change the solution.
    direct = orthogonal_mp(X, y, n_nonzero_coefs=5)
    precomputed = orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True)
    assert_array_almost_equal(direct, precomputed)
def test_with_without_gram_tol():
    # Same agreement as above, with a tolerance-based stopping criterion.
    direct = orthogonal_mp(X, y, tol=1.)
    precomputed = orthogonal_mp(X, y, tol=1., precompute=True)
    assert_array_almost_equal(direct, precomputed)
def test_unreachable_accuracy():
    # A zero tolerance is unreachable, so the solver must exhaust all
    # atoms, matching an explicit n_nonzero_coefs=n_features solution.
    full = orthogonal_mp(X, y, n_nonzero_coefs=n_features)
    assert_array_almost_equal(orthogonal_mp(X, y, tol=0), full)
    # The Gram path additionally warns about the unreachable tolerance.
    warned = assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
                          precompute=True)
    full_gram = orthogonal_mp(X, y, precompute=True,
                              n_nonzero_coefs=n_features)
    assert_array_almost_equal(warned, full_gram)
def test_bad_input():
    # Negative tolerance and out-of-range sparsity levels must raise
    # ValueError for both the plain and the Gram-based solvers.
    for solver, first_arg, second_arg in ((orthogonal_mp, X, y),
                                          (orthogonal_mp_gram, G, Xy)):
        assert_raises(ValueError, solver, first_arg, second_arg, tol=-1)
        assert_raises(ValueError, solver, first_arg, second_arg,
                      n_nonzero_coefs=-1)
        assert_raises(ValueError, solver, first_arg, second_arg,
                      n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
    # On a noiseless sparse signal, both solvers must recover the exact
    # support and (approximately) the true coefficients.
    support, = gamma[:, 0].nonzero()
    estimate = orthogonal_mp(X, y[:, 0], 5)
    estimate_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
    for rec in (estimate, estimate_gram):
        assert_array_equal(support, np.flatnonzero(rec))
        assert_array_almost_equal(gamma[:, 0], rec, decimal=2)
def test_estimator():
    # Check coef_/intercept_ shapes and sparsity of the estimator API,
    # first with an intercept, then without intercept nor normalization.
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    for with_intercept in (True, False):
        if not with_intercept:
            omp.set_params(fit_intercept=False, normalize=False)
        # single target
        omp.fit(X, y[:, 0])
        assert_equal(omp.coef_.shape, (n_features,))
        if with_intercept:
            assert_equal(omp.intercept_.shape, ())
        else:
            assert_equal(omp.intercept_, 0)
        assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
        # multiple targets
        omp.fit(X, y)
        assert_equal(omp.coef_.shape, (n_targets, n_features))
        if with_intercept:
            assert_equal(omp.intercept_.shape, (n_targets,))
        else:
            assert_equal(omp.intercept_, 0)
        assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
    # Duplicated design columns should make the solver emit a
    # RuntimeWarning while fitting.
    X_dup = X.copy()
    X_dup[:, 1] = X_dup[:, 0]
    coef = np.zeros(n_features)
    coef[:2] = 1.
    target = np.dot(X_dup, coef)
    assert_warns(RuntimeWarning, orthogonal_mp, X_dup, target, 2)
def test_swapped_regressors():
    # X[:, 21] should be selected first, then X[:, 0] selected second,
    # which will take X[:, 21]'s place in case the algorithm does
    # column swapping for optimization (which is the case at the moment)
    coef = np.zeros(n_features)
    coef[21] = 1.0
    coef[0] = 0.5
    target = np.dot(X, coef)
    target_cov = np.dot(X.T, target)
    for estimate in (orthogonal_mp(X, target, 2),
                     orthogonal_mp_gram(G, target_cov, 2)):
        assert_array_equal(np.flatnonzero(estimate), [0, 21])
def test_no_atoms():
    # A zero target must produce an all-zero solution, with and without a
    # precomputed Gram matrix.
    y_empty = np.zeros_like(y)
    Xy_empty = np.dot(X.T, y_empty)
    gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
    # BUG FIX: this previously called orthogonal_mp on (G, Xy_empty),
    # silently treating the Gram matrix as a design matrix; use the
    # dedicated Gram solver so the test exercises the intended code path.
    gamma_empty_gram = ignore_warnings(orthogonal_mp_gram)(G, Xy_empty, 1)
    assert_equal(np.all(gamma_empty == 0), True)
    assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
    # With return_path=True the last step of the path must equal the
    # plain solution, for both the design-matrix and Gram variants.
    for solver, first_arg, second_arg in ((orthogonal_mp, X, y),
                                          (orthogonal_mp_gram, G, Xy)):
        path = solver(first_arg, second_arg, n_nonzero_coefs=5,
                      return_path=True)
        final = solver(first_arg, second_arg, n_nonzero_coefs=5,
                       return_path=False)
        assert_equal(path.shape, (n_features, n_targets, 5))
        assert_array_almost_equal(path[:, :, -1], final)
def test_omp_return_path_prop_with_gram():
    # return_path must also compose correctly with precompute=True.
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
                         precompute=True)
    final = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
                          precompute=True)
    assert_equal(path.shape, (n_features, n_targets, 5))
    assert_array_almost_equal(path[:, :, -1], final)
def test_omp_cv():
    # Cross-validation should recover the true sparsity level, and the
    # resulting coefficients should match a plain OMP fit at that level.
    target = y[:, 0]
    true_coef = gamma[:, 0]
    model_cv = OrthogonalMatchingPursuitCV(normalize=True,
                                           fit_intercept=False,
                                           max_iter=10, cv=5)
    model_cv.fit(X, target)
    assert_equal(model_cv.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(model_cv.coef_, true_coef)
    model = OrthogonalMatchingPursuit(
        normalize=True, fit_intercept=False,
        n_nonzero_coefs=model_cv.n_nonzero_coefs_)
    model.fit(X, target)
    assert_array_almost_equal(model_cv.coef_, model.coef_)
def test_omp_reaches_least_squares():
    # Use small simple data; it's a sanity check but OMP can stop early
    rng = check_random_state(0)
    n_obs, n_feat = 10, 8
    n_tgt = 3
    design = rng.randn(n_obs, n_feat)
    targets = rng.randn(n_obs, n_tgt)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_feat)
    omp.fit(design, targets)
    ols = LinearRegression()
    ols.fit(design, targets)
    assert_array_almost_equal(omp.coef_, ols.coef_)
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/gaussian_process/gpr.py | 9 | 18634 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
    """Gaussian process regression (GPR).

    The implementation is based on Algorithm 2.1 of ``Gaussian Processes
    for Machine Learning'' (GPML) by Rasmussen and Williams.

    In addition to standard sklearn estimator API, GaussianProcessRegressor:

       * allows prediction without prior fitting (based on the GP prior)
       * provides an additional method sample_y(X), which evaluates samples
         drawn from the GPR (prior or posterior) at given inputs
       * exposes a method log_marginal_likelihood(theta), which can be used
         externally for other ways of selecting hyperparameters, e.g., via
         Markov chain Monte Carlo.

    Parameters
    ----------
    kernel : kernel object
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
        the kernel's hyperparameters are optimized during fitting.

    alpha : float or array-like, optional (default: 1e-10)
        Value added to the diagonal of the kernel matrix during fitting.
        Larger values correspond to increased noise level in the observations
        and reduce potential numerical issue during fitting. If an array is
        passed, it must have the same number of entries as the data used for
        fitting and is used as datapoint-dependent noise level. Note that this
        is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify
        the noise level directly as a parameter is mainly for convenience and
        for consistency with Ridge.

    optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::

            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min

        Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::

            'fmin_l_bfgs_b'

    n_restarts_optimizer: int, optional (default: 0)
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer == 0 implies that one
        run is performed.

    normalize_y: boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerable from
        zero. When enabled, the normalization effectively modifies the GP's
        prior based on the data, which contradicts the likelihood principle;
        normalization is thus disabled per default.

    copy_X_train : bool, optional (default: True)
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Attributes
    ----------
    X_train_ : array-like, shape = (n_samples, n_features)
        Feature values in training data (also required for prediction)

    y_train_: array-like, shape = (n_samples, [n_output_dims])
        Target values in training data (also required for prediction)

    kernel_: kernel object
        The kernel used for prediction. The structure of the kernel is the
        same as the one passed as parameter but with optimized hyperparameters

    L_: array-like, shape = (n_samples, n_samples)
        Lower-triangular Cholesky decomposition of the kernel in X_train_

    alpha_: array-like, shape = (n_samples,)
        Dual coefficients of training data points in kernel space

    log_marginal_likelihood_value_: float
        The log-marginal-likelihood of self.kernel_.theta
    """

    def __init__(self, kernel=None, alpha=1e-10,
                 optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
                 normalize_y=False, copy_X_train=True, random_state=None):
        self.kernel = kernel
        self.alpha = alpha
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.random_state = random_state

    def fit(self, X, y):
        """Fit Gaussian process regression model

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data

        y : array-like, shape = (n_samples, [n_output_dims])
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") \
                * RBF(1.0, length_scale_bounds="fixed")
        else:
            self.kernel_ = clone(self.kernel)

        self.rng = check_random_state(self.random_state)

        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)

        # Normalize target value
        if self.normalize_y:
            self.y_train_mean = np.mean(y, axis=0)
            # demean y
            y = y - self.y_train_mean
        else:
            self.y_train_mean = np.zeros(1)

        if np.iterable(self.alpha) \
           and self.alpha.shape[0] != y.shape[0]:
            if self.alpha.shape[0] == 1:
                self.alpha = self.alpha[0]
            else:
                raise ValueError("alpha must be a scalar or an array"
                                 " with same number of entries as y.(%d != %d)"
                                 % (self.alpha.shape[0], y.shape[0]))

        self.X_train_ = np.copy(X) if self.copy_X_train else X
        self.y_train_ = np.copy(y) if self.copy_X_train else y

        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True)
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta)

            # First optimize starting from theta specified in kernel
            optima = [(self._constrained_optimization(obj_func,
                                                      self.kernel_.theta,
                                                      self.kernel_.bounds))]

            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite.")
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    theta_initial = \
                        self.rng.uniform(bounds[:, 0], bounds[:, 1])
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial,
                                                       bounds))
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = \
                self.log_marginal_likelihood(self.kernel_.theta)

        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)
        K[np.diag_indices_from(K)] += self.alpha
        self.L_ = cholesky(K, lower=True)  # Line 2
        self.alpha_ = cho_solve((self.L_, True), self.y_train_)  # Line 3

        return self

    def predict(self, X, return_std=False, return_cov=False):
        """Predict using the Gaussian process regression model

        We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, also its
        standard deviation (return_std=True) or covariance (return_cov=True).
        Note that at most one of the two can be requested.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Query points where the GP is evaluated

        return_std : bool, default: False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.

        return_cov : bool, default: False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean

        Returns
        -------
        y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution a query points

        y_std : array, shape = (n_samples,), optional
            Standard deviation of predictive distribution at query points.
            Only returned when return_std is True.

        y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution a query points.
            Only returned when return_cov is True.
        """
        if return_std and return_cov:
            raise RuntimeError(
                "Not returning standard deviation of predictions when "
                "returning full covariance.")

        X = check_array(X)

        if not hasattr(self, "X_train_"):  # Unfitted;predict based on GP prior
            # BUG FIX: when kernel=None, self.kernel is None and calling it
            # crashed with a TypeError. Use the same default prior kernel
            # "1.0 * RBF(1.0)" that fit() would use, so predicting from the
            # prior works as documented even before fitting.
            if self.kernel is None:
                kernel = (C(1.0, constant_value_bounds="fixed") *
                          RBF(1.0, length_scale_bounds="fixed"))
            else:
                kernel = self.kernel
            y_mean = np.zeros(X.shape[0])
            if return_cov:
                y_cov = kernel(X)
                return y_mean, y_cov
            elif return_std:
                y_var = kernel.diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            K_trans = self.kernel_(X, self.X_train_)
            y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
            y_mean = self.y_train_mean + y_mean  # undo normal.
            if return_cov:
                v = cho_solve((self.L_, True), K_trans.T)  # Line 5
                y_cov = self.kernel_(X) - K_trans.dot(v)  # Line 6
                return y_mean, y_cov
            elif return_std:
                # compute inverse K_inv of K based on its Cholesky
                # decomposition L and its inverse L_inv
                L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
                K_inv = L_inv.dot(L_inv.T)
                # Compute variance of predictive distribution
                y_var = self.kernel_.diag(X)
                y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)

                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    warnings.warn("Predicted variances smaller than 0. "
                                  "Setting those variances to 0.")
                    y_var[y_var_negative] = 0.0
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean

    def sample_y(self, X, n_samples=1, random_state=0):
        """Draw samples from Gaussian process and evaluate at X.

        Parameters
        ----------
        X : array-like, shape = (n_samples_X, n_features)
            Query points where the GP samples are evaluated

        n_samples : int, default: 1
            The number of samples drawn from the Gaussian process

        random_state: RandomState or an int seed (0 by default)
            A random number generator instance

        Returns
        -------
        y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
            Values of n_samples samples drawn from Gaussian process and
            evaluated at query points.
        """
        rng = check_random_state(random_state)

        y_mean, y_cov = self.predict(X, return_cov=True)
        if y_mean.ndim == 1:
            y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
        else:
            # multi-output: draw one set of samples per output dimension,
            # sharing the same covariance, and stack them along axis 1
            y_samples = \
                [rng.multivariate_normal(y_mean[:, i], y_cov,
                                         n_samples).T[:, np.newaxis]
                 for i in range(y_mean.shape[1])]
            y_samples = np.hstack(y_samples)
        return y_samples

    def log_marginal_likelihood(self, theta=None, eval_gradient=False):
        """Returns log-marginal likelihood of theta for training data.

        Parameters
        ----------
        theta : array-like, shape = (n_kernel_params,) or None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of self.kernel_.theta is returned.

        eval_gradient : bool, default: False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : array, shape = (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        kernel = self.kernel_.clone_with_theta(theta)

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        K[np.diag_indices_from(K)] += self.alpha
        try:
            L = cholesky(K, lower=True)  # Line 2
        except np.linalg.LinAlgError:
            # Non-positive-definite kernel matrix: report -inf likelihood
            # so the optimizer moves away from this theta.
            return (-np.inf, np.zeros_like(theta)) \
                if eval_gradient else -np.inf

        # Support multi-dimensional output of self.y_train_
        y_train = self.y_train_
        if y_train.ndim == 1:
            y_train = y_train[:, np.newaxis]

        alpha = cho_solve((L, True), y_train)  # Line 3

        # Compute log-likelihood (compare line 7)
        log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
        log_likelihood_dims -= np.log(np.diag(L)).sum()
        log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
        log_likelihood = log_likelihood_dims.sum(-1)  # sum over dimensions

        if eval_gradient:  # compare Equation 5.9 from GPML
            tmp = np.einsum("ik,jk->ijk", alpha, alpha)  # k: output-dimension
            tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
            # Compute "0.5 * trace(tmp.dot(K_gradient))" without
            # constructing the full matrix tmp.dot(K_gradient) since only
            # its diagonal is required
            log_likelihood_gradient_dims = \
                0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
            log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)

        if eval_gradient:
            return log_likelihood, log_likelihood_gradient
        else:
            return log_likelihood

    def _constrained_optimization(self, obj_func, initial_theta, bounds):
        # Dispatch to the selected optimizer; returns (theta_opt, func_min).
        if self.optimizer == "fmin_l_bfgs_b":
            theta_opt, func_min, convergence_dict = \
                fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
            if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              " state: %s" % convergence_dict)
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)

        return theta_opt, func_min
| bsd-3-clause |
samzhang111/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 81 | 5461 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
huobaowangxi/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
SixTrack/SixTrack | test/elensidealthin6d/elens_plot_kick.py | 1 | 1663 | import matplotlib.pyplot as plt
import numpy as np
r2hel1=6.928 # from fort.3 [mm]
sig=r2hel1/6 # 1 sigma beam size, hel1 between 4-6 sigma
offsetx=-1.1547
offsety=-2.3093
theta_r2=4.920e-03 # max. kick [mrad]
oFile=open('kicks.dat','w')
plt.figure('elens kick',figsize=(13,13))
for fnin,fnout,offx,offy,R,R2f,peakT in [(1,2,0,0,0.5,7,7),(2,3,offsetx,offsety,1,12,10.8),(3,4,-offsetx,0,1,5,2.91604),(4,5,0,-offsety,1/2.,3,3.48995)]:
theta_max=theta_r2*R
plt.subplot(2,2,fnin)
helin=np.loadtxt('HEL_DUMP_%s'%fnin)
helout=np.loadtxt('HEL_DUMP_%s'%fnout)
rrin=np.sqrt((helin[:,3]-offx)**2+(helin[:,5]-offy)**2)
rrout=np.sqrt((helout[:,3]-offx)**2+(helout[:,5]-offy)**2)
if np.max(rrin-rrout)==0:
fff=np.sqrt((helin[:,4]-helout[:,4])**2+(helin[:,6]-helout[:,6])**2)
plt.plot(rrin/sig,fff,'.',label=r'offx=%2.3f sigma,offy=%2.3f sigma'%(offx/sig,offy/sig))
plt.plot(rrin/sig,np.ones(len(rrin))*theta_max,'k-',label=r'$\theta_{R_2}$')
plt.plot([R2f,R2f],[0,theta_max*1.1],'g-',label=r'$R_2$')
plt.plot([peakT,peakT],[0,max(fff)*1.05],'r-',label=r'$n_{\mathrm{peak}}$')
plt.xlabel(r'$n_{\sigma}=\sqrt{(x-x_{\mathrm{off}})^2+(y-y_{\mathrm{off}})^2)}$ [$\sigma$]')
plt.ylabel(r'$\theta(r)=\sqrt{xp^2+yp^2}$ [mrad]')
plt.legend(loc='best',fontsize=10)
plt.tight_layout()
plt.grid()
oFile.write('# %i %i \n'%(fnin,fnout))
for tmpR,tmpF in zip(rrin,fff):
oFile.write(' % 22.15E % 22.15E % 22.15E \n'%(tmpR,tmpR/sig,tmpF))
oFile.write('\n\n')
else:
print 'x or y has been changed in %s / %s - elens should only change xp,yp'%('HEL_DUMP_%s'%fnin,'HEL_DUMP_%s'%fnout)
oFile.close()
plt.show()
| lgpl-2.1 |
mutirri/bokeh | examples/glyphs/colors.py | 25 | 8920 | from __future__ import print_function
from math import pi
import pandas as pd
from bokeh.models import Plot, ColumnDataSource, FactorRange, CategoricalAxis, TapTool, HoverTool, OpenURL
from bokeh.models.glyphs import Rect
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
css3_colors = pd.DataFrame([
("Pink", "#FFC0CB", "Pink"),
("LightPink", "#FFB6C1", "Pink"),
("HotPink", "#FF69B4", "Pink"),
("DeepPink", "#FF1493", "Pink"),
("PaleVioletRed", "#DB7093", "Pink"),
("MediumVioletRed", "#C71585", "Pink"),
("LightSalmon", "#FFA07A", "Red"),
("Salmon", "#FA8072", "Red"),
("DarkSalmon", "#E9967A", "Red"),
("LightCoral", "#F08080", "Red"),
("IndianRed", "#CD5C5C", "Red"),
("Crimson", "#DC143C", "Red"),
("FireBrick", "#B22222", "Red"),
("DarkRed", "#8B0000", "Red"),
("Red", "#FF0000", "Red"),
("OrangeRed", "#FF4500", "Orange"),
("Tomato", "#FF6347", "Orange"),
("Coral", "#FF7F50", "Orange"),
("DarkOrange", "#FF8C00", "Orange"),
("Orange", "#FFA500", "Orange"),
("Yellow", "#FFFF00", "Yellow"),
("LightYellow", "#FFFFE0", "Yellow"),
("LemonChiffon", "#FFFACD", "Yellow"),
("LightGoldenrodYellow", "#FAFAD2", "Yellow"),
("PapayaWhip", "#FFEFD5", "Yellow"),
("Moccasin", "#FFE4B5", "Yellow"),
("PeachPuff", "#FFDAB9", "Yellow"),
("PaleGoldenrod", "#EEE8AA", "Yellow"),
("Khaki", "#F0E68C", "Yellow"),
("DarkKhaki", "#BDB76B", "Yellow"),
("Gold", "#FFD700", "Yellow"),
("Cornsilk", "#FFF8DC", "Brown"),
("BlanchedAlmond", "#FFEBCD", "Brown"),
("Bisque", "#FFE4C4", "Brown"),
("NavajoWhite", "#FFDEAD", "Brown"),
("Wheat", "#F5DEB3", "Brown"),
("BurlyWood", "#DEB887", "Brown"),
("Tan", "#D2B48C", "Brown"),
("RosyBrown", "#BC8F8F", "Brown"),
("SandyBrown", "#F4A460", "Brown"),
("Goldenrod", "#DAA520", "Brown"),
("DarkGoldenrod", "#B8860B", "Brown"),
("Peru", "#CD853F", "Brown"),
("Chocolate", "#D2691E", "Brown"),
("SaddleBrown", "#8B4513", "Brown"),
("Sienna", "#A0522D", "Brown"),
("Brown", "#A52A2A", "Brown"),
("Maroon", "#800000", "Brown"),
("DarkOliveGreen", "#556B2F", "Green"),
("Olive", "#808000", "Green"),
("OliveDrab", "#6B8E23", "Green"),
("YellowGreen", "#9ACD32", "Green"),
("LimeGreen", "#32CD32", "Green"),
("Lime", "#00FF00", "Green"),
("LawnGreen", "#7CFC00", "Green"),
("Chartreuse", "#7FFF00", "Green"),
("GreenYellow", "#ADFF2F", "Green"),
("SpringGreen", "#00FF7F", "Green"),
("MediumSpringGreen", "#00FA9A", "Green"),
("LightGreen", "#90EE90", "Green"),
("PaleGreen", "#98FB98", "Green"),
("DarkSeaGreen", "#8FBC8F", "Green"),
("MediumSeaGreen", "#3CB371", "Green"),
("SeaGreen", "#2E8B57", "Green"),
("ForestGreen", "#228B22", "Green"),
("Green", "#008000", "Green"),
("DarkGreen", "#006400", "Green"),
("MediumAquamarine", "#66CDAA", "Cyan"),
("Aqua", "#00FFFF", "Cyan"),
("Cyan", "#00FFFF", "Cyan"),
("LightCyan", "#E0FFFF", "Cyan"),
("PaleTurquoise", "#AFEEEE", "Cyan"),
("Aquamarine", "#7FFFD4", "Cyan"),
("Turquoise", "#40E0D0", "Cyan"),
("MediumTurquoise", "#48D1CC", "Cyan"),
("DarkTurquoise", "#00CED1", "Cyan"),
("LightSeaGreen", "#20B2AA", "Cyan"),
("CadetBlue", "#5F9EA0", "Cyan"),
("DarkCyan", "#008B8B", "Cyan"),
("Teal", "#008080", "Cyan"),
("LightSteelBlue", "#B0C4DE", "Blue"),
("PowderBlue", "#B0E0E6", "Blue"),
("LightBlue", "#ADD8E6", "Blue"),
("SkyBlue", "#87CEEB", "Blue"),
("LightSkyBlue", "#87CEFA", "Blue"),
("DeepSkyBlue", "#00BFFF", "Blue"),
("DodgerBlue", "#1E90FF", "Blue"),
("CornflowerBlue", "#6495ED", "Blue"),
("SteelBlue", "#4682B4", "Blue"),
("RoyalBlue", "#4169E1", "Blue"),
("Blue", "#0000FF", "Blue"),
("MediumBlue", "#0000CD", "Blue"),
("DarkBlue", "#00008B", "Blue"),
("Navy", "#000080", "Blue"),
("MidnightBlue", "#191970", "Blue"),
("Lavender", "#E6E6FA", "Purple"),
("Thistle", "#D8BFD8", "Purple"),
("Plum", "#DDA0DD", "Purple"),
("Violet", "#EE82EE", "Purple"),
("Orchid", "#DA70D6", "Purple"),
("Fuchsia", "#FF00FF", "Purple"),
("Magenta", "#FF00FF", "Purple"),
("MediumOrchid", "#BA55D3", "Purple"),
("MediumPurple", "#9370DB", "Purple"),
("BlueViolet", "#8A2BE2", "Purple"),
("DarkViolet", "#9400D3", "Purple"),
("DarkOrchid", "#9932CC", "Purple"),
("DarkMagenta", "#8B008B", "Purple"),
("Purple", "#800080", "Purple"),
("Indigo", "#4B0082", "Purple"),
("DarkSlateBlue", "#483D8B", "Purple"),
("SlateBlue", "#6A5ACD", "Purple"),
("MediumSlateBlue", "#7B68EE", "Purple"),
("White", "#FFFFFF", "White"),
("Snow", "#FFFAFA", "White"),
("Honeydew", "#F0FFF0", "White"),
("MintCream", "#F5FFFA", "White"),
("Azure", "#F0FFFF", "White"),
("AliceBlue", "#F0F8FF", "White"),
("GhostWhite", "#F8F8FF", "White"),
("WhiteSmoke", "#F5F5F5", "White"),
("Seashell", "#FFF5EE", "White"),
("Beige", "#F5F5DC", "White"),
("OldLace", "#FDF5E6", "White"),
("FloralWhite", "#FFFAF0", "White"),
("Ivory", "#FFFFF0", "White"),
("AntiqueWhite", "#FAEBD7", "White"),
("Linen", "#FAF0E6", "White"),
("LavenderBlush", "#FFF0F5", "White"),
("MistyRose", "#FFE4E1", "White"),
("Gainsboro", "#DCDCDC", "Gray/Black"),
("LightGray", "#D3D3D3", "Gray/Black"),
("Silver", "#C0C0C0", "Gray/Black"),
("DarkGray", "#A9A9A9", "Gray/Black"),
("Gray", "#808080", "Gray/Black"),
("DimGray", "#696969", "Gray/Black"),
("LightSlateGray", "#778899", "Gray/Black"),
("SlateGray", "#708090", "Gray/Black"),
("DarkSlateGray", "#2F4F4F", "Gray/Black"),
("Black", "#000000", "Gray/Black"),
], columns=["Name", "Color", "Group"])
source = ColumnDataSource(dict(
names = list(css3_colors.Name),
groups = list(css3_colors.Group),
colors = list(css3_colors.Color),
))
xdr = FactorRange(factors=list(css3_colors.Group.unique()))
ydr = FactorRange(factors=list(reversed(css3_colors.Name)))
plot = Plot(title="CSS3 Color Names", x_range=xdr, y_range=ydr, plot_width=600, plot_height=2000)
rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None)
rect_renderer = plot.add_glyph(source, rect)
xaxis_above = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_above, 'above')
xaxis_below = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_below, 'below')
plot.add_layout(CategoricalAxis(), 'left')
url = "http://www.colors.commutercreative.com/@names/"
tooltips = """Click the color to go to:<br /><a href="{url}">{url}</a>""".format(url=url)
tap = TapTool(plot=plot, renderers=[rect_renderer], action=OpenURL(url=url))
hover = HoverTool(plot=plot, renderers=[rect_renderer], tooltips=tooltips)
plot.tools.extend([tap, hover])
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "colors.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "CSS3 Color Names"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
tedmeeds/tcga_encoder | tcga_encoder/analyses/kmeans_from_z_space_global_learn_survival.py | 1 | 17736 | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
#import scipy.spatial.distance.pdist
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy import stats
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test, multivariate_logrank_test
from tcga_encoder.analyses.survival_functions import *
# cloudy blue #acc2d9
# dark pastel green #56ae57
# dust #b2996e
# electric lime #a8ff04
# fresh green #69d84f
# light eggplant #894585
# nasty green #70b23f
# really light blue #d4ffff
# tea #65ab7c
# warm purple #952e8f
# yellowish tan #fcfc81
# cement #a5a391
# dark grass green #388004
# dusty teal #4c9085
# grey teal #5e9b8a
# macaroni and cheese #efb435
# pinkish tan #d99b82
# spruce #0a5f38
# strong blue #0c06f7
# toxic green #61de2a
# windows blue #3778bf
# blue blue #2242c7
# blue with a hint of purple #533cc6
# booger #9bb53c
# bright sea green #05ffa6
# dark green blue #1f6357
# deep turquoise #017374
# green teal #0cb577
# strong pink #ff0789
# bland #afa88b
# deep aqua #08787f
# lavender pink #dd85d7
# light moss green #a6c875
# light seafoam green #a7ffb5
# olive yellow #c2b709
# pig pink #e78ea5
# deep lilac #966ebd
# desert #ccad60
# dusty lavender #ac86a8
# purpley grey #947e94
# purply #983fb2
# candy pink #ff63e9
# light pastel green #b2fba5
# boring green #63b365
# kiwi green #8ee53f
# light grey green #b7e1a1
# orange pink #ff6f52
# tea green #bdf8a3
# very light brown #d3b683
# egg shell #fffcc4
# eggplant purple #430541
# powder pink #ffb2d0
# reddish grey #997570
# baby shit brown #ad900d
# liliac #c48efd
# stormy blue #507b9c
# ugly brown #7d7103
# custard #fffd78
# darkish pink #da467d
# def get_global_cost( data, w, K, lambda_l1, lambda_l2, idx ):
# cost = 0.0 #lambda_l1*np.sum( np.abs(w) ) + lambda_l2*np.sum(w*w)
# d = data[ idx]
# times = d["times"]
# events = d["events"]
# z = d["z"]
# n = len(times)
# cost = get_cost( times, events, z, w, K, lambda_l1, lambda_l2 )/n
# return cost
def get_global_cost( data, w, K, lambda_l1, lambda_l2, idx ):
cost = 0.0 #lambda_l1*np.sum( np.abs(w) ) + lambda_l2*np.sum(w*w)
for d in data:
times = d["times"]
events = d["events"]
z = d["z"]
cost += get_cost( times, events, z, w, K, lambda_l1, lambda_l2 )
return cost
# get_cost( times, events, z_train, w_delta_plus, K_p, lambda_l1, lambda_l2 )
def get_cost( times, events, z, w, K, lambda_l1, lambda_l2 ):
cost = 0 #lambda_l1*np.sum( np.abs(w) ) + lambda_l2*np.sum(w*w)
ok = pp.find( pp.isnan( times.values) == False )
y = np.dot( z[ok], w )
e = events.values[ok]
t = times.values[ok]
ids = e==1
n=len(t)
K = min( K, int(e.sum() ))
#pdb.set_trace()
#results = stats.spearmanr( y, t )
results = stats.spearmanr( y[ids], t[ids] )
if np.isnan( results.pvalue ):
pdb.set_trace()
#print results
#return cost + np.log( results.pvalue+1e-12 )
#results = stats.spearmanr( y[ids], times.values[ids] )
#return cost + np.sign(results.correlation)*np.log( results.pvalue+1e-12 )
I_splits = survival_splits( e, np.argsort(y), 2 )
bad_order=False
z_score = 0
# for k1 in range(K-1):
# g1 = I_splits[k1]
# g2 = I_splits[k1+1]
# z_score -= logrank_test( t[g1], t[g2], e[g1], e[g2] ).test_statistic
z_score += np.log( logrank_test( t[I_splits[0]], t[I_splits[-1]], e[I_splits[0]], e[I_splits[-1]] ).p_value)
#z_score += np.log( logrank_test( t[I_splits[1]], t[I_splits[-2]], e[I_splits[1]], e[I_splits[-2]] ).p_value)
#z_score -= logrank_test( t[I_splits[2]], t[I_splits[-3]], e[I_splits[2]], e[I_splits[-3]] ).test_statistic
#z_score -= logrank_test( t[g1], t[g2], e[g1], e[g2] ).test_statistic
return cost + (z_score + np.log( results.pvalue+1e-12 ))
# groups = groups_by_splits( len(z), I_splits )
#
# cost_delta_plus = np.log( \
# multivariate_logrank_test( \
# times, \
# groups=groups_by_splits( \
# n_tissue, \
# survival_splits( events, np.argsort(np.dot( z_train, w_delta_plus )), \
# K_p ) ), event_observed=events ).p_value + 1e-12 ) \
# +lambda_l2*np.sum( np.abs(w_delta_plus))
tissue_color_names = ["windows blue", "amber", "greyish", "faded green", "dusty purple",\
"nice blue","rosy pink","sand brown","baby purple",\
"fern","creme","ugly blue","washed out green","squash",\
"cinnamon","radioactive green","cocoa","charcoal grey","indian red",\
"light lavendar","toupe","dark cream" ,"burple","tan green",\
"azul","bruise", "sunny yellow","deep brown","off blue",\
"custard","powder pink","deep lilac","kiwi green","orange pink"]
def main( data_location, results_location ):
data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
results_path = os.path.join( HOME_DIR, results_location )
data_filename = os.path.join( data_path, "data.h5")
fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
save_dir = os.path.join( results_path, "kmeans_with_z_global_learn_survival7" )
check_and_mkdir(save_dir)
size_per_unit = 0.25
print "HOME_DIR: ", HOME_DIR
print "data_filename: ", data_filename
print "fill_filename: ", fill_filename
print "LOADING stores"
data_store = pd.HDFStore( data_filename, "r" )
fill_store = pd.HDFStore( fill_filename, "r" )
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
Z = np.vstack( (Z_train.values, Z_val.values) )
n_z = Z.shape[1]
#pdb.set_trace()
z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
barcodes = Z.index.values
#barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
quantiles = (len(Z)*np.array( [0,0.33, 0.66, 1.0] )).astype(int)
quantiles = (len(Z)*np.array( [0,0.2, 0.4,0.6,0.8,1.0] )).astype(int)
#quantiles = (len(Z)*np.linspace(0,1,61)).astype(int)
n_quantiles = len(quantiles)-1
start_q_id = -(n_quantiles-1)/2
#Z=Z.loc[barcodes]
std_z = Z.values.std(0)
keep_z = pp.find( std_z > 0.0 )
z_names = ["z_%d"%(z_idx) for z_idx in keep_z]
Z = Z[z_names]
n_z = len(z_names)
z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
Z.columns = z_names
#return Z
#pdb.set_trace()
#Z = pd.DataFrame( Z.values / Z.std(1).values[:,np.newaxis], index=Z.index, columns=Z.columns)
Z_values = Z.values
argsort_Z = np.argsort( Z_values, 0 )
Z_quantized = np.zeros( Z_values.shape, dtype=int )
for start_q, end_q in zip( quantiles[:-1], quantiles[1:] ):
for z_idx in range(n_z):
z_idx_order = argsort_Z[:,z_idx]
Z_quantized[ z_idx_order[start_q:end_q], z_idx] = start_q_id
start_q_id+=1
Z_quantized = pd.DataFrame(Z_quantized, index=barcodes, columns=z_names )
Z_quantized.to_csv( save_dir + "/Z_quantized.csv")
Z_quantized=Z
sub_bcs = np.array([ x+"_"+y for x,y in np.array(data_store["/CLINICAL/data"]["patient.stage_event.pathologic_stage"].index.tolist(),dtype=str)] )
sub_values = np.array( data_store["/CLINICAL/data"]["patient.stage_event.pathologic_stage"].values, dtype=str )
subtypes = pd.Series( sub_values, index = sub_bcs, name="subtypes")
tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
tissue_names = tissues.columns
tissue_idx = np.argmax( tissues.values, 1 )
# -----------------------------
# -----------------------------
ALL_SURVIVAL = data_store["/CLINICAL/data"][["patient.days_to_last_followup","patient.days_to_death","patient.days_to_birth"]]
tissue_barcodes = np.array( ALL_SURVIVAL.index.tolist(), dtype=str )
surv_barcodes = np.array([ x+"_"+y for x,y in tissue_barcodes])
NEW_SURVIVAL = pd.DataFrame( ALL_SURVIVAL.values, index =surv_barcodes, columns = ALL_SURVIVAL.columns )
NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
#clinical = data_store["/CLINICAL/data"].loc[barcodes]
Age = NEW_SURVIVAL[ "patient.days_to_birth" ].values.astype(int)
Times = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)+NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)
Events = (1-np.isnan( NEW_SURVIVAL[ "patient.days_to_death" ].astype(float)) ).astype(int)
ok_age_query = Age<-10
ok_age = pp.find(ok_age_query )
tissues = tissues[ ok_age_query ]
#pdb.set_trace()
Age=-Age[ok_age]
Times = Times[ok_age]
Events = Events[ok_age]
s_barcodes = barcodes[ok_age]
NEW_SURVIVAL = NEW_SURVIVAL.loc[s_barcodes]
#ok_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values>=0
#ok_followup = pp.find( ok_followup_query )
bad_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)<0
bad_followup = pp.find( bad_followup_query )
ok_followup_query = 1-bad_followup_query
ok_followup = pp.find( ok_followup_query )
bad_death_query = NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)<0
bad_death = pp.find( bad_death_query )
#pdb.set_trace()
Age=Age[ok_followup]
Times = Times[ok_followup]
Events = Events[ok_followup]
s_barcodes = s_barcodes[ok_followup]
NEW_SURVIVAL = NEW_SURVIVAL.loc[s_barcodes]
fill_store.close()
data_store.close()
# S = Z.loc[s_barcodes]
# S["E"] = Events
# S["T"] = Times
# S["Age"] = np.log(Age)
S = pd.DataFrame( np.vstack((Events,Times)).T, index = s_barcodes, columns=["E","T"])
# -----------------------------
# -----------------------------
from sklearn.cluster import MiniBatchKMeans
# print "running kmeans"
# kmeans_patients = MiniBatchKMeans(n_clusters=10, random_state=0).fit(Z_quantized.values)
# kmeans_patients_labels = kmeans_patients.labels_
#
# kmeans_z = MiniBatchKMeans(n_clusters=10, random_state=0).fit(Z_quantized.values.T)
# kmeans_z_labels = kmeans_z.labels_
#
#
# order_labels = np.argsort(kmeans_patients_labels)
# order_labels_z = np.argsort(kmeans_z_labels)
# sorted_Z = pd.DataFrame( Z_quantized.values[order_labels,:], index=Z_quantized.index[order_labels], columns=Z_quantized.columns)
# sorted_Z = pd.DataFrame( sorted_Z.values[:,order_labels_z], index=sorted_Z.index, columns = sorted_Z.columns[order_labels_z] )
n = len(Z)
n_tissues = len(tissue_names)
K_p = 4
K_z = 10
k_pallette = sns.hls_palette(K_p)
data = []
for t_idx in range(n_tissues):
#t_idx=1
tissue_name = tissue_names[t_idx]
print "working %s"%(tissue_name)
t_ids_cohort = tissue_idx == t_idx
n_tissue = np.sum(t_ids_cohort)
if n_tissue < 1:
print "skipping ",tissue_name
continue
Z_cohort = Z_quantized[ t_ids_cohort ]
bcs = barcodes[t_ids_cohort]
S_cohort = S.loc[bcs]
events = S_cohort["E"]
times = S_cohort["T"]
ok = np.isnan(times.values)==False
bcs = S_cohort.index.values[ok]
Z_cohort = Z_cohort.loc[bcs]
S_cohort = S_cohort.loc[bcs]
events = S_cohort["E"]
times = S_cohort["T"]
z_train = Z_cohort.values
data.append( {"tissue":tissue_name, "barcodes":bcs,"z":z_train,"events":events,"times":times})
dims = len(z_names)
w = 0.001*np.random.randn( dims )
epsilon = 0.001
learning_rate = 0.001
mom = 0*w
alpha=0.95
lambda_l1=0.0
lambda_l2=0.0
cost = get_global_cost( data, w, K_p, lambda_l1, lambda_l2, 0 )
print "prelim cost ", -1, cost
min_cost = cost
for i in range(0):
xw = 0.0001*np.random.randn( dims )
cost = get_global_cost( data, xw, K_p, lambda_l1, lambda_l2 )
print "prelim cost ", i, cost
if cost < min_cost:
min_cost = cost
w = xw
cost=min_cost
repeats = range(2)
print -1, cost
#dX = mc*dXprev + lr*(1-mc)*dperf/dX
old_dw=0.0
costs=[]
all_costs=[]
pp.close('all')
# f=pp.figure()
# pp.show()
# pp.ion()
# pp.plot( [-1], [cost], 'ro')
for step in range(500):
grad = np.zeros(dims)
random_off = [] #np.random.permutation(dims)[:dims-10]
for r in repeats:
idx = np.random.randint(len(data))
bernoulli = 2*stats.bernoulli( 0.5 ).rvs(dims) - 1
delta_w = epsilon*bernoulli #np.random.randn(dims)
#random_off = np.random.permutation(dims)[:dims/3]
delta_w[random_off]=0
bernoulli[random_off]=0
w_delta_plus = w + delta_w
w_delta_neg = w - delta_w
cost_delta_plus = get_global_cost( data, w_delta_plus, K_p, lambda_l1, lambda_l2, idx )
cost_delta_neg = get_global_cost( data, w_delta_neg, K_p, lambda_l1, lambda_l2, idx )
grad += bernoulli*(cost_delta_plus-cost_delta_neg)/(2*epsilon)
grad /= len(repeats)
grad += lambda_l2*w + lambda_l1*np.sign(w)
grad = grad / np.linalg.norm(grad)
#grad[random_off] = 0
if step==0:
dw = learning_rate*grad
else:
dw = alpha*old_dw + learning_rate*grad
#w -= learning_rate*grad
dw =learning_rate*grad
w -= dw + 0*learning_rate*np.random.randn(dims)
old_dw = dw
#epsilon *= 0.995
if np.random.rand()<0.1:
print "train cost_delta_plus ", step, cost_delta_plus, cost_delta_neg
if cost_delta_plus < cost:
#w = w_delta_plus
cost = cost_delta_plus
learning_rate *= 1.0
dw = learning_rate*grad
print "***", step, cost, cost_delta_plus, np.sum(np.abs(w))
else:
learning_rate /= 1
costs.append(cost)
all_costs.append(cost_delta_plus)
#
# pp.plot( [step], [cost], 'ro')
# pp.plot( [step], [cost_delta_plus], 'b.')
# pp.plot( [step], [cost_delta_neg], 'b.')
# if np.random.rand()<0.01:
# pp.draw()
# pp.ioff()
# pdb.set_trace()
pp.figure()
pp.plot(all_costs,'o-')
pp.plot(costs,'o-')
pp.savefig( save_dir + "/costs.png", fmt="png" )
#pp.show()
print step, cost, cost_delta_plus, np.sum(np.abs(w))
for tissue_data in data:
z_train = tissue_data["z"]
events = tissue_data["events"]
times = tissue_data["times"]
bcs = tissue_data["barcodes"]
n_tissue = len(events)
tissue_name = tissue_data["tissue"]
print "plotting ",tissue_name
Z_cohort = pd.DataFrame( z_train, index = bcs, columns=z_names )
y = np.dot( z_train, w )
I = np.argsort(y)
n_tissue=len(y)
I_splits = survival_splits( events, I, min(K_p,int(events.sum())) )
groups = groups_by_splits( n_tissue, I_splits )
results = multivariate_logrank_test(times, groups=groups, event_observed=events )
p_value = results.p_value
size1 = max( min( int( n_z*size_per_unit ), 12), 16 )
size2 = max( min( int( n_tissue*size_per_unit), 12), 16)
z_order = np.argsort( -np.abs(w) )
patient_order = np.argsort(y)
m_times_0 = times.values[ I_splits[0]][ events[I_splits[0]].values==1].mean()
m_times_1 = times.values[ I_splits[-1]][ events[I_splits[-1]].values==1].mean()
k_pallette = sns.hls_palette(K_p)
k_pallette = sns.color_palette("rainbow", K_p)
k_pallette.reverse()
if m_times_1 < m_times_0:
# reverse pallette
k_pallette.reverse()
k_colors = np.array([k_pallette[int(i)] for i in groups[patient_order]] )
#pdb.set_trace()
sorted_Z = Z_cohort.values
sorted_Z = sorted_Z[patient_order,:]
sorted_Z = sorted_Z[:,z_order]
#so
sorted_Z = pd.DataFrame( sorted_Z, index = Z_cohort.index.values[patient_order], columns=Z_cohort.columns[z_order] )
#pdb.set_trace()
h = sns.clustermap( sorted_Z, row_colors=k_colors, row_cluster=False, col_cluster=False, figsize=(size1,size2) )
pp.setp(h.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(h.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(h.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
pp.setp(h.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
h.ax_row_dendrogram.set_visible(False)
h.ax_col_dendrogram.set_visible(False)
h.cax.set_visible(False)
h.ax_heatmap.hlines(n_tissue-pp.find(np.diff(groups[patient_order]))-1, *h.ax_heatmap.get_xlim(), color="black", lw=5)
pp.savefig( save_dir + "/%s_learned.png"%(tissue_name), fmt="png" )#, dpi=300, bbox_inches='tight')
pp.close('all')
f = pp.figure()
ax= f.add_subplot(111)
kp = 0
kmf = KaplanMeierFitter()
for i_split in I_splits:
k_bcs = bcs[ i_split ]
if len(k_bcs) > 1:
kmf.fit(times[i_split], event_observed=events[i_split], label="k%d"%(kp) )
ax=kmf.plot(ax=ax,at_risk_counts=False,show_censors=True, color=k_pallette[kp],ci_show=False,lw=4)
kp += 1
pp.ylim(0,1)
pp.title("%s p-value = %0.5f"%(tissue_name,p_value))
pp.savefig( save_dir + "/%s_survival.png"%(tissue_name), format="png" )#, dpi=300)
return Z
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
kmf = main( data_location, results_location ) | mit |
inodb/sufam | sufam/__main__.py | 1 | 14002 | #!/usr/bin/env python
"""
So U Found A Mutation? (SUFAM)
Found a mutation in one or more samples? Now you want to check if they are in
another sample. Unfortunately mutect, varscan or whatever other variant caller
is not calling them. Use SUFAM. The super sensitive validation caller that
calls everything on a given position. All you need is a vcf with the mutations
that you are interested in and the sam/bam file of the sample where you want to
find the same inconsipicuous mutation.
Author: inodb
"""
import argparse
from collections import Counter, namedtuple
import sys
import warnings
import six
import numpy as np
import pandas as pd
import vcf
import sufam
from sufam import mpileup_parser
def _most_common_al(x):
if x["ref"]:
bl = ["A", "C", "G", "T"]
bl.remove(x["ref"])
mc = Counter({k: int(v) for k, v in dict(x.ix[bl]).iteritems()}).most_common(1)[0]
return pd.Series({"most_common_al": str(mc[0]),
"most_common_al_count": str(mc[1]),
"most_common_al_maf": str(mc[1]/float(x["cov"])) if float(x["cov"]) > 0 else "0"})
else:
return pd.Series({"most_common_al": None,
"most_common_al_count": None,
"most_common_al_maf": None})
def _val_al(x):
if x["ref"]:
if len(x["val_ref"]) == 1 and len(x["val_alt"]) == 1: # SNV
al_count = int(x.ix[x["val_alt"]])
al_type = "snv"
al_maf = al_count / float(x["cov"]) if float(x["cov"]) > 0 else None
elif len(x["val_alt"]) > len(x["val_ref"]): # insertion
query = x["val_alt"][len(x["val_ref"]):]
al_count = Counter(x["+"].split(","))[query] if x["+"] is not None else 0
al_type = "insertion"
al_maf = al_count / float(x["cov"])
else: # deletion
query = x["val_ref"][len(x["val_alt"]):]
al_count = Counter(x["-"].split(","))[query] if x["-"] is not None else 0
al_type = "deletion"
al_maf = al_count / float(x["cov"])
return pd.Series({"val_al_type": al_type,
"val_al_count": al_count,
"val_maf": al_maf})
else:
return pd.Series({"val_al_type": None,
"val_al_count": None,
"val_maf": None})
def _most_common_indel(x):
dels = Counter(x["-"].split(",")).most_common(1)[0] if x["-"] else None
ins = Counter(x["+"].split(",")).most_common(1)[0] if x["+"] else None
if ins and dels:
mc = dels if dels[1] >= ins[1] else ins
indel_type = "-" if dels[1] >= ins[1] else "+"
elif ins:
mc = ins
indel_type = "+"
elif dels:
mc = dels
indel_type = "-"
else:
return pd.Series({
"most_common_indel_type": None,
"most_common_indel": None,
"most_common_indel_count": None,
"most_common_indel_maf": None})
return pd.Series({
"most_common_indel_type": indel_type,
"most_common_indel": str(mc[0]),
"most_common_indel_count": str(mc[1]),
"most_common_indel_maf": str(mc[1]/float(x["cov"]) if float(x["cov"]) > 0 else "0")})
def get_baseparser_extended_df(sample, bp_lines, ref, alt):
    """Turn baseParser results into a dataframe.

    :param sample: Sample name, repeated in the "sample" column.
    :param bp_lines: Iterable of tab-separated baseparser output lines
        (chrom, pos, ref, cov, A, C, G, T, *, -, +), or None.
    :param ref: Reference allele being validated (falsy skips the
        validation columns).
    :param alt: Alternative allele being validated.
    :returns: pandas.DataFrame with per-position counts plus derived
        most-common allele/indel columns, or None when there are no lines
        or no covered positions.
    """
    columns = "chrom\tpos\tref\tcov\tA\tC\tG\tT\t*\t-\t+".split()
    if bp_lines is None:
        return None
    # change baseparser output to get most common maf per indel.
    # np.object was removed in NumPy 1.24; the builtin object dtype is the
    # supported spelling and behaves identically.
    bpdf = pd.DataFrame([[sample] + l.rstrip('\n').split("\t") for l in bp_lines if len(l) > 0],
                        columns=["sample"] + columns, dtype=object)
    bpdf[bpdf == ""] = None
    # remove zero coverage rows
    bpdf = bpdf[bpdf["cov"].astype(int) > 0]
    if len(bpdf) == 0:
        return None
    if ref and alt:
        # add columns for validation allele
        bpdf = pd.concat([bpdf, pd.DataFrame({"val_ref": pd.Series(ref), "val_alt": pd.Series(alt)})], axis=1)
        bpdf = pd.concat([bpdf, bpdf.apply(_val_al, axis=1)], axis=1)
    bpdf = pd.concat([bpdf, bpdf.apply(_most_common_indel, axis=1)], axis=1)
    bpdf = pd.concat([bpdf, bpdf.apply(_most_common_al, axis=1)], axis=1)
    # NOTE(review): counts/mafs are strings here, so max() compares
    # lexicographically and fails on None under Python 3 -- verify intended
    # semantics against the pinned Python/pandas versions.
    bpdf["most_common_count"] = bpdf.apply(lambda x: max([x.most_common_al_count, x.most_common_indel_count]), axis=1)
    bpdf["most_common_maf"] = bpdf.apply(lambda x: max([x.most_common_al_maf, x.most_common_indel_maf]), axis=1)
    return bpdf
def filter_out_mutations_in_normal(tumordf, normaldf, most_common_maf_min=0.2,
                                   most_common_count_maf_threshold=20,
                                   most_common_count_min=1):
    """Remove tumor mutations whose top event is also seen in the normal.

    :param tumordf: Extended baseparser dataframe of the tumor sample.
    :param normaldf: Extended baseparser dataframe of the matched normal.
    :param most_common_maf_min: Minimum normal MAF used for well-covered
        normal positions.
    :param most_common_count_maf_threshold: Normal read count at or above
        which the MAF criterion applies instead of the raw-count criterion.
    :param most_common_count_min: Minimum normal read count used for
        poorly-covered normal positions.
    :returns: Filtered dataframe with the tumor column names restored.
    """
    df = tumordf.merge(normaldf, on=["chrom", "pos"], suffixes=("_T", "_N"))
    # a call is "in the normal" when the tumor's top event (allele or indel)
    # is the same event as the normal's top allele/indel
    common_al = (df.most_common_al_count_T == df.most_common_count_T) & (df.most_common_al_T == df.most_common_al_N)
    # BUGFIX: the normal-side column was misspelled "imost_common_indel_N",
    # which raised AttributeError whenever this filter ran
    common_indel = (df.most_common_indel_count_T == df.most_common_count_T) & \
        (df.most_common_indel_T == df.most_common_indel_N)
    normal_criteria = ((df.most_common_count_N >= most_common_count_maf_threshold) &
                       (df.most_common_maf_N > most_common_maf_min)) | \
                      ((df.most_common_count_N < most_common_count_maf_threshold) &
                       (df.most_common_count_N > most_common_count_min))
    df = df[~(common_al | common_indel) & normal_criteria]
    # restore column names of tumor; iterate over a snapshot because columns
    # are deleted inside the loop
    for c in list(df.columns):
        if c.endswith("_N"):
            del df[c]
    df.columns = [c[:-2] if c.endswith("_T") else c for c in df.columns]
    return df
def select_only_revertant_mutations(bpdf, snv=None, ins=None, dlt=None):
    """
    Select only mutations that revert the given mutation in a single event.

    For an SNV the same base must be called back; for an indel the net
    length change of the original mutation plus the new indel must be a
    multiple of 3 so the reading frame is restored.

    :param bpdf: Extended baseparser dataframe (requires most_common_*
        columns).
    :param snv: Original single-nucleotide variant base (A, C, G or T).
    :param ins: Sequence of the original insertion.
    :param dlt: Sequence of the original deletion.
    :returns: Filtered dataframe containing only reverting rows.
    :raises Exception: When not exactly one of snv/ins/dlt is given, or snv
        is not a valid base.
    """
    if sum([bool(snv), bool(ins), bool(dlt)]) != 1:
        raise(Exception("Should be either snv, ins or del"))
    if snv:
        if snv not in ["A", "C", "G", "T"]:
            raise(Exception("snv {} should be A, C, G or T".format(snv)))
        return bpdf[(bpdf.most_common_al == snv) & (bpdf.most_common_al_count == bpdf.most_common_count)]
    elif bool(ins):
        # BUGFIX: the frame test was written "len(x) + len(ins) % 3 == 0",
        # which binds as len(x) + (len(ins) % 3) and can never be 0 for a
        # non-empty indel; the intended test is on the combined length
        # modulo 3 (and analogously for the other branches below).
        return \
            bpdf[((bpdf.most_common_indel.apply(lambda x: (len(x) + len(ins)) % 3 if x else None) == 0) &
                  (bpdf.most_common_indel_type == "+") & (bpdf.most_common_count == bpdf.most_common_indel_count)) |
                 ((bpdf.most_common_indel.apply(lambda x: (len(ins) - len(x)) % 3 if x else None) == 0) &
                  (bpdf.most_common_indel_type == "-") & (bpdf.most_common_count == bpdf.most_common_indel_count))]
    elif bool(dlt):
        return \
            bpdf[((bpdf.most_common_indel.apply(lambda x: (len(x) - len(dlt)) % 3 if x else None) == 0) &
                  (bpdf.most_common_indel_type == "+") & (bpdf.most_common_count == bpdf.most_common_indel_count)) |
                 ((bpdf.most_common_indel.apply(lambda x: (-len(dlt) - len(x)) % 3 if x else None) == 0) &
                  (bpdf.most_common_indel_type == "-") & (bpdf.most_common_count == bpdf.most_common_indel_count))]
    else:
        # should never happen
        raise(Exception("No mutation given?"))
def _write_bp(outfile, bp, header, output_format):
    # Write one baseparser row (a pandas.Series for a single position) to
    # outfile in the requested format. `header` is the ordered list of
    # column names for the tab-separated "sufam" output.
    if output_format == "sufam":
        # NOTE(review): Series.get with a list of keys is deprecated/removed
        # in newer pandas releases -- verify against the pinned pandas version.
        outfile.write("\t".join(bp.where(pd.notnull(bp), np.nan).get(header, None).astype(str)) + "\n")
    elif output_format == "matrix":
        # matrix output: one 0/1 flag per line, 1 when any validating read was seen
        outfile.write("1\n" if bp.val_al_count > 0 else "0\n")
    else:
        raise(Exception("Unrecognized output format"))
def _write_bp_vcf(outfile, bps, vcf_writer, record):
    """Write one VCF record with GT:AD:DP calls derived from the baseparser
    rows in ``bps`` (one row per sample, in sample order).

    :param outfile: Unused here; writing goes through ``vcf_writer``.
    :param bps: Sequence of baseparser rows (pandas.Series) with "cov" and
        "val_al_count" fields.
    :param vcf_writer: Open ``vcf.Writer``.
    :param record: pyvcf record whose FORMAT/samples/FILTER are rewritten.
    """
    def determine_genotype(bp):
        # 0/0, 0/1 or 1/1 depending on whether reference and/or alternate
        # supporting reads were observed
        ref = int(int(bp['cov']) - bp.val_al_count > 0)
        alt = int(bp.val_al_count > 0)
        return '{}/{}'.format(ref, alt)
    record.FORMAT = "GT:AD:DP"
    _CallDataFormat = namedtuple('CallDataFormat', 'GT AD DP'.split())
    # (removed dead assignment "samp_fmt = vcf.parser.Reader" -- it was
    # never used)
    calls = []
    for bp in bps:
        call = vcf.model._Call(None,
                               None,
                               _CallDataFormat(GT=determine_genotype(bp),
                                               AD=[int(bp['cov']) - bp.val_al_count, bp.val_al_count],
                                               DP=bp['cov']))
        calls += [call]
    record.samples = calls
    if record.FILTER is None:
        record.FILTER = []
    vcf_writer.write_record(record)
def validate_mutations(vcffile, bams, reffa, chr_reffa, samples, output_format, outfile,
                       mpileup_parameters=mpileup_parser.MPILEUP_DEFAULT_PARAMS):
    """Check if mutations in vcf are in bam"""
    # ordered columns of the tab-separated "sufam" output
    output_header = "sample chrom pos ref cov A C G T * - + " \
        "val_ref val_alt val_al_type val_al_count val_maf "\
        "most_common_indel most_common_indel_count most_common_indel_maf most_common_indel_type most_common_al " \
        "most_common_al_count most_common_al_maf most_common_count most_common_maf".split()
    # for backwards compatibility
    # if bam or samples is a string, convert to list instead
    if isinstance(samples, six.string_types):
        samples = [samples]
    if isinstance(bams, six.string_types):
        bams = [bams]
    if output_format == 'vcf':
        # vcf output goes through pyvcf: declare the GT/AD/DP FORMAT fields
        # and reuse the input header for the writer
        vcf_reader = vcf.Reader(open(vcffile))
        vcf_reader.samples = samples
        vcf_reader.formats['GT'] = vcf.parser._Format(id='GT', num=1, type='String', desc="Genotype")
        vcf_reader.formats['AD'] = vcf.parser._Format(id='AD', num='R', type='Integer', desc="Allelic depth")
        vcf_reader.formats['DP'] = vcf.parser._Format(id='DP', num=1, type='Integer', desc="Depth")
        vcf_writer = vcf.Writer(outfile, vcf_reader)
    else:
        # other formats parse the vcf by hand, line by line
        vcf_reader = open(vcffile)
        if output_format == "sufam":
            outfile.write("\t".join(output_header))
            outfile.write("\n")
    for record in vcf_reader:
        if output_format != 'vcf':
            # hand-parsed path: `record` is a raw line; build a spoof
            # namedtuple record mirroring the pyvcf interface
            line = record
            if line.startswith("#CHROM"):
                header = line[1:].rstrip('\n').split("\t")
                # create spoof pyvcf record if vcf_reader is not used
                _Record = namedtuple('Record', header)
            if line.startswith("#"):
                continue
            # NOTE(review): if the file has no #CHROM line, `header` is
            # unbound here and this check raises NameError instead of the
            # intended Exception -- confirm expected inputs always carry a
            # header.
            if len(header) == 0:
                raise(Exception("No header found in vcf file #CHROM not found"))
            # zip all column values, except alt (needs to be list in pyvcf)
            record_args = dict(zip(header, line.rstrip('\n').split("\t")))
            record_args['ALT'] = [record_args['ALT']]
            record = _Record(**record_args)
        # determine type of mutation from REF/ALT lengths
        record_type = "snv"
        if len(record.ALT) > 1:
            warnings.warn("Multiple ALT in one record is not implemented - using first")
        if len(record.REF) > len(record.ALT[0]):
            record_type = "deletion"
        elif len(record.ALT[0]) > len(record.REF):
            record_type = "insertion"
        # no coverage results: fallback row used when mpileup returns nothing
        no_cov = pd.Series({
            "chrom": str(record.CHROM), "pos": str(record.POS),
            "ref": str(record.REF),
            "cov": 0, "A": 0, "C": 0, "G": 0, "T": 0,
            "val_ref": str(record.REF), "val_alt": str(record.ALT[0]),
            "val_al_type": record_type, "val_al_count": 0, "val_maf": 0})
        # collect mpileup baseparser results per bam
        bps = []
        for i, bam in enumerate(bams):
            sample = samples[i]
            no_cov['sample'] = sample
            bp_lines = mpileup_parser.run_and_parse(bam, str(record.CHROM), str(record.POS), str(record.POS), reffa,
                                                    chr_reffa, mpileup_parameters)
            bpdf = get_baseparser_extended_df(sample, bp_lines, str(record.REF), str(record.ALT[0]))
            if bpdf is None:
                bp = no_cov
            else:
                # NOTE(review): .ix was removed from pandas (use .iloc[0]) --
                # verify against the pinned pandas version.
                bp = bpdf.ix[0, :]
            bps += [bp]
        # output call
        if output_format == "vcf":
            _write_bp_vcf(outfile, bps, vcf_writer, record)
        else:
            # only one bam file supported for outputs other than vcf
            _write_bp(outfile, bps[0], output_header, output_format)
def main():
    # Command-line entry point: parse arguments and run validate_mutations
    # against stdout.
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("reffa", type=str, help="Reference genome (fasta)")
    parser.add_argument("vcf", type=str, help="VCF with mutations to be validated")
    parser.add_argument("bam", type=str, nargs='+', help="BAMs to find mutations in (only --format vcf supports > 1)")
    parser.add_argument("--chr_reffa", type=str, default = None, help="chr reference genome (fasta) - reference file with chr")
    parser.add_argument("--sample_name", type=str, nargs='+', default=None, help="Set name "
                        "of sample, used in output [name of bam].")
    parser.add_argument("--format", type=str, choices=["matrix", "sufam", "vcf"], default="sufam",
                        help="Set output format [sufam]")
    parser.add_argument("--mpileup-parameters", type=str, default=mpileup_parser.MPILEUP_DEFAULT_PARAMS,
                        help="Set options for mpileup [{}]".format(mpileup_parser.MPILEUP_DEFAULT_PARAMS))
    parser.add_argument("--version", action='version', version=sufam.__version__)
    args = parser.parse_args()
    # default sample names to the bam paths, one per bam
    if args.sample_name is None:
        args.sample_name = args.bam
    # multi-bam output is only implemented for the vcf writer
    if len(args.bam) > 1 and args.format != 'vcf':
        raise(Exception('Multiple bam files is only supported for --format vcf'))
    if len(args.sample_name) != len(args.bam):
        raise(Exception('# of --sample_name arguments should be equal to # of bams'))
    validate_mutations(args.vcf, args.bam, args.reffa, args.chr_reffa, args.sample_name,
                       args.format, sys.stdout, mpileup_parameters=args.mpileup_parameters)
if __name__ == "__main__":
    main()
| mit |
ddempsey/PyFEHM | fvars.py | 1 | 44312 | """Functions for FEHM thermodynamic variables calculations."""
"""
Copyright 2013.
Los Alamos National Security, LLC.
This material was produced under U.S. Government contract DE-AC52-06NA25396 for
Los Alamos National Laboratory (LANL), which is operated by Los Alamos National
Security, LLC for the U.S. Department of Energy. The U.S. Government has rights
to use, reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES
ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified to produce
derivative works, such modified software should be clearly marked, so as not to
confuse it with the version available from LANL.
Additionally, this library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your option)
any later version. Accordingly, this library is distributed in the hope that it
will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
Public License for more details.
"""
import numpy as np
from ftool import*
from scipy import interpolate
from fdflt import*
dflt = fdflt()
YEL=[ 0.25623465e-03, 0.10184405e-02, 0.22554970e-04, 0.34836663e-07, 0.41769866e-02, -0.21244879e-04,
0.25493516e-07, 0.89557885e-04, 0.10855046e-06, -0.21720560e-06,]
YEV=[ 0.31290881e+00, -0.10e+01, 0.25748596e-01, 0.38846142e-03, 0.11319298e-01, 0.20966376e-04,
0.74228083e-08, 0.19206133e-02, -0.10372453e-03, 0.59104245e-07,]
YDL=[ 0.10000000e+01, 0.17472599e-01, -0.20443098e-04, -0.17442012e-06, 0.49564109e-02, -0.40757664e-04,
0.50676664e-07, 0.50330978e-04, 0.33914814e-06, -0.18383009e-06,]
YDV=[ 0.15089524e-05, 0.10000000e+01, -0.10000000e+01, -0.16676705e-02, 0.40111210e-07, 0.25625316e-10,
-0.40479650e-12, 0.43379623e-01, 0.24991800e-02, -0.94755043e-04,]
YVL=[ 0.17409149e-02, 0.18894882e-04, -0.66439332e-07, -0.23122388e-09, -0.31534914e-05, 0.11120716e-07,
-0.48576020e-10, 0.28006861e-07, 0.23225035e-09, 0.47180171e-10,]
YVV=[-0.13920783e-03, 0.98434337e-02, -0.51504232e-03, 0.62554603e-04, 0.27105772e-04, 0.84981906e-05,
0.34539757e-07, -0.25524682e-03, 0, 0.12316788e-05,]
ZEL=[ 0.10000000e+01, 0.23513278e-01, 0.48716386e-04, -0.19935046e-08, -0.50770309e-02, 0.57780287e-05,
0.90972916e-09, -0.58981537e-04, -0.12990752e-07, 0.45872518e-08,]
ZEV=[ 0.12511319e+00, -0.36061317e+00, 0.58668929e-02, 0.99059715e-04, 0.44331611e-02, 0.50902084e-05,
-0.10812602e-08, 0.90918809e-03, -0.26960555e-04, -0.36454880e-06,]
ZDL=[ 0.10009476e-02, 0.16812589e-04, -0.24582622e-07, -0.17014984e-09, 0.48841156e-05, -0.32967985e-07,
0.28619380e-10, 0.53249055e-07, 0.30456698e-09, -0.12221899e-09,]
ZDV=[ 0.12636224e+00, -0.30463489e+00, 0.27981880e-02, 0.51132337e-05, 0.59318010e-02, 0.80972509e-05,
-0.43798358e-07, 0.53046787e-03, -0.84916607e-05, 0.48444919e-06,]
ZVL=[ 0.10000000e+01, 0.10523153e-01, -0.22658391e-05, -0.31796607e-06, 0.29869141e-01, 0.21844248e-03,
-0.87658855e-06, 0.41690362e-03, -0.25147022e-05, 0.22144660e-05,]
ZVV=[ 0.10000000e+01, 0.10000000e+01, -0.10e1 , -0.10e1 , 0.10000000e+01, 0.0000000e+01,
-0.22934622e-03, 0.10000000e+01, 0 , 0.25834551e-01,]
YSP=[ 0.71725602e-03, 0.22607516e-04, 0.26178556e-05, -0.10516335e-07, 0.63167028e-09,]
YST=[-0.25048121e-05, 0.45249584e-02, 0.33551528e+00, 0.10000000e+01, 0.12254786e+00,]
ZSP=[ 0.10000000e+01, -0.22460012e-02, 0.30234492e-05, -0.32466525e-09, 0.0,]
ZST=[0.20889841e-06, 0.11587544e-03, 0.31934455e-02, 0.45538151e-02, 0.23756593e-03,]
# co2 solubility globals, from FEHM source (params_eosco2.h)
DENC, TC, RG, PC = [467.6e0, 304.1282e0, 0.1889241e0, 7.3773e0]
A = [8.37304456e0,-3.70454304e0,2.5e0,1.99427042e0,0.62105248e0,0.41195293e0,1.04028922e0,0.08327678e0]
PHIC = [0.0e0,0.0e0,0.0e0,3.15163e0,6.1119e0,6.77708e0,11.32384e0,27.08792e0]
N = [0.38856823203161e0,0.2938547594274e1,-0.55867188534934e1,-0.76753199592477e0,0.31729005580416e0,0.54803315897767e0,
0.12279411220335e0,0.2165896154322e1,0.15841735109724e1,-0.23132705405503e0,0.58116916431436e-1,-0.55369137205382e0,0.48946615909422e0,
-0.24275739843501e-1,0.62494790501678e-1,-0.12175860225246e0,-0.37055685270086e0,-0.16775879700426e-1,-0.11960736637987e0,
-0.45619362508778e-1,0.35612789270346e-1,-0.74427727132052e-2,-0.17395704902432e-2,-0.21810121289527e-1,0.24332166559236e-1,
-0.37440133423463e-1,0.14338715756878e0,-0.13491969083286e0,-0.2315122505348e-1,0.12363125492901e-1,0.2105832197294e-2,
-0.33958519026368e-3,0.55993651771592e-2,-0.30335118055646e-3,-0.2136548868832e3,0.26641569149272e5,-0.24027212204557e5,
-0.28341603423999e3,0.21247284400179e3,-0.66642276540751e0,0.72608632349897e0,0.55068668612842e-1]
C = [0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,3,3,3,4,4,4,4,4,4,5,6]
D = [1,1,1,1,2,2,3,1,2,4,5,5,5,6,6,6,1,1,4,4,4,7,8,2,3,3,5,5,6,7,8,10,4,8,2,2,2,3,3]
TI = [0.e0, 0.75e0,1.0e0,2.0e0,0.75e0,2.0e0,0.75e0,1.5e0,1.5e0,2.5e0,0.0e0,1.5e0,2.0e0,0.0e0,1.0e0,2.0e0,3.0e0,6.0e0,3.0e0,6.0e0,
8.0e0,6.0e0,0.0e0,7.0e0,12.0e0,16.0e0,22.0e0,24.0e0,16.0e0,24.0e0,8.0e0,2.0e0,28.0e0,14.0e0,1.0e0,0.0e0,1.0e0,3.0e0,3.0e0]
ALPHA = [25.,25.,25.,15.,20.]
ALPHA = np.concatenate([np.zeros(34), ALPHA])
BETA = [325.,300.,300.,275.,275.,0.3,0.3,0.3]
BETA = np.concatenate([np.zeros(34), BETA])
GAMMA = [1.16,1.19,1.19,1.25,1.22]
GAMMA = np.concatenate([np.zeros(34), GAMMA])
IPSILON = [1.,1.,1.,1.,1.]
IPSILON = np.concatenate([np.zeros(34), IPSILON])
ACO2 = [3.5,3.5,3.0]
ACO2 = np.concatenate([np.zeros(39), ACO2])
BCO2 = [0.875,0.925,0.875]
BCO2 = np.concatenate([np.zeros(39), BCO2])
CAPA = [0.7,0.7,0.7]
CAPA = np.concatenate([np.zeros(39), CAPA])
CAPB = [0.3,0.3,1.]
CAPB = np.concatenate([np.zeros(39), CAPB])
CAPC = [10.,10.,12.5]
CAPC = np.concatenate([np.zeros(39), CAPC])
CAPD = [275.,275.,275.]
CAPD = np.concatenate([np.zeros(39), CAPD])
CP = [-38.640844, 5.8948420, 59.876516, 26.654627, 10.637097]
co2_interp_path = dflt.co2_interp_path
co2Vars = False
if co2_interp_path != '' and os.path.isfile(co2_interp_path):
co2Vars = True
if os.path.isfile('co2_interp_table.txt'):
co2_interp_path = './co2_interp_table.txt'
co2Vars = True
if co2Vars:
with open(co2_interp_path,'r') as f:
f.readline()
line = f.readline()
tn,pn,na = line.split()[:3]
tn = int(tn); pn = int(pn); na = int(na)
f.readline()
f.readline()
f.readline()
f.readline()
# read in temperature data
keepReading = True
T = []
while keepReading:
line = f.readline()
if '>' in line: break
T.append(line.strip().split())
T = list(flatten(T))
Tval = np.array([float(t) for t in T])
Tdict = dict([(t,i) for i,t in enumerate(Tval)])
# read in pressure data
P = []
while keepReading:
line = f.readline()
if '>' in line: break
P.append(line.strip().split())
P = list(flatten(P))
Pval = np.array([float(p) for p in P])
Pdict = dict([(p,i) for i,p in enumerate(Pval)])
# read to array data
while keepReading:
line = f.readline()
if '>' in line: break
# read in array data
arraynames = ['density','dddt','dddp','enthalpy','dhdt','dhdp','viscosity','dvdt','dvdp']
arrays = {}
for arrayname in arraynames:
array = []
while keepReading:
line = f.readline()
if '>' in line: break
array.append(line.strip().split())
array = list(flatten(array))
array = np.array([float(a) for a in array])
arrays.update(dict(((arrayname,array),)))
while keepReading:
line = f.readline()
if 'Number of saturation line vertices' in line:
n_sv = int(line.split()[0])
break
f.readline()
# read in saturation line vertices
co2_sat_P = []
co2_sat_T = []
co2_sat_i = []
for n in range(n_sv):
vs = f.readline().split()
co2_sat_i.append(int(vs[3]))
co2_sat_P.append(float(vs[0]))
co2_sat_T.append(float(vs[1]))
while keepReading:
line = f.readline()
if '>' in line: break
f.readline()
co2l_arrays = {}
array = []
for n in range(n_sv):
array.append([float(v) for v in f.readline().strip().split()])
array = np.array(array)
for i,arrayname in enumerate(arraynames):
co2l_arrays[arrayname] = array[:,i]
while keepReading:
line = f.readline()
if '>' in line: break
f.readline()
co2g_arrays = {}
array = []
for n in range(n_sv):
array.append([float(v) for v in f.readline().strip().split()])
array = np.array(array)
for i,arrayname in enumerate(arraynames):
co2g_arrays[arrayname] = array[:,i]
def dens(P,T,derivative=''):
    """Return liquid water, vapor water and CO2 density, or derivatives with respect to temperature or pressure, for specified temperature and pressure.

    :param P: Pressure (MPa).
    :type P: fl64
    :param T: Temperature (degC)
    :type T: fl64
    :param derivative: Supply 'T' or 'temperature' for derivatives with respect to temperature, or 'P' or 'pressure' for derivatives with respect to pressure.
    :type derivative: str
    :returns: Three element tuple containing (liquid, vapor, CO2) density or derivatives if requested.
    """
    if hasattr(P, "__len__"):
        P = np.array(P)
    else:
        P = np.array([P])
    if hasattr(T, "__len__"):
        T = np.array(T)
    else:
        T = np.array([T])
    # the water fits are ratios of two 10-term polynomials in P and T; the
    # three helpers below replace twelve hand-expanded copies of the same
    # expression
    def p10(c):
        # c0 + c1*P + c2*P^2 + c3*P^3 + c4*T + c5*T^2 + c6*T^3
        #    + c7*P*T + c8*P^2*T + c9*P*T^2
        return (c[0] + c[1]*P + c[2]*P**2 + c[3]*P**3 + c[4]*T + c[5]*T**2 +
                c[6]*T**3 + c[7]*P*T + c[8]*P**2*T + c[9]*P*T**2)
    def p10_dP(c):
        # partial derivative of p10 with respect to P
        return (c[1] + c[2]*P*2 + c[3]*P**2*3 + c[7]*T + c[8]*P*2*T + c[9]*T**2)
    def p10_dT(c):
        # partial derivative of p10 with respect to T
        return (c[4] + c[5]*T*2 + c[6]*T**2*3 + c[7]*P + c[8]*P**2 + c[9]*P*T*2)
    # calculate water properties
    if not derivative:
        dens_l = p10(YDL)/p10(ZDL)
        dens_v = p10(YDV)/p10(ZDV)
    elif derivative in ['P','pressure']:
        YL0 = p10(YDL); ZL0 = p10(ZDL)
        YV0 = p10(YDV); ZV0 = p10(ZDV)
        YL1 = p10_dP(YDL); ZL1 = p10_dP(ZDL)
        YV1 = p10_dP(YDV); ZV1 = p10_dP(ZDV)
        # quotient rule
        dens_l = (ZL0*YL1-YL0*ZL1)/ZL0**2
        dens_v = (ZV0*YV1-YV0*ZV1)/ZV0**2
    elif derivative in ['T','temperature']:
        YL0 = p10(YDL); ZL0 = p10(ZDL)
        YV0 = p10(YDV); ZV0 = p10(ZDV)
        YL1 = p10_dT(YDL); ZL1 = p10_dT(ZDL)
        YV1 = p10_dT(YDV); ZV1 = p10_dT(ZDV)
        dens_l = (ZL0*YL1-YL0*ZL1)/ZL0**2
        dens_v = (ZV0*YV1-YV0*ZV1)/ZV0**2
    else: print('not a valid derivative'); return
    if not co2Vars: return (dens_l,dens_v,np.array([]))
    # calculate co2 properties from the lookup table
    if not derivative: k = 'density'
    elif derivative in ['P','pressure']: k = 'dddp'
    elif derivative in ['T','temperature']: k = 'dddt'
    arr_z = arrays[k][0:len(Pval)*len(Tval)].reshape(len(Pval),len(Tval))
    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # SciPy >= 1.14 -- confirm the pinned SciPy version or port this to
    # RegularGridInterpolator.
    fdens = interpolate.interp2d( Tval, Pval, arr_z )
    dens_c = [fdens(t,p)[0] for t,p in zip(T,P)]
    return (dens_l,dens_v,np.array(dens_c))
def enth(P,T,derivative=''):
    """Return liquid water, vapor water and CO2 enthalpy, or derivatives with respect to temperature or pressure, for specified temperature and pressure.

    :param P: Pressure (MPa).
    :type P: fl64
    :param T: Temperature (degC)
    :type T: fl64
    :param derivative: Supply 'T' or 'temperature' for derivatives with respect to temperature, or 'P' or 'pressure' for derivatives with respect to pressure.
    :type derivative: str
    :returns: Three element tuple containing (liquid, vapor, CO2) enthalpy or derivatives if requested.
    """
    P = np.array(P)
    T = np.array(T)
    # the water fits are ratios of two 10-term polynomials in P and T; the
    # helpers replace twelve hand-expanded copies of the same expression
    def p10(c):
        # c0 + c1*P + c2*P^2 + c3*P^3 + c4*T + c5*T^2 + c6*T^3
        #    + c7*P*T + c8*P^2*T + c9*P*T^2
        return (c[0] + c[1]*P + c[2]*P**2 + c[3]*P**3 + c[4]*T + c[5]*T**2 +
                c[6]*T**3 + c[7]*P*T + c[8]*P**2*T + c[9]*P*T**2)
    def p10_dP(c):
        # partial derivative of p10 with respect to P
        return (c[1] + c[2]*P*2 + c[3]*P**2*3 + c[7]*T + c[8]*P*2*T + c[9]*T**2)
    def p10_dT(c):
        # partial derivative of p10 with respect to T
        return (c[4] + c[5]*T*2 + c[6]*T**2*3 + c[7]*P + c[8]*P**2 + c[9]*P*T*2)
    # water enthalpies (previously held in misleadingly named dens_l/dens_v locals)
    if not derivative:
        enth_l = p10(YEL)/p10(ZEL)
        enth_v = p10(YEV)/p10(ZEV)
    elif derivative in ['P','pressure']:
        YL0 = p10(YEL); ZL0 = p10(ZEL)
        YV0 = p10(YEV); ZV0 = p10(ZEV)
        YL1 = p10_dP(YEL); ZL1 = p10_dP(ZEL)
        YV1 = p10_dP(YEV); ZV1 = p10_dP(ZEV)
        # quotient rule
        enth_l = (ZL0*YL1-YL0*ZL1)/ZL0**2
        enth_v = (ZV0*YV1-YV0*ZV1)/ZV0**2
    elif derivative in ['T','temperature']:
        YL0 = p10(YEL); ZL0 = p10(ZEL)
        YV0 = p10(YEV); ZV0 = p10(ZEV)
        YL1 = p10_dT(YEL); ZL1 = p10_dT(ZEL)
        YV1 = p10_dT(YEV); ZV1 = p10_dT(ZEV)
        enth_l = (ZL0*YL1-YL0*ZL1)/ZL0**2
        enth_v = (ZV0*YV1-YV0*ZV1)/ZV0**2
    else: print('not a valid derivative'); return
    if not co2Vars: return (enth_l,enth_v,np.array([]))
    # calculate co2 properties from the lookup table
    if not derivative: k = 'enthalpy'
    elif derivative in ['P','pressure']: k = 'dhdp'
    elif derivative in ['T','temperature']: k = 'dhdt'
    # broadcast scalars so P and T have matching lengths for the loop below
    if not P.shape: P = np.array([P])
    if not T.shape: T = np.array([T])
    if P.size == 1 and not T.size == 1: P = P*np.ones((1,len(T)))[0]
    elif T.size == 1 and not P.size == 1: T = T*np.ones((1,len(P)))[0]
    enth_c = []
    for Pi, Ti in zip(P,T):
        # bracket Pi between table pressures p0 <= Pi <= p1 (clamped at ends)
        if Pi<=Pval[0]: p0 = Pval[0]; p1 = Pval[0]
        elif Pi>=Pval[-1]: p0 = Pval[-1]; p1 = Pval[-1]
        else:
            p0 = Pval[0]
            for p in Pval[1:]:
                if Pi<=p:
                    p1 = p; break
                else:
                    p0 = p
        # bracket Ti between table temperatures
        if Ti<=Tval[0]: t0 = Tval[0]; t1 = Tval[0]
        elif Ti>=Tval[-1]: t0 = Tval[-1]; t1 = Tval[-1]
        else:
            t0 = Tval[0]
            for t in Tval[1:]:
                if Ti<=t:
                    t1 = t; break
                else:
                    t0 = t
        # NOTE(review): dt0/dt1/dp0/dp1 are computed but never used, and the
        # weighted average below mixes table *indices* rather than P/T
        # distances -- this looks like an unfinished bilinear interpolation.
        # Preserved as-is to keep behaviour unchanged; confirm against FEHM
        # before relying on CO2 values between grid points.
        dt0 = abs(Ti-t0); dt1 = abs(Ti-t1); dp0 = abs(Pi-p0); dp1 = abs(Pi-p1)
        t0 = Tdict[t0]; t1 = Tdict[t1]; p0 = Pdict[p0]; p1 = Pdict[p1]
        # flat-table indices of the four surrounding grid points
        i1 = p0*tn+t0
        i2 = p0*tn+t1
        i3 = p1*tn+t0
        i4 = p1*tn+t1
        # locate values in array
        v1 = arrays[k][i1]
        v2 = arrays[k][i2]
        v3 = arrays[k][i3]
        v4 = arrays[k][i4]
        enth_c.append((p0*(t1*v3+t0*v4)/(t1+t0) + p1*(t1*v1+t0*v2)/(t1+t0))/(p0+p1))
    return (enth_l,enth_v,np.array(enth_c))
def visc(P,T,derivative=''):
    """Return liquid water, vapor water and CO2 viscosity, or derivatives with respect to temperature or pressure, for specified temperature and pressure.

    :param P: Pressure (MPa).
    :type P: fl64
    :param T: Temperature (degC)
    :type T: fl64
    :param derivative: Supply 'T' or 'temperature' for derivatives with respect to temperature, or 'P' or 'pressure' for derivatives with respect to pressure.
    :type derivative: str
    :returns: Three element tuple containing (liquid, vapor, CO2) viscosity or derivatives if requested.
    """
    P = np.array(P)
    T = np.array(T)
    # the water fits are ratios of two 10-term polynomials in P and T; the
    # helpers replace twelve hand-expanded copies of the same expression
    def p10(c):
        # c0 + c1*P + c2*P^2 + c3*P^3 + c4*T + c5*T^2 + c6*T^3
        #    + c7*P*T + c8*P^2*T + c9*P*T^2
        return (c[0] + c[1]*P + c[2]*P**2 + c[3]*P**3 + c[4]*T + c[5]*T**2 +
                c[6]*T**3 + c[7]*P*T + c[8]*P**2*T + c[9]*P*T**2)
    def p10_dP(c):
        # partial derivative of p10 with respect to P
        return (c[1] + c[2]*P*2 + c[3]*P**2*3 + c[7]*T + c[8]*P*2*T + c[9]*T**2)
    def p10_dT(c):
        # partial derivative of p10 with respect to T
        return (c[4] + c[5]*T*2 + c[6]*T**2*3 + c[7]*P + c[8]*P**2 + c[9]*P*T*2)
    # water viscosities (previously held in misleadingly named dens_l/dens_v locals)
    if not derivative:
        visc_l = p10(YVL)/p10(ZVL)
        visc_v = p10(YVV)/p10(ZVV)
    elif derivative in ['P','pressure']:
        YL0 = p10(YVL); ZL0 = p10(ZVL)
        YV0 = p10(YVV); ZV0 = p10(ZVV)
        YL1 = p10_dP(YVL); ZL1 = p10_dP(ZVL)
        YV1 = p10_dP(YVV); ZV1 = p10_dP(ZVV)
        # quotient rule
        visc_l = (ZL0*YL1-YL0*ZL1)/ZL0**2
        visc_v = (ZV0*YV1-YV0*ZV1)/ZV0**2
    elif derivative in ['T','temperature']:
        YL0 = p10(YVL); ZL0 = p10(ZVL)
        YV0 = p10(YVV); ZV0 = p10(ZVV)
        YL1 = p10_dT(YVL); ZL1 = p10_dT(ZVL)
        YV1 = p10_dT(YVV); ZV1 = p10_dT(ZVV)
        visc_l = (ZL0*YL1-YL0*ZL1)/ZL0**2
        visc_v = (ZV0*YV1-YV0*ZV1)/ZV0**2
    else: print('not a valid derivative'); return
    # calculate co2 properties from the lookup table
    if not derivative: k = 'viscosity'
    elif derivative in ['P','pressure']: k = 'dvdp'
    elif derivative in ['T','temperature']: k = 'dvdt'
    if not co2Vars: return (visc_l,visc_v,np.array([]))
    # broadcast scalars so P and T have matching lengths for the loop below
    if not P.shape: P = np.array([P])
    if not T.shape: T = np.array([T])
    if P.size == 1 and not T.size == 1: P = P*np.ones((1,len(T)))[0]
    elif T.size == 1 and not P.size == 1: T = T*np.ones((1,len(P)))[0]
    visc_c = []
    for Pi, Ti in zip(P,T):
        # bracket Pi between table pressures p0 <= Pi <= p1 (clamped at ends)
        if Pi<=Pval[0]: p0 = Pval[0]; p1 = Pval[0]
        elif Pi>=Pval[-1]: p0 = Pval[-1]; p1 = Pval[-1]
        else:
            p0 = Pval[0]
            for p in Pval[1:]:
                if Pi<=p:
                    p1 = p; break
                else:
                    p0 = p
        # bracket Ti between table temperatures
        if Ti<=Tval[0]: t0 = Tval[0]; t1 = Tval[0]
        elif Ti>=Tval[-1]: t0 = Tval[-1]; t1 = Tval[-1]
        else:
            t0 = Tval[0]
            for t in Tval[1:]:
                if Ti<=t:
                    t1 = t; break
                else:
                    t0 = t
        # NOTE(review): dt0/dt1/dp0/dp1 are computed but never used, and the
        # weighted average below mixes table *indices* rather than P/T
        # distances -- this looks like an unfinished bilinear interpolation.
        # Preserved as-is to keep behaviour unchanged; confirm against FEHM
        # before relying on CO2 values between grid points.
        dt0 = abs(Ti-t0); dt1 = abs(Ti-t1); dp0 = abs(Pi-p0); dp1 = abs(Pi-p1)
        t0 = Tdict[t0]; t1 = Tdict[t1]; p0 = Pdict[p0]; p1 = Pdict[p1]
        # flat-table indices of the four surrounding grid points
        i1 = p0*tn+t0
        i2 = p0*tn+t1
        i3 = p1*tn+t0
        i4 = p1*tn+t1
        # locate values in array
        v1 = arrays[k][i1]
        v2 = arrays[k][i2]
        v3 = arrays[k][i3]
        v4 = arrays[k][i4]
        visc_c.append((p0*(t1*v3+t0*v4)/(t1+t0) + p1*(t1*v1+t0*v2)/(t1+t0))/(p0+p1))
    # the table apparently stores viscosity scaled by 1e6; the 1e-6 factor
    # restores the working units -- TODO confirm against the table format
    return (visc_l,visc_v,np.array(visc_c)*1e-6)
def sat(T):
    """Return saturation pressure and first derivative for given temperature.
    :param T: Temperature (degC)
    :type T: fl64
    :returns: Two element tuple containing (saturation pressure, derivative).
    """
    # Saturation pressure is a rational polynomial Y(T)/Z(T) with quartic
    # numerator and denominator coefficients YSP and ZSP (module constants).
    num = (YSP[0]+
           YSP[1]*T+
           YSP[2]*T**2+
           YSP[3]*T**3+
           YSP[4]*T**4)
    den = (ZSP[0]+
           ZSP[1]*T+
           ZSP[2]*T**2+
           ZSP[3]*T**3+
           ZSP[4]*T**4)
    # Term-by-term derivatives of numerator and denominator w.r.t. T.
    dnum = (YSP[1]+
            YSP[2]*T*2+
            YSP[3]*T**2*3+
            YSP[4]*T**3*4)
    dden = (ZSP[1]+
            ZSP[2]*T*2+
            ZSP[3]*T**2*3+
            ZSP[4]*T**3*4)
    # Quotient rule gives dPsat/dT.
    return (num/den, (den*dnum-num*dden)/den**2)
def tsat(P):
    """Return saturation temperature and first derivative for given pressure.
    :param P: Pressure (degC)
    :type P: fl64
    :returns: Two element tuple containing (saturation temperature, derivative).
    """
    # Saturation temperature is a rational polynomial Y(P)/Z(P) with quartic
    # numerator and denominator coefficients YST and ZST (module constants).
    num = (YST[0]+
           YST[1]*P+
           YST[2]*P**2+
           YST[3]*P**3+
           YST[4]*P**4)
    den = (ZST[0]+
           ZST[1]*P+
           ZST[2]*P**2+
           ZST[3]*P**3+
           ZST[4]*P**4)
    # Term-by-term derivatives of numerator and denominator w.r.t. P.
    dnum = (YST[1]+
            YST[2]*P*2+
            YST[3]*P**2*3+
            YST[4]*P**3*4)
    dden = (ZST[1]+
            ZST[2]*P*2+
            ZST[3]*P**2*3+
            ZST[4]*P**3*4)
    # Quotient rule gives dTsat/dP.
    return (num/den, (den*dnum-num*dden)/den**2)
def fluid_column(z,Tgrad,Tsurf,Psurf,iterations = 3):
    '''Calculate thermodynamic properties of a column of fluid.
    :param z: Vector of depths at which to return properties. If z does not begin at 0, this will be prepended.
    :type z: ndarray
    :param Tgrad: Temperature gradient in the column (degC / m), or the path
        of a two-column (depth, temperature) profile file.
    :type Tgrad: fl64
    :param Tsurf: Surface temperature (degC).
    :type Tsurf: fl64
    :param Psurf: Surface pressure (MPa).
    :type Psurf:
    :param iterations: Number of times to recalculate column pressure based on updated density.
    :type iterations: int
    :returns: Three element tuple containing (liquid, vapor, CO2) properties. Each contains a three column array corresponding to pressure, temperature, density, enthalpy and viscosity of the fluid.
    '''
    # normalise depths: positive values, increasing downward, starting at 0
    z = abs(np.array(z))
    if z[-1] < z[0]: z = np.flipud(z)
    if z[0] != 0: z = np.array([0,]+list(z))
    if isinstance(Tgrad,str): # interpret Tgrad as a down well temperature profile
        if not os.path.isfile(Tgrad): print('ERROR: cannot find temperature gradient file \''+Tgrad+'\'.'); return
        # sniff the delimiter (comma vs whitespace) from the first line
        tempfile = open(Tgrad,'r')
        ln = tempfile.readline()
        tempfile.close()
        commaFlag = False; spaceFlag = False
        if len(ln.split(',')) > 1: commaFlag = True
        elif len(ln.split()) > 1: spaceFlag = True
        if not commaFlag and not spaceFlag: print('ERROR: incorrect formatting for \''+Tgrad+'\'. Expect first column depth (m) and second column temperature (degC), either comma or space separated.'); return
        if commaFlag: tempdat = np.loadtxt(Tgrad,delimiter=',')
        else: tempdat = np.loadtxt(Tgrad)
        zt = tempdat[:,0]; tt = tempdat[:,1]
        # interpolate the supplied profile onto the requested depths
        T = np.interp(z,zt,tt)
    else:
        Tgrad = abs(Tgrad)
        T = Tsurf + Tgrad*z
    # initial hydrostatic gradients (MPa/m): 800 kg/m^3 for CO2, 1000 kg/m^3 for water
    Pgrad = 800*9.81/1e6
    Phgrad = 1000*9.81/1e6
    if co2Vars:
        Pco2 = Psurf + Pgrad*z
    Ph = Psurf + Phgrad*z
    # fixed-point iteration: integrate the current density profile to update
    # the hydrostatic pressure, then re-evaluate density at the new pressure
    for i in range(iterations):
        if co2Vars:
            rho = dens(Pco2,T)[2]
            Pco2=np.array([(abs(np.trapz(rho[:i+1],z[:i+1]))*9.81/1e6)+Pco2[0] for i in range(len(rho))])
        rho = dens(Ph,T)[0]
        Ph=np.array([(abs(np.trapz(rho[:i+1],z[:i+1]))*9.81/1e6)+Ph[0] for i in range(len(rho))])
    # final property evaluation at the converged column pressures
    if co2Vars:
        rho = dens(Pco2,T)
        H = enth(Pco2,T)
        mu = visc(Pco2,T)
    rhoh = dens(Ph,T)
    Hh = enth(Ph,T)
    muh = visc(Ph,T)
    if co2Vars:
        return (np.array([Ph,T,rhoh[0],Hh[0],muh[0]]).T,np.array([Ph,T,rhoh[1],Hh[1],muh[1]]).T,np.array([Pco2,T,rho[2],H[2],mu[2]]).T)
    else:
        return (np.array([Ph,T,rhoh[0],Hh[0],muh[0]]).T,np.array([Ph,T,rhoh[1],Hh[1],muh[1]]).T,np.array([]))
def co2_dens_sat_line(derivative=''):
    """Print and return CO2 density (or a derivative of it) along the CO2
    saturation line, for both the liquid and gas branches.
    :param derivative: Empty string for density itself, 'P' or 'pressure'
        for d(density)/dP, 'T' or 'temperature' for d(density)/dT.
    :type derivative: str
    :returns: List of (P, T, liquid value, gas value) tuples along the
        saturation line, or None if the CO2 property table is unavailable.
    """
    if not co2Vars:
        print("Error: CO2 property table not found")
        return
    # calculate co2 properties
    if not derivative: k = 'density'
    elif derivative in ['P','pressure']: k = 'dddp'
    elif derivative in ['T','temperature']: k = 'dddt'
    print('P T liquid-'+k+' gas-'+k)
    for p,t,l,g in zip(co2_sat_P, co2_sat_T, co2l_arrays[k], co2g_arrays[k]):
        print(p,t,l,g)
    return list(zip(co2_sat_P, co2_sat_T, co2l_arrays[k], co2g_arrays[k]))
def theta(i, del2, tau2):
    """theta helper for the non-analytic CO2 residual terms (indices 39-41)."""
    dist = (del2-1.e0)**2.e0
    return (1.e0-tau2)+(CAPA[i]*(dist**(1.e0/(2.e0*BETA[i]))))
def psi(i, del2, tau2):
    """Exponential-bell psi factor for the non-analytic CO2 residual terms."""
    exponent = -(CAPC[i]*((del2-1.e0)**2.e0))-(CAPD[i]*((tau2-1.e0)**2.e0))
    return np.exp(exponent)
def dpsiddel(i, del2, tau2):
    """d(psi)/d(del2): chain rule on the Gaussian in reduced density."""
    return -2.e0*CAPC[i]*(del2-1.e0)*psi(i, del2, tau2)
def capdel(i, del2, tau2):
    """Capital-Delta distance function of the non-analytic residual terms."""
    th = theta(i, del2, tau2)
    return (th*th)+(CAPB[i]*(((del2-1.e0)**2.e0)**ACO2[i]))
def ddelbiddel(i, del2, tau2):
    """d(capdel**BCO2[i])/d(del2) via the chain rule."""
    cd = capdel(i, del2, tau2)
    ddd = dcapdelddel(i, del2, tau2)
    return BCO2[i]*(cd**(BCO2[i]-1.e0))*ddd
def dcapdelddel(i, del2, tau2):
    """d(capdel)/d(del2) for the non-analytic residual terms."""
    th = theta(i, del2, tau2)
    inner = ((CAPA[i]*th*(2.e0/BETA[i])*
              ((del2-1.e0)**2.e0)**((1/(2.e0*BETA[i]))-1.e0))+(2.e0*CAPB[i]*ACO2[i]*
             (((del2-1.e0)**2.e0)**(ACO2[i]-1.e0))))
    return (del2-1.e0)*inner
def phir(del2, tau2):
    """Residual part of the reduced Helmholtz free energy for CO2.
    :param del2: reduced density (density / DENC).
    :param tau2: inverse reduced temperature (TC / T).
    Sums four groups of terms using module-level coefficient arrays
    N, D, TI, C, ALPHA, BETA, GAMMA, IPSILON, BCO2 (term structure
    presumably follows the Span & Wagner CO2 EOS -- verify).
    """
    r_helm = 0.0e0
    # polynomial terms
    for i in range(7):
        r_helm = r_helm+(N[i]*(del2**D[i])*(tau2**TI[i]))
    # exponential terms
    for i in range(7, 34):
        r_helm = r_helm+(N[i]*(del2**D[i])*(tau2**TI[i])*np.exp(-del2**C[i]))
    # Gaussian-bell terms
    for i in range(34,39):
        r_helm = (r_helm+(N[i]*(del2**D[i])*(tau2**TI[i])*
                  np.exp((-ALPHA[i]*((del2-IPSILON[i])**2))-(BETA[i]*
                  ((tau2-GAMMA[i])**2)))))
    # non-analytic terms near the critical point (use psi/capdel helpers)
    for i in range(39,42):
        psi1=psi(i,del2,tau2)
        capdel1=capdel(i,del2,tau2)
        r_helm=r_helm+(N[i]*(capdel1**BCO2[i])*del2*psi1)
    return r_helm
def dphirddel(del2,tau2):
    """Partial derivative of the residual Helmholtz energy phir with respect
    to reduced density del2, at inverse reduced temperature tau2.  Mirrors
    the four term groups of phir.
    """
    derdr_helm = 0.0e0
    # polynomial terms
    for i in range(7):
        derdr_helm=derdr_helm+(N[i]*D[i]*(del2**(D[i]-1.e0))*(tau2**TI[i]))
    # exponential terms
    for i in range(7,34):
        derdr_helm=(derdr_helm+(N[i]*(del2**(D[i]-1.e0))*(tau2**TI[i])
                    *np.exp(-del2**C[i])*(D[i]-(C[i]*(del2**C[i])))))
    # Gaussian-bell terms
    for i in range(34, 39):
        derdr_helm = (derdr_helm+(N[i]*(del2**D[i])*(tau2**TI[i])*
                      np.exp((-ALPHA[i]*((del2-IPSILON[i])**2.0e0))-(BETA[i]*
                      ((tau2-GAMMA[i])**2.0e0)))*
                      ((D[i]/del2)-(2.0e0*ALPHA[i]*(del2-IPSILON[i])))))
    # non-analytic critical-region terms (product rule over capdel**B * del * psi)
    for i in range(39, 42):
        psi1=psi(i,del2,tau2)
        capdel1=capdel(i,del2,tau2)
        dsidd=dpsiddel(i,del2,tau2)
        ddelbdd=ddelbiddel(i,del2,tau2)
        derdr_helm=(derdr_helm+(N[i]*(((capdel1**BCO2[i])*(psi1+
                    (del2*dsidd)))+(del2*psi1*ddelbdd))))
    return derdr_helm
def co2_fugacity(var2, var3):
    """Fugacity coefficient of CO2 from the Helmholtz residual terms.
    :param var2: temperature (K)
    :param var3: density -- presumably kg/m^3, inferred from the DENC
        reduction; verify against callers.
    """
    dell = var3/DENC   # reduced density
    tau = TC/var2      # inverse reduced temperature
    fir = phir(dell, tau)
    fird = dphirddel(dell, tau)
    # ln(phi) = phir + del*dphir/ddel - ln(1 + del*dphir/ddel)
    return np.exp(fir+(dell*fird)-np.log(1.e0+(dell*fird)))
def mco2(P,T,nc=0.):
    '''
    Dissolved CO2 concentration in brine (Duan-type model).
    P = pressure (MPa)
    T = temperature (degC)
    nc = NaCl content (mol/kg)
    '''
    # calculate the dissolved CO2 concentration based on Duan (2003) eqn.
    # need to calculate fugacity first based on the density.
    c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15 = np.zeros(15)
    # added fugacity calculation from Duan 2006 paper formulation
    # equation 2.
    P = np.max([P,1.e-3])
    t = T + 273.15 # convert to K
    p = P*10. # convert to bar
    mol = nc#/58.443e3 # convert to mol/kg
    # Piecewise interaction-parameter sets selected by (T, p) regime;
    # presumably the regions/coefficients of Duan (2006) -- verify against
    # the paper before modifying any of the constants below.
    if ((t>273.e0)and(t<573.e0)):
        if (t<TC):
            a1 = -7.0602087e0
            a2 = 1.9391218e0
            a3 = -1.6463597e0
            a4 = -3.2995634
            # saturation-pressure estimate used as the regime boundary p1
            nu = 1.e0-(t/TC)
            ps1 = a1*nu+(a2*(nu**1.5e0))+(a3*(nu**2.e0))+(a4*(nu**4.e0))
            ps2 = a1+(1.5e0*a2*(nu**0.5e0))+(2.0e0*a3*nu)+(4.e0*a4*(nu**3.e0))
            ps = ps1*TC/t
            ps = np.exp(ps)*PC
            p1 = ps
        elif ((t>TC)and(t<405.e0)):
            p1 = 75.e0+(t-TC)*1.25e0
        elif (t>405.e0):
            p1 = 200.e0
        if( p<p1):
            c1 = 1.e0
            c2 = 4.7586835e-3
            c3 = -3.3569963e-6
            c5 = -1.3179356
            c6 = -3.8389101e-6
            c8 = 2.2815104e-3
        elif (t<340.e0):
            if(p<1000):
                c1 = -7.1734882e-1
                c2 = 1.5985379e-4
                c3 = -4.9286471e-7
                c6 = -2.7855285e-7
                c7 = 1.1877015e-9
                c12 = -96.539512
                c13 = 4.4774938e-1
                c14 = 101.81078e0
                c15 = 5.3783879e-6
            else:
                c1 = -6.5129019e-2
                c2 = -2.1429977e-4
                c3 = -1.144493e-6
                c6 = -1.1558081e-7
                c7 = 1.195237e-9
                c12 = -221.34306e0
                c14 = 71.820393e0
                c15 = 6.6089246e-6
        elif (t<435.e0):
            if (p<1000):
                c1 = 5.0383896e0
                c2 = -4.4257744e-3
                c4 = 1.9572733e0
                c6 = 2.4223436e-6
                c8 = -9.3796135e-4
                c9 = -1.502603e0
                c10 = 3.027224e-3
                c11 = -31.377342e0
                c12 = -12.847063e0
                c15 = -1.5056648e-5
            else:
                c1 = -16.063152e0
                c2 = -2.705799e-3
                c4 = 1.4119239e-1
                c6 = 8.1132965e-7
                c8 = -1.1453082e-4
                c9 = 2.3895671e0
                c10 = 5.0527457e-4
                c11 = -17.76346e0
                c12 = 985.92232e0
                c15 = -5.4965256e-7
        else:
            c1 = -1.569349e-1
            c2 = 4.4621407e-4
            c3 = -9.1080591e-7
            c6 = 1.0647399e-7
            c7 = 2.4273357e-10
            c9 = 3.5874255e-1
            c10 = 6.3319710e-5
            c11 = -249.89661e0
            c14 = 888.768e0
            c15 = -6.6348003e-7
    # fugacity of CO2 evaluated at the CO2-phase density from dens()
    fg = co2_fugacity(t,dens(P,T)[-1][0])
    # chemical-potential / interaction terms of the solubility model
    liq_cp = (28.9447706e0 + (-0.0354581768e0*t) + (-4770.67077e0/t)
              +(1.02782768e-5*t*t) + (33.8126098/(630-t)) + (9.0403714e-3*p)
              +(-1.14934031e-3*p*np.log(t)) + (-0.307405726*p/t)
              + (-0.0907301486*p/(630.e0-t))
              + (9.32713393e-4*p*p/((630.e0-t)**2.e0)))
    # changed the above line not multiplied by 2 before
    lambdaco2_na = (-0.411370585e0 + (6.07632013e-4*t)
                    + (97.5347708e0/t) + (-0.0237622469e0*p/t)
                    + (0.0170656236*p/(630.e0-t)) + (1.41335834e-5*t*np.log(p)))
    tauco2_na_cl = (3.36389723e-4 + (-1.9829898e-5*t)
                    + (2.1222083e-3*p/t) + (-5.24873303e-3*p/(630.e0-t)))
    rhs = liq_cp + (2.e0*lambdaco2_na*mol) + (tauco2_na_cl*mol*mol)
    # tt = (t-647.29)/647.29
    # Ph20 = 220.85*t/647.29*(1.+CP[0]*(-tt)**1.9+CP[1]*t+CP[2]*t**2+CP[3]*t**3+CP[4]*t**4)
    # yco2 = (p-Ph20)/p
    # CO2 mole fraction assumed 1 (water partial pressure neglected; see
    # the commented-out alternative above)
    yco2 = 1.
    mco21 = yco2*fg*p*44.e-3
    # (mco21*0 + ...) keeps the array shape when inputs are arrays --
    # TODO confirm that is the intent
    mco2 = mco21/(mco21*0+np.exp(rhs))
    return mco2
def test_co2_sol():
    """Visual sanity check of the dissolved-CO2 model: plots mco2/44e-3
    (mol/kg) against temperature for several pressures.  Requires
    matplotlib and an interactive display (calls plt.show()).
    """
    P = 1.
    T = 30.
    m = mco2(P,T)
    from matplotlib import pyplot as plt
    f,ax = plt.subplots(1,1)
    P = np.linspace(1,200,101)
    # ax.plot(P, [mco2(Pi,573.15-273.15, 1.09)/44.e-3 for Pi in P], 'k-')
    # ax.plot(P, [mco2(Pi,573.15-273.15, 4.)/44.e-3 for Pi in P], 'b-')
    T = np.linspace(280,520,50)
    # one solubility-vs-temperature curve per pressure (MPa)
    for P in [5,10,20,50,100]:
        ax.plot(T, [mco2(P,Ti-273.15,0)/44.e-3 for Ti in T], 'k-')
    plt.show()
def run_tests():
    """Entry point for the module's ad-hoc (visual) self tests."""
    test_co2_sol()
if __name__ == "__main__":
run_tests() | lgpl-2.1 |
metpy/MetPy | examples/Four_Panel_Map.py | 6 | 4683 | # Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Four Panel Map
===============
By reading model output data from a netCDF file, we can create a four panel plot showing:
* 300 hPa heights and winds
* 500 hPa heights and absolute vorticity
* Surface temperatures
* Precipitable water
"""
###########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
import xarray as xr
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
###########################################
# Single Lambert conformal projection shared by all four map panels
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
###########################################
# Function used to create the map subplots
def plot_background(ax):
    """Set the CONUS map extent and draw coastlines, state and country
    borders on the given GeoAxes; returns the axes for chaining."""
    ax.set_extent([235., 290., 20., 55.])
    for feature in (cfeature.COASTLINE.with_scale('50m'),
                    cfeature.STATES,
                    cfeature.BORDERS):
        ax.add_feature(feature, linewidth=0.5)
    return ax
###########################################
# Open the example netCDF data
ds = xr.open_dataset(get_test_data('gfs_output.nc', False))
print(ds)
###########################################
# Combine 1D latitude and longitudes into a 2D grid of locations
lon_2d, lat_2d = np.meshgrid(ds['lon'], ds['lat'])
###########################################
# Pull out the data
vort_500 = ds['vort_500'][0]
surface_temp = ds['temp'][0]
precip_water = ds['precip_water'][0]
winds_300 = ds['winds_300'][0]
###########################################
# Do unit conversions to what we wish to plot
# (convert_units changes the DataArray units in place; the vorticity is
# scaled by 1e5 so the colorbar below reads in 10^-5 s^-1)
vort_500 = vort_500 * 1e5
surface_temp.metpy.convert_units('degF')
precip_water.metpy.convert_units('inches')
winds_300.metpy.convert_units('knots')
###########################################
# Smooth the height data
# (light Gaussian smoothing so the height contours are not noisy)
heights_300 = ndimage.gaussian_filter(ds['heights_300'][0], sigma=1.5, order=0)
heights_500 = ndimage.gaussian_filter(ds['heights_500'][0], sigma=1.5, order=0)
###########################################
# Create the figure and plot background on different axes
fig, axarr = plt.subplots(nrows=2, ncols=2, figsize=(20, 13), constrained_layout=True,
                          subplot_kw={'projection': crs})
add_metpy_logo(fig, 140, 120, size='large')
axlist = axarr.flatten()
for ax in axlist:
    plot_background(ax)
# Upper left plot - 300-hPa winds and geopotential heights
cf1 = axlist[0].contourf(lon_2d, lat_2d, winds_300, cmap='cool', transform=ccrs.PlateCarree())
c1 = axlist[0].contour(lon_2d, lat_2d, heights_300, colors='black', linewidths=2,
                       transform=ccrs.PlateCarree())
axlist[0].clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[0].set_title('300-hPa Wind Speeds and Heights', fontsize=16)
cb1 = fig.colorbar(cf1, ax=axlist[0], orientation='horizontal', shrink=0.74, pad=0)
cb1.set_label('knots', size='x-large')
# Upper right plot - 500mb absolute vorticity and geopotential heights
cf2 = axlist[1].contourf(lon_2d, lat_2d, vort_500, cmap='BrBG', transform=ccrs.PlateCarree(),
                         zorder=0, norm=plt.Normalize(-32, 32))
c2 = axlist[1].contour(lon_2d, lat_2d, heights_500, colors='k', linewidths=2,
                       transform=ccrs.PlateCarree())
axlist[1].clabel(c2, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[1].set_title('500-hPa Absolute Vorticity and Heights', fontsize=16)
cb2 = fig.colorbar(cf2, ax=axlist[1], orientation='horizontal', shrink=0.74, pad=0)
cb2.set_label(r'$10^{-5}$ s$^{-1}$', size='x-large')
# Lower left plot - surface temperatures
cf3 = axlist[2].contourf(lon_2d, lat_2d, surface_temp, cmap='YlOrRd',
                         transform=ccrs.PlateCarree(), zorder=0)
axlist[2].set_title('Surface Temperatures', fontsize=16)
cb3 = fig.colorbar(cf3, ax=axlist[2], orientation='horizontal', shrink=0.74, pad=0)
cb3.set_label(u'\N{DEGREE FAHRENHEIT}', size='x-large')
# Lower right plot - precipitable water entire atmosphere
cf4 = axlist[3].contourf(lon_2d, lat_2d, precip_water, cmap='Greens',
                         transform=ccrs.PlateCarree(), zorder=0)
axlist[3].set_title('Precipitable Water', fontsize=16)
cb4 = fig.colorbar(cf4, ax=axlist[3], orientation='horizontal', shrink=0.74, pad=0)
cb4.set_label('in.', size='x-large')
# Set height padding for plots
fig.set_constrained_layout_pads(w_pad=0., h_pad=0.1, hspace=0., wspace=0.)
# Set figure title
fig.suptitle(ds['time'][0].dt.strftime('%d %B %Y %H:%MZ'), fontsize=24)
# Display the plot
plt.show()
andyk/cluster-scheduler-simulator | src/main/python/graphing-scripts/utils.py | 1 | 3341 | # Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from matplotlib import use, rc
use('Agg')
import matplotlib.pyplot as plt
# plot saving utility function
def writeout(filename_base, formats=('pdf',)):
    """Save the current matplotlib figure as filename_base.<fmt> for each
    requested output format, cropped tightly around the figure contents.

    `formats` may be any iterable of matplotlib-supported format strings.
    (The default was previously a mutable list, a shared-state hazard;
    a tuple is behaviorally equivalent and safe.)
    """
    for fmt in formats:
        plt.savefig("%s.%s" % (filename_base, fmt), format=fmt,
                    bbox_inches='tight')
def set_leg_fontsize(size):
    """Set the default matplotlib legend font size (points)."""
    rc('legend', fontsize=size)
def set_paper_rcs():
    """Matplotlib settings for camera-ready paper figures: Helvetica via
    LaTeX, small fonts, a 3.33in x 2.22in canvas and thin lines."""
    font_opts = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],
                 'serif': ['Helvetica'], 'size': 8}
    rc('font', **font_opts)
    rc('text', usetex=True)
    rc('legend', fontsize=7)
    rc('figure', figsize=(3.33, 2.22))
    rc('axes', linewidth=0.5)
    rc('lines', linewidth=0.5)
def set_rcs():
    """Matplotlib settings for larger (6in x 4in) presentation-style
    figures with LaTeX text rendering."""
    font_opts = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],
                 'serif': ['Times'], 'size': 12}
    rc('font', **font_opts)
    rc('text', usetex=True)
    rc('legend', fontsize=7)
    rc('figure', figsize=(6, 4))
    rc('figure.subplot', left=0.10, top=0.90, bottom=0.12, right=0.95)
    rc('axes', linewidth=0.5)
    rc('lines', linewidth=0.5)
def append_or_create(d, i, e):
    """Append e to the list at d[i], creating the list if key i is new.

    d - a dictionary of lists.
    i - the key into d.
    e - the value to append to the list at d[i].
    """
    # setdefault does the create-if-missing and lookup in one step
    d.setdefault(i, []).append(e)
def append_or_create_2d(d, i, k, e):
    """Append e to the list at position (i, k) in a two-level dictionary.

    d - a dictionary of dictionaries of lists, essentially a 2d dictionary.
    i, k - essentially a 2 element tuple to use as the key into this 2d dict.
    e - the value to add to the list indexed by key (i, k).
    """
    # chained setdefault creates the inner dict and list as needed
    d.setdefault(i, {}).setdefault(k, []).append(e)
def cell_to_anon(cell):
    """Return the anonymized display name for a trace cell identifier.

    'A'/'B'/'C' map to themselves; the synthetic workload maps to 'SYNTH'.
    Raises ValueError (with the offending value) for unknown cells.
    Previously this used a Python-2-only `print` statement followed by a
    bare `raise Exception`, which is a syntax error under Python 3 and
    discarded the offending value.
    """
    mapping = {'A': 'A', 'B': 'B', 'C': 'C', 'synth': 'SYNTH'}
    if cell not in mapping:
        raise ValueError("unknown cell!? %r" % (cell,))
    return mapping[cell]
| bsd-3-clause |
DGrady/pandas | pandas/core/series.py | 2 | 102152 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import types
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_bool,
is_integer, is_integer_dtype,
is_float_dtype,
is_extension_type, is_datetimetz,
is_datetimelike,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_list_like,
is_hashable,
is_iterator,
is_dict_like,
is_scalar,
_is_unorderable_exception,
_ensure_platform_int,
pandas_dtype)
from pandas.core.dtypes.generic import ABCSparseArray, ABCDataFrame
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_convert_platform,
maybe_cast_to_datetime, maybe_castable)
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.common import (is_bool_indexer,
_default_index,
_asarray_tuplesafe,
_values_from_object,
_try_sort,
_maybe_match_name,
SettingWithCopyError,
_maybe_box_datetimelike,
_dict_compat,
standardize_mapping)
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
Float64Index, _ensure_index)
from pandas.core.indexing import check_bool_indexer, maybe_convert_indices
from pandas.core import generic, base
from pandas.core.internals import SingleBlockManager
from pandas.core.categorical import Categorical, CategoricalAccessor
import pandas.core.strings as strings
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.period import PeriodIndex
from pandas import compat
from pandas.io.formats.terminal import get_terminal_size
from pandas.compat import zip, u, OrderedDict, StringIO
from pandas.compat.numpy import function as nv
import pandas.core.ops as ops
import pandas.core.algorithms as algorithms
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.io.formats.format as fmt
from pandas.util._decorators import Appender, deprecate_kwarg, Substitution
from pandas.util._validators import validate_bool_kwarg
from pandas._libs import index as libindex, tslib as libts, lib, iNaT
from pandas.core.config import get_option
import pandas.plotting._core as gfx
__all__ = ['Series']
_shared_doc_kwargs = dict(
axes='index', klass='Series', axes_single_arg="{0, 'index'}",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
unique='np.ndarray', duplicated='Series',
optional_by='',
versionadded_to_excel='\n .. versionadded:: 0.20.0\n')
# see gh-16971
def remove_na(arr):
    """
    DEPRECATED : this function will be removed in a future version.
    """
    msg = ("remove_na is deprecated and is a private "
           "function. Do not use.")
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return remove_na_arraylike(arr)
def _coerce_method(converter):
    """ install the scalar coercion methods """

    def wrapper(self):
        # scalar coercion only makes sense for a length-1 Series
        if len(self) != 1:
            raise TypeError("cannot convert the series to "
                            "{0}".format(str(converter)))
        return converter(self.iloc[0])

    return wrapper
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, strings.StringAccessorMixin,
generic.NDFrame,):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex(len(data)) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
_metadata = ['name']
_accessors = frozenset(['dt', 'cat', 'str'])
_allow_index_ops = True
    def __init__(self, data=None, index=None, dtype=None, name=None,
                 copy=False, fastpath=False):
        # we are called internally, so short-circuit
        if fastpath:
            # data is an ndarray, index is defined
            if not isinstance(data, SingleBlockManager):
                data = SingleBlockManager(data, index, fastpath=True)
            if copy:
                data = data.copy()
            if index is None:
                index = data.index
        else:
            if index is not None:
                index = _ensure_index(index)
            if data is None:
                data = {}
            if dtype is not None:
                dtype = self._validate_dtype(dtype)
            # normalize the many accepted input types down to something the
            # block manager can hold
            if isinstance(data, MultiIndex):
                raise NotImplementedError("initializing a Series from a "
                                          "MultiIndex is not supported")
            elif isinstance(data, Index):
                # need to copy to avoid aliasing issues
                if name is None:
                    name = data.name
                data = data._to_embed(keep_tz=True)
                copy = True
            elif isinstance(data, np.ndarray):
                pass
            elif isinstance(data, Series):
                if name is None:
                    name = data.name
                if index is None:
                    index = data.index
                else:
                    data = data.reindex(index, copy=copy)
                data = data._data
            elif isinstance(data, dict):
                # an OrderedDict keeps insertion order; plain dicts are sorted
                if index is None:
                    if isinstance(data, OrderedDict):
                        index = Index(data)
                    else:
                        index = Index(_try_sort(data))
                try:
                    if isinstance(index, DatetimeIndex):
                        if len(data):
                            # coerce back to datetime objects for lookup
                            data = _dict_compat(data)
                            data = lib.fast_multiget(data,
                                                     index.asobject.values,
                                                     default=np.nan)
                        else:
                            data = np.nan
                    # GH #12169
                    elif isinstance(index, (PeriodIndex, TimedeltaIndex)):
                        data = ([data.get(i, np.nan) for i in index]
                                if data else np.nan)
                    else:
                        data = lib.fast_multiget(data, index.values,
                                                 default=np.nan)
                except TypeError:
                    data = ([data.get(i, np.nan) for i in index]
                            if data else np.nan)
            elif isinstance(data, SingleBlockManager):
                if index is None:
                    index = data.index
                else:
                    data = data.reindex(index, copy=copy)
            elif isinstance(data, Categorical):
                # GH12574: Allow dtype=category only, otherwise error
                if ((dtype is not None) and
                        not is_categorical_dtype(dtype)):
                    raise ValueError("cannot specify a dtype with a "
                                     "Categorical unless "
                                     "dtype='category'")
            elif (isinstance(data, types.GeneratorType) or
                  (compat.PY3 and isinstance(data, map))):
                data = list(data)
            elif isinstance(data, (set, frozenset)):
                raise TypeError("{0!r} type is unordered"
                                "".format(data.__class__.__name__))
            else:
                # handle sparse passed here (and force conversion)
                if isinstance(data, ABCSparseArray):
                    data = data.to_dense()
            # default integer index when none was given or inferred above
            if index is None:
                if not is_list_like(data):
                    data = [data]
                index = _default_index(len(data))
            # create/copy the manager
            if isinstance(data, SingleBlockManager):
                if dtype is not None:
                    data = data.astype(dtype=dtype, raise_on_error=False,
                                       copy=copy)
                elif copy:
                    data = data.copy()
            else:
                data = _sanitize_array(data, index, dtype, copy,
                                       raise_cast_failure=True)
                data = SingleBlockManager(data, index, fastpath=True)
        generic.NDFrame.__init__(self, data, fastpath=True)
        self.name = name
        self._set_axis(0, index, fastpath=True)
@classmethod
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
# return a sparse series here
if isinstance(arr, ABCSparseArray):
from pandas.core.sparse.series import SparseSeries
cls = SparseSeries
return cls(arr, index=index, name=name, dtype=dtype, copy=copy,
fastpath=fastpath)
    @property
    def _constructor(self):
        """Class used internally to construct new sliced/derived 1-D objects."""
        return Series
    @property
    def _constructor_expanddim(self):
        """Constructor used when expanding by one dimension (imported lazily
        to avoid a circular import with pandas.core.frame)."""
        from pandas.core.frame import DataFrame
        return DataFrame
# types
    @property
    def _can_hold_na(self):
        # delegate to the block manager: whether the underlying dtype can
        # represent missing values
        return self._data._can_hold_na
    # backing field for the index; assigned in _set_axis via object.__setattr__
    _index = None
    def _set_axis(self, axis, labels, fastpath=False):
        """ override generic, we want to set the _typ here """
        if not fastpath:
            labels = _ensure_index(labels)
        is_all_dates = labels.is_all_dates
        if is_all_dates:
            # an index that looks entirely date-like is coerced to a
            # DatetimeIndex unless it already is datetime-like
            if not isinstance(labels,
                              (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
                try:
                    labels = DatetimeIndex(labels)
                    # need to set here because we changed the index
                    if fastpath:
                        self._data.set_axis(axis, labels)
                except (libts.OutOfBoundsDatetime, ValueError):
                    # labels may exceed datetime bounds,
                    # or not be a DatetimeIndex
                    pass
        self._set_subtyp(is_all_dates)
        object.__setattr__(self, '_index', labels)
        if not fastpath:
            self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'time_series')
else:
object.__setattr__(self, '_subtyp', 'series')
    def _update_inplace(self, result, **kwargs):
        """Replace this object's internals with result's, in place."""
        # we want to call the generic version and not the IndexOpsMixin
        return generic.NDFrame._update_inplace(self, result, **kwargs)
    @property
    def name(self):
        """The name of the Series (used as the column label when the Series
        becomes a DataFrame column)."""
        return self._name
    @name.setter
    def name(self, value):
        # names must be hashable so they can serve as index/column labels
        if value is not None and not is_hashable(value):
            raise TypeError('Series.name must be a hashable type')
        object.__setattr__(self, '_name', value)
    # ndarray compatibility
    @property
    def dtype(self):
        """ return the dtype object of the underlying data """
        return self._data.dtype
    @property
    def dtypes(self):
        """ return the dtype object of the underlying data """
        # plural alias for API symmetry with DataFrame (one "column" here)
        return self._data.dtype
    @property
    def ftype(self):
        """ return if the data is sparse|dense """
        return self._data.ftype
    @property
    def ftypes(self):
        """ return if the data is sparse|dense """
        # plural alias for API symmetry with DataFrame
        return self._data.ftype
    @property
    def values(self):
        """
        Return Series as ndarray or ndarray-like
        depending on the dtype
        Returns
        -------
        arr : numpy.ndarray or ndarray-like
        Examples
        --------
        >>> pd.Series([1, 2, 3]).values
        array([1, 2, 3])
        >>> pd.Series(list('aabc')).values
        array(['a', 'a', 'b', 'c'], dtype=object)
        >>> pd.Series(list('aabc')).astype('category').values
        [a, a, b, c]
        Categories (3, object): [a, b, c]
        Timezone aware datetime data is converted to UTC:
        >>> pd.Series(pd.date_range('20130101', periods=3,
        ...           tz='US/Eastern')).values
        array(['2013-01-01T05:00:00.000000000',
               '2013-01-02T05:00:00.000000000',
               '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
        """
        # "external" representation from the block manager, as opposed to
        # the internal one returned by ``_values``
        return self._data.external_values()
    @property
    def _values(self):
        """ return the internal repr of this data """
        return self._data.internal_values()
    def _formatting_values(self):
        """Return the values that can be formatted (used by SeriesFormatter
        and DataFrameFormatter)
        """
        return self._data.formatting_values()
    def get_values(self):
        """ same as values (but handles sparseness conversions); is a view """
        return self._data.get_values()
    @property
    def asobject(self):
        """
        return object Series which contains boxed values
        *this is an internal non-public method*
        """
        return self._data.asobject
    # ops
    def ravel(self, order='C'):
        """
        Return the flattened underlying data as an ndarray
        See also
        --------
        numpy.ndarray.ravel
        """
        return self._values.ravel(order=order)
    def compress(self, condition, *args, **kwargs):
        """
        Return selected slices of an array along given axis as a Series
        See also
        --------
        numpy.ndarray.compress
        """
        # numpy compat: accept-but-validate extra numpy arguments
        nv.validate_compress(args, kwargs)
        return self[condition]
    def nonzero(self):
        """
        Return the indices of the elements that are non-zero
        This method is equivalent to calling `numpy.nonzero` on the
        series data. For compatability with NumPy, the return value is
        the same (a tuple with an array of indices for each dimension),
        but it will always be a one-item tuple because series only have
        one dimension.
        Examples
        --------
        >>> s = pd.Series([0, 3, 0, 4])
        >>> s.nonzero()
        (array([1, 3]),)
        >>> s.iloc[s.nonzero()[0]]
        1    3
        3    4
        dtype: int64
        See Also
        --------
        numpy.nonzero
        """
        return self._values.nonzero()
    def put(self, *args, **kwargs):
        """
        Applies the `put` method to its `values` attribute
        if it has one.
        See also
        --------
        numpy.ndarray.put
        """
        self._values.put(*args, **kwargs)
def __len__(self):
"""
return the length of the Series
"""
return len(self._data)
    def view(self, dtype=None):
        """Return a new Series, with the same index, reinterpreting the
        underlying values as ``dtype`` (ndarray.view semantics)."""
        return self._constructor(self._values.view(dtype),
                                 index=self.index).__finalize__(self)
    def __array__(self, result=None):
        """
        the array interface, return my values
        """
        return self.get_values()
    def __array_wrap__(self, result, context=None):
        """
        Gets called after a ufunc
        """
        # rewrap the ufunc result as a Series with this object's index/metadata
        return self._constructor(result, index=self.index,
                                 copy=False).__finalize__(self)
    def __array_prepare__(self, result, context=None):
        """
        Gets called prior to a ufunc
        """
        # nice error message for non-ufunc types
        if context is not None and not isinstance(self._values, np.ndarray):
            obj = context[1][0]
            raise TypeError("{obj} with dtype {dtype} cannot perform "
                            "the numpy op {op}".format(
                                obj=type(obj).__name__,
                                dtype=getattr(obj, 'dtype', None),
                                op=context[0].__name__))
        return result
    # complex
    # real/imag expose (and mutate through) the components of the underlying
    # values array
    @property
    def real(self):
        return self.values.real
    @real.setter
    def real(self, v):
        self.values.real = v
    @property
    def imag(self):
        return self.values.imag
    @imag.setter
    def imag(self, v):
        self.values.imag = v
    # coercion
    # scalar conversion methods; raise TypeError unless the Series has
    # exactly one element (see _coerce_method)
    __float__ = _coerce_method(float)
    __long__ = _coerce_method(int)
    __int__ = _coerce_method(int)
    def _unpickle_series_compat(self, state):
        """Restore state from legacy pickle formats (dict-based, or the
        pre-0.12 (ndarray-state, own-state) tuple)."""
        if isinstance(state, dict):
            self._data = state['_data']
            self.name = state['name']
            self.index = self._data.index
        elif isinstance(state, tuple):
            # < 0.12 series pickle
            nd_state, own_state = state
            # recreate the ndarray
            data = np.empty(nd_state[1], dtype=nd_state[2])
            np.ndarray.__setstate__(data, nd_state)
            # backwards compat
            index, name = own_state[0], None
            if len(own_state) > 1:
                name = own_state[1]
            # recreate
            self._data = SingleBlockManager(data, index, fastpath=True)
            self._index = index
            self.name = name
        else:
            raise Exception("cannot unpickle legacy formats -> [%s]" % state)
# indexers
    @property
    def axes(self):
        """Return a list of the row axis labels"""
        # a Series has a single axis: its index
        return [self.index]
    def _ixs(self, i, axis=0):
        """
        Return the i-th value or values in the Series by location
        Parameters
        ----------
        i : int, slice, or sequence of integers
        Returns
        -------
        value : scalar (int) or Series (slice, sequence)
        """
        try:
            # dispatch to the values if we need
            values = self._values
            if isinstance(values, np.ndarray):
                return libindex.get_value_at(values, i)
            else:
                return values[i]
        except IndexError:
            raise
        except:
            # positional lookup on the raw values failed; fall back to
            # index-aware slicing / take
            if isinstance(i, slice):
                indexer = self.index._convert_slice_indexer(i, kind='iloc')
                return self._get_values(indexer)
            else:
                label = self.index[i]
                if isinstance(label, Index):
                    return self.take(i, axis=axis, convert=True)
                else:
                    return libindex.get_value_at(self, i)
@property
def _is_mixed_type(self):
return False
def _slice(self, slobj, axis=0, kind=None):
slobj = self.index._convert_slice_indexer(slobj,
kind=kind or 'getitem')
return self._get_values(slobj)
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
try:
result = self.index.get_value(self, key)
if not is_scalar(result):
if is_list_like(result) and not isinstance(result, Series):
# we need to box if we have a non-unique index here
# otherwise have inline ndarray/lists
if not self.index.is_unique:
result = self._constructor(
result, index=[key] * len(result),
dtype=self.dtype).__finalize__(self)
return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
elif key is Ellipsis:
return self
elif is_bool_indexer(key):
pass
else:
# we can try to coerce the indexer (or this will raise)
new_key = self.index._convert_scalar_indexer(key,
kind='getitem')
if type(new_key) != type(key):
return self.__getitem__(new_key)
raise
except Exception:
raise
if is_iterator(key):
key = list(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind='getitem')
return self._get_values(indexer)
elif isinstance(key, ABCDataFrame):
raise TypeError('Indexing a Series with DataFrame is not '
'supported, use the appropriate DataFrame column')
else:
if isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
# pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key)
if key_type == 'integer':
if self.index.is_integer() or self.index.is_floating():
return self.reindex(key)
else:
return self._get_values(key)
elif key_type == 'boolean':
return self._get_values(key)
else:
try:
# handle the dup indexing case (GH 4246)
if isinstance(key, (list, tuple)):
return self.loc[key]
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
def _get_values_tuple(self, key):
    """
    Select values for a tuple key (MultiIndex partial indexing).

    Parameters
    ----------
    key : tuple
        One label per MultiIndex level (possibly a partial key).

    Raises
    ------
    ValueError
        If the index is not a MultiIndex.
    """
    # mpl hackaround
    # matplotlib can pass tuples containing None; treat those
    # positionally instead of as MultiIndex labels
    if any(k is None for k in key):
        return self._get_values(key)
    if not isinstance(self.index, MultiIndex):
        raise ValueError('Can only tuple-index with a MultiIndex')
    # If key is contained, would have returned by now
    indexer, new_index = self.index.get_loc_level(key)
    return self._constructor(self._values[indexer],
                             index=new_index).__finalize__(self)
def _get_values(self, indexer):
    """
    Return a new Series for the given positional indexer.

    Falls back to a raw ndarray selection when the block manager
    cannot represent ``indexer`` as a slice.
    """
    try:
        return self._constructor(self._data.get_slice(indexer),
                                 fastpath=True).__finalize__(self)
    except Exception:
        # indexer not usable at the block level; return the plain
        # values selection (may be a scalar or ndarray, not a Series)
        return self._values[indexer]
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
def setitem(key, value):
try:
self._set_with_engine(key, value)
return
except (SettingWithCopyError):
raise
except (KeyError, ValueError):
values = self._values
if (is_integer(key) and
not self.index.inferred_type == 'integer'):
values[key] = value
return
elif key is Ellipsis:
self[:] = value
return
elif com.is_bool_indexer(key):
pass
elif is_timedelta64_dtype(self.dtype):
# reassign a null value to iNaT
if isna(value):
value = iNaT
try:
self.index._engine.set_value(self._values, key,
value)
return
except TypeError:
pass
self.loc[key] = value
return
except TypeError as e:
if (isinstance(key, tuple) and
not isinstance(self.index, MultiIndex)):
raise ValueError("Can only tuple-index with a MultiIndex")
# python 3 type errors should be raised
if _is_unorderable_exception(e):
raise IndexError(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
try:
self._where(~key, value, inplace=True)
return
except InvalidIndexError:
pass
self._set_with(key, value)
# do the setitem
cacher_needs_updating = self._check_is_chained_assignment_possible()
setitem(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
    """
    Set a single value by label using the index engine.

    Parameters
    ----------
    key : label
    value : object
        Value to assign at ``key``.

    Notes
    -----
    When the hash-based engine raises ``KeyError``, falls back to
    resolving the label through ``Index.get_loc`` and assigning into
    the underlying values directly.
    """
    values = self._values
    try:
        self.index._engine.set_value(values, key, value)
        return
    except KeyError:
        values[self.index.get_loc(key)] = value
        return
def _set_with(self, key, value):
    """
    Set values for a non-scalar key (slice, tuple, list-like, Index or
    boolean mask), dispatching on the inferred key type.

    Parameters
    ----------
    key : slice, tuple, list-like or Index
        Positions or labels identifying where to set.
    value : scalar or array-like
        Value(s) to assign.
    """
    # other: fancy integer or otherwise
    if isinstance(key, slice):
        indexer = self.index._convert_slice_indexer(key, kind='getitem')
        return self._set_values(indexer, value)
    else:
        if isinstance(key, tuple):
            try:
                self._set_values(key, value)
                # assignment succeeded; do NOT fall through, or the
                # label-based path below would set the values a
                # second time (and can raise for tuple keys)
                return
            except Exception:
                pass

        # coerce scalars / generic iterables to a list so the key
        # dtype can be inferred; an Index is kept as-is so that its
        # .inferred_type is used below instead of re-inferring
        if not isinstance(key, (list, Series, np.ndarray, Index)):
            try:
                key = list(key)
            except TypeError:
                # non-iterable scalar -> single-element label list
                key = [key]

        if isinstance(key, Index):
            key_type = key.inferred_type
        else:
            key_type = lib.infer_dtype(key)

        if key_type == 'integer':
            # integer keys are labels only when the index itself is
            # integer; otherwise they are positional
            if self.index.inferred_type == 'integer':
                self._set_labels(key, value)
            else:
                return self._set_values(key, value)
        elif key_type == 'boolean':
            self._set_values(key.astype(np.bool_), value)
        else:
            self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = _asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError('%s not contained in the index' % str(key[mask]))
self._set_values(indexer, value)
def _set_values(self, key, value):
if isinstance(key, Series):
key = key._values
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
@deprecate_kwarg(old_arg_name='reps', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Series. Refer to `numpy.ndarray.repeat`
for more information about the `repeats` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def reshape(self, *args, **kwargs):
"""
.. deprecated:: 0.19.0
Calling this method will raise an error. Please call
``.values.reshape(...)`` instead.
return an ndarray with the values shape
if the specified shape matches exactly the current shape, then
return self (for compat)
See also
--------
numpy.ndarray.reshape
"""
warnings.warn("reshape is deprecated and will raise "
"in a subsequent release. Please use "
".values.reshape(...) instead", FutureWarning,
stacklevel=2)
if len(args) == 1 and hasattr(args[0], '__iter__'):
shape = args[0]
else:
shape = args
if tuple(shape) == self.shape:
# XXX ignoring the "order" keyword.
nv.validate_reshape(tuple(), kwargs)
return self
return self._values.reshape(shape, **kwargs)
def get_value(self, label, takeable=False):
"""
Quickly retrieve single value at passed index label
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
if takeable is True:
return _maybe_box_datetimelike(self._values[label])
return self.index.get_value(self._values, label)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Returns
-------
series : Series
If label is contained, will be reference to calling Series,
otherwise a new object
"""
try:
if takeable:
self._values[label] = value
else:
self.index._engine.set_value(self._values, label, value)
return self
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return self
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Analogous to the :meth:`pandas.DataFrame.reset_index` function, see
docstring there.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns
name : object, default None
The name of the column corresponding to the Series values
inplace : boolean, default False
Modify the Series in place (do not create a new object)
Returns
----------
resetted : DataFrame, or Series if drop == True
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'],
... name = 'idx'))
>>> s.reset_index()
index 0
0 0 1
1 1 2
2 2 3
3 3 4
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo',
... 'foo', 'qux', 'qux']),
... np.array(['one', 'two', 'one', 'two', 'one', 'two',
... 'one', 'two'])]
>>> s2 = pd.Series(
... np.random.randn(8),
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
>>> s2.reset_index(level='a')
a 0
b
one bar -0.286320
two bar -0.587934
one baz 0.710491
two baz -1.429006
one foo 0.790700
two foo 0.824863
one qux -0.718963
two qux -0.055028
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if drop:
new_index = _default_index(len(self))
if level is not None and isinstance(self.index, MultiIndex):
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(self._values.copy(),
index=new_index).__finalize__(self)
elif inplace:
raise TypeError('Cannot reset_index inplace on a Series '
'to create a DataFrame')
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
width, height = get_terminal_size()
max_rows = (height if get_option("display.max_rows") == 0 else
get_option("display.max_rows"))
show_dimensions = get_option("display.show_dimensions")
self.to_string(buf=buf, name=self.name, dtype=self.dtype,
max_rows=max_rows, length=show_dimensions)
result = buf.getvalue()
return result
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True,
index=True, length=False, dtype=False, name=False,
max_rows=None):
"""
Render a string representation of the Series
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header: boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
"""
formatter = fmt.SeriesFormatter(self, name=name, length=length,
header=header, index=index,
dtype=dtype, na_rep=na_rep,
float_format=float_format,
max_rows=max_rows)
result = formatter.to_string()
# catch contract violations
if not isinstance(result, compat.text_type):
raise AssertionError("result must be of type unicode, type"
" of result is {0!r}"
"".format(result.__class__.__name__))
if buf is None:
return result
else:
try:
buf.write(result)
except AttributeError:
with open(buf, 'w') as f:
f.write(result)
def __iter__(self):
    """Iterate over the Series values, boxing datetime-like scalars."""
    values = self._values
    if not is_datetimelike(self):
        return iter(values)
    # wrap raw datetime64/timedelta64 scalars on the way out
    return (_maybe_box_datetimelike(v) for v in values)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return zip(iter(self.index), iter(self))
if compat.PY3: # pragma: no cover
items = iteritems
# ----------------------------------------------------------------------
# Misc public methods
def keys(self):
"""Alias for index"""
return self.index
def tolist(self):
""" Convert Series to a nested list """
return list(self.asobject)
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
value_dict : collections.Mapping
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = standardize_mapping(into)
return into_c(compat.iteritems(self))
def to_frame(self, name=None):
    """
    Convert Series to DataFrame

    Parameters
    ----------
    name : object, default None
        The passed name should substitute for the series name (if it has
        one).

    Returns
    -------
    data_frame : DataFrame
    """
    # with an explicit name, build the frame from a single-column
    # mapping; otherwise let the constructor use the Series name
    data = self if name is None else {name: self}
    return self._constructor_expanddim(data)
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse.series import SparseSeries
return SparseSeries(self, kind=kind,
fill_value=fill_value).__finalize__(self)
def _set_name(self, name, inplace=False):
    """
    Set the Series name.

    Parameters
    ----------
    name : str
    inplace : bool
        whether to modify `self` directly or return a copy
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    target = self if inplace else self.copy()
    target.name = name
    return target
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series
Returns
-------
nobs : int or Series (if level specified)
"""
from pandas.core.index import _get_na_value
if level is None:
return notna(_values_from_object(self)).sum()
if isinstance(level, compat.string_types):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
lab = np.array(self.index.labels[level], subok=False, copy=True)
mask = lab == -1
if mask.any():
lab[mask] = cnt = len(lev)
lev = lev.insert(cnt, _get_na_value(lev.dtype.type))
obs = lab[notna(self.values)]
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev,
dtype='int64').__finalize__(self)
def mode(self):
"""Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Returns
-------
modes : Series (sorted)
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self)
@Appender(base._shared_docs['unique'] % _shared_doc_kwargs)
def unique(self):
result = super(Series, self).unique()
if is_datetime64tz_dtype(self.dtype):
# we are special casing datetime64tz_dtype
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
result = result.asobject.values
return result
@Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
return super(Series, self).drop_duplicates(keep=keep, inplace=inplace)
@Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)
def duplicated(self, keep='first'):
return super(Series, self).duplicated(keep=keep)
def idxmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Index of first occurrence of minimum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values
Returns
-------
idxmin : Index of minimum of values
Notes
-----
This method is the Series version of ``ndarray.argmin``.
See Also
--------
DataFrame.idxmin
numpy.ndarray.argmin
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Index of first occurrence of maximum of values.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values
Returns
-------
idxmax : Index of maximum of values
Notes
-----
This method is the Series version of ``ndarray.argmax``.
See Also
--------
DataFrame.idxmax
numpy.ndarray.argmax
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
i = nanops.nanargmax(_values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
# ndarray compat
argmin = idxmin
argmax = idxmax
def round(self, decimals=0, *args, **kwargs):
    """
    Round each value in a Series to the given number of decimals.

    Parameters
    ----------
    decimals : int
        Number of decimal places to round to (default: 0).
        If decimals is negative, it specifies the number of
        positions to the left of the decimal point.

    Returns
    -------
    Series object

    See Also
    --------
    numpy.around
    DataFrame.round
    """
    # *args/**kwargs only exist for numpy compatibility; reject any
    # unsupported ones
    nv.validate_round(args, kwargs)
    rounded = _values_from_object(self).round(decimals)
    out = self._constructor(rounded, index=self.index)
    return out.__finalize__(self)
def quantile(self, q=0.5, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile : float or Series
if ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles.
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
self._check_percentile(q)
result = self._data.quantile(qs=q, interpolation=interpolation)
if is_list_like(q):
return self._constructor(result,
index=Float64Index(q),
name=self.name)
else:
# scalar
return result
def corr(self, other, method='pearson', min_periods=None):
"""
Compute correlation with `other` Series, excluding missing values
Parameters
----------
other : Series
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations needed to have a valid result
Returns
-------
correlation : float
"""
this, other = self.align(other, join='inner', copy=False)
if len(this) == 0:
return np.nan
return nanops.nancorr(this.values, other.values, method=method,
min_periods=min_periods)
def cov(self, other, min_periods=None):
    """
    Compute covariance with Series, excluding missing values

    Parameters
    ----------
    other : Series
    min_periods : int, optional
        Minimum number of observations needed to have a valid result

    Returns
    -------
    covariance : float

    Normalized by N-1 (unbiased estimator).
    """
    # restrict both operands to their common labels before computing
    left, right = self.align(other, join='inner', copy=False)
    if not len(left):
        return np.nan
    return nanops.nancov(left.values, right.values,
                         min_periods=min_periods)
def diff(self, periods=1):
    """
    1st discrete difference of object

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for forming difference

    Returns
    -------
    diffed : Series
    """
    values = _values_from_object(self)
    diffed = algorithms.diff(values, periods)
    return self._constructor(diffed, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
    """
    Lag-N autocorrelation

    Parameters
    ----------
    lag : int, default 1
        Number of lags to apply before performing autocorrelation.

    Returns
    -------
    autocorr : float
    """
    # autocorrelation is simply the correlation with a lagged copy
    shifted = self.shift(lag)
    return self.corr(shifted)
def dot(self, other):
    """
    Matrix multiplication with DataFrame or inner-product with Series
    objects

    Parameters
    ----------
    other : Series, DataFrame or array-like

    Returns
    -------
    dot_product : scalar or Series

    Raises
    ------
    ValueError
        If the indexes cannot be aligned, or the operand shapes do
        not match.
    TypeError
        If `other` is of an unsupported type.
    """
    from pandas.core.frame import DataFrame
    if isinstance(other, (Series, DataFrame)):
        # align on the union of the indexes; any label missing from
        # either side means the operands are not conformable
        common = self.index.union(other.index)
        if (len(common) > len(self.index) or
                len(common) > len(other.index)):
            raise ValueError('matrices are not aligned')

        left = self.reindex(index=common, copy=False)
        right = other.reindex(index=common, copy=False)
        lvals = left.values
        rvals = right.values
    else:
        left = self
        lvals = self.values
        rvals = np.asarray(other)

    if lvals.shape[0] != rvals.shape[0]:
        # a shape mismatch is a value problem; raise ValueError so
        # callers can catch it specifically (a bare Exception here
        # forced callers to catch everything)
        raise ValueError('Dot product shape mismatch, %s vs %s' %
                         (lvals.shape, rvals.shape))

    if isinstance(other, DataFrame):
        return self._constructor(np.dot(lvals, rvals),
                                 index=other.columns).__finalize__(self)
    elif isinstance(other, Series):
        return np.dot(lvals, rvals)
    elif isinstance(rvals, np.ndarray):
        return np.dot(lvals, rvals)
    else:  # pragma: no cover
        raise TypeError('unsupported type: %s' % type(other))
@Substitution(klass='Series')
@Appender(base._shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='v', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if sorter is not None:
sorter = _ensure_platform_int(sorter)
return self._values.searchsorted(Series(value)._values,
side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
.. versionadded: 0.19.0
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Returns
-------
appended : Series
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
to_concat = [self] + to_append
else:
to_concat = [self, to_append]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
combined : Series
"""
if not isinstance(other, Series):
raise AssertionError('Other operand must be Series')
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join='outer',
copy=False)
new_index = this.index
this_vals = this.values
other_vals = other.values
if fill_value is not None:
this_mask = isna(this_vals)
other_mask = isna(other_vals)
this_vals = this_vals.copy()
other_vals = other_vals.copy()
# one but not both
mask = this_mask ^ other_mask
this_vals[this_mask & mask] = fill_value
other_vals[other_mask & mask] = fill_value
with np.errstate(all='ignore'):
result = func(this_vals, other_vals)
name = _maybe_match_name(self, other)
result = self._constructor(result, index=new_index, name=name)
result = result.__finalize__(self)
if name is None:
# When name is None, __finalize__ overwrites current name
result.name = None
return result
def combine(self, other, func, fill_value=np.nan):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or
the other
Parameters
----------
other : Series or scalar value
func : function
fill_value : scalar value
Returns
-------
result : Series
"""
if isinstance(other, Series):
new_index = self.index.union(other.index)
new_name = _maybe_match_name(self, other)
new_values = np.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all='ignore'):
new_values[i] = func(lv, rv)
else:
new_index = self.index
with np.errstate(all='ignore'):
new_values = func(self._values, other)
new_name = self.name
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
    """
    Combine Series values, choosing the calling Series's values
    first. Result index will be the union of the two indexes

    Parameters
    ----------
    other : Series
        Series whose values fill the NA locations of the caller.

    Returns
    -------
    y : Series
        Union-indexed Series holding ``self``'s values where non-NA,
        otherwise the values from ``other``.
    """
    new_index = self.index.union(other.index)
    this = self.reindex(new_index, copy=False)
    other = other.reindex(new_index, copy=False)
    # TODO: do we need name?
    name = _maybe_match_name(self, other)  # noqa
    # keep `this` where it is non-NA, otherwise take from `other`
    rs_vals = com._where_compat(isna(this), other._values, this._values)
    return self._constructor(rs_vals, index=new_index).__finalize__(self)
def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index
Parameters
----------
other : Series
"""
other = other.reindex_like(self)
mask = notna(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
@Appender(generic._shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, axis=0, ascending=True, inplace=False,
                kind='quicksort', na_position='last'):
    inplace = validate_bool_kwarg(inplace, 'inplace')
    axis = self._get_axis_number(axis)

    # GH 5856/5853
    if inplace and self._is_cached:
        raise ValueError("This Series is a view of some other array, to "
                         "sort in-place you must create a copy")

    def _try_kind_sort(arr):
        # easier to ask forgiveness than permission
        try:
            # if kind==mergesort, it can fail for object dtype
            return arr.argsort(kind=kind)
        except TypeError:
            # stable sort not available for object dtype
            # uses the argsort default quicksort
            return arr.argsort(kind='quicksort')

    arr = self._values
    # use the platform index type so Series with more than 2**31 - 1
    # elements do not overflow the positional indexer (int32 would)
    sortedIdx = np.empty(len(self), dtype=np.intp)

    bad = isna(arr)

    good = ~bad
    idx = _default_index(len(self))

    argsorted = _try_kind_sort(arr[good])

    if is_list_like(ascending):
        # DataFrame-style list argument: only a length-1 list is valid
        if len(ascending) != 1:
            raise ValueError('Length of ascending (%d) must be 1 '
                             'for Series' % (len(ascending)))
        ascending = ascending[0]

    if not is_bool(ascending):
        raise ValueError('ascending must be boolean')

    if not ascending:
        argsorted = argsorted[::-1]

    # place the NA values at the requested end, keeping the sorted
    # order of the valid values
    if na_position == 'last':
        n = good.sum()
        sortedIdx[:n] = idx[good][argsorted]
        sortedIdx[n:] = idx[bad]
    elif na_position == 'first':
        n = bad.sum()
        sortedIdx[n:] = idx[good][argsorted]
        sortedIdx[:n] = idx[bad]
    else:
        raise ValueError('invalid na_position: {!r}'.format(na_position))

    result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])

    if inplace:
        self._update_inplace(result)
    else:
        return result.__finalize__(self)
@Appender(generic._shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
# TODO: this can be combined with DataFrame.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
index = self.index
if level:
new_index, indexer = index.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and index.is_monotonic_increasing) or
(not ascending and index.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(index, kind=kind, ascending=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
new_index = index.take(indexer)
new_index = new_index._sort_levels_monotonic()
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def argsort(self, axis=0, kind='quicksort', order=None):
"""
Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values
Parameters
----------
axis : int (can only be zero)
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
information. 'mergesort' is the only stable algorithm
order : ignored
Returns
-------
argsorted : Series, with -1 indicated where nan values are present
See also
--------
numpy.ndarray.argsort
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name,
dtype='int64')
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result,
index=self.index).__finalize__(self)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index,
dtype='int64').__finalize__(self)
def nlargest(self, n=5, keep='first'):
"""
Return the largest `n` elements.
Parameters
----------
n : int
Return this many descending sorted values
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
top_n : Series
The n largest values in the Series, in sorted order
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
See Also
--------
Series.nsmallest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(10**6))
>>> s.nlargest(10) # only sorts up to the N requested
219921 4.644710
82124 4.608745
421689 4.564644
425277 4.447014
718691 4.414137
43154 4.403520
283187 4.313922
595519 4.273635
503969 4.250236
121637 4.240952
dtype: float64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep='first'):
"""
Return the smallest `n` elements.
Parameters
----------
n : int
Return this many ascending sorted values
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
bottom_n : Series
The n smallest values in the Series, in sorted order
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
See Also
--------
Series.nlargest
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> s = pd.Series(np.random.randn(10**6))
>>> s.nsmallest(10) # only sorts up to the N requested
288532 -4.954580
732345 -4.835960
64803 -4.812550
446457 -4.609998
501225 -4.483945
669476 -4.472935
973615 -4.401699
621279 -4.355126
773916 -4.347355
359919 -4.331927
dtype: float64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
DEPRECATED: use :meth:`Series.sort_index`
Sort Series with MultiIndex by chosen level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int or level name, default None
ascending : bool, default True
Returns
-------
sorted : Series
See Also
--------
Series.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level=...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, ascending=ascending,
sort_remaining=sort_remaining)
def swaplevel(self, i=-2, j=-1, copy=True):
"""
Swap levels i and j in a MultiIndex
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : Series
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index,
copy=copy).__finalize__(self)
def reorder_levels(self, order):
"""
Rearrange index levels using input order. May not drop or duplicate
levels
Parameters
----------
order : list of int representing new level order.
(reference level by number or key)
axis : where to reorder levels
Returns
-------
type of caller (new object)
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception('Can only reorder levels on a hierarchical axis.')
result = self.copy()
result.index = result.index.reorder_levels(order)
return result
def unstack(self, level=-1, fill_value=None):
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded: 0.18.0
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'], ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function)
Parameters
----------
arg : function, dict, or Series
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
y : Series
same index as caller
Examples
--------
Map inputs to outputs (both of type `Series`)
>>> x = pd.Series([1,2,3], index=['one', 'two', 'three'])
>>> x
one 1
two 2
three 3
dtype: int64
>>> y = pd.Series(['foo', 'bar', 'baz'], index=[1,2,3])
>>> y
1 foo
2 bar
3 baz
>>> x.map(y)
one foo
two bar
three baz
If `arg` is a dictionary, return a new Series with values converted
according to the dictionary's mapping:
>>> z = {1: 'A', 2: 'B', 3: 'C'}
>>> x.map(z)
one A
two B
three C
Use na_action to control whether NA values are affected by the mapping
function.
>>> s = pd.Series([1, 2, 3, np.nan])
>>> s2 = s.map('this is a string {}'.format, na_action=None)
0 this is a string 1.0
1 this is a string 2.0
2 this is a string 3.0
3 this is a string nan
dtype: object
>>> s3 = s.map('this is a string {}'.format, na_action='ignore')
0 this is a string 1.0
1 this is a string 2.0
2 this is a string 3.0
3 NaN
dtype: object
See Also
--------
Series.apply: For applying more complex functions on a Series
DataFrame.apply: Apply a function row-/column-wise
DataFrame.applymap: Apply a function elementwise on a whole DataFrame
Notes
-----
When `arg` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``:
>>> from collections import Counter
>>> counter = Counter()
>>> counter['bar'] += 1
>>> y.map(counter)
1 0
2 1
3 0
dtype: int64
"""
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.asobject
if na_action == 'ignore':
def map_f(values, f):
return lib.map_infer_mask(values, f,
isna(values).view(np.uint8))
else:
map_f = lib.map_infer
if isinstance(arg, dict):
if hasattr(arg, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert arg to a lookup function (GH #15999).
dict_with_default = arg
arg = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an indexed series for efficiency.
arg = self._constructor(arg, index=arg.keys())
if isinstance(arg, Series):
# arg is a Series
indexer = arg.index.get_indexer(values)
new_values = algorithms.take_1d(arg._values, indexer)
else:
# arg is a function
new_values = map_f(values, arg)
return self._constructor(new_values,
index=self.index).__finalize__(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
return self
_agg_doc = dedent("""
Examples
--------
>>> s = Series(np.random.randn(10))
>>> s.agg('min')
-1.3018049988556679
>>> s.agg(['min', 'max'])
min -1.301805
max 1.127688
dtype: float64
See also
--------
pandas.Series.apply
pandas.Series.transform
""")
@Appender(_agg_doc)
@Appender(generic._shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
# we can be called from an inner function which
# passes this meta-data
kwargs.pop('_axis', None)
kwargs.pop('_level', None)
# try a regular apply, this evaluates lambdas
# row-by-row; however if the lambda is expected a Series
# expression, e.g.: lambda x: x-x.quantile(0.25)
# this will fail, so we can try a vectorized evaluation
# we cannot FIRST try the vectorized evaluation, becuase
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.apply(func, *args, **kwargs)
except (ValueError, AttributeError, TypeError):
result = func(self, *args, **kwargs)
return result
agg = aggregate
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series. Can be ufunc (a NumPy function
that applies to the entire Series) or a Python function that only works
on single values
Parameters
----------
func : function
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
args : tuple
Positional arguments to pass to function in addition to the value
Additional keyword arguments will be passed as keywords to the function
Returns
-------
y : Series or DataFrame if func returns a Series
See also
--------
Series.map: For element-wise operations
Series.agg: only perform aggregating type operations
Series.transform: only perform transformating type operations
Examples
--------
Create a series with typical summer temperatures for each city.
>>> import pandas as pd
>>> import numpy as np
>>> series = pd.Series([20, 21, 12], index=['London',
... 'New York','Helsinki'])
>>> series
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x**2
>>> series.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> series.apply(lambda x: x**2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x-custom_value
>>> series.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x+=kwargs[month]
... return x
>>> series.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> series.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype,
index=self.index).__finalize__(self)
# dispatch to agg
if isinstance(func, (list, dict)):
return self.aggregate(func, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
return self._try_aggregate_string_function(func, *args, **kwds)
# handle ufuncs and lambdas
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
f = func
with np.errstate(all='ignore'):
if isinstance(f, np.ufunc):
return f(self)
# row-wise access
if is_extension_type(self.dtype):
mapped = self._values.map(f)
else:
values = self.asobject
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
from pandas.core.frame import DataFrame
return DataFrame(mapped.tolist(), index=self.index)
else:
return self._constructor(mapped,
index=self.index).__finalize__(self)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
"""
perform a reduction operation
if we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object
"""
delegate = self._values
if isinstance(delegate, np.ndarray):
# Validate that 'axis' is consistent with Series's single axis.
self._get_axis_number(axis)
if numeric_only:
raise NotImplementedError('Series.{0} does not implement '
'numeric_only.'.format(name))
with np.errstate(all='ignore'):
return op(delegate, skipna=skipna, **kwds)
return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
numeric_only=numeric_only,
filter_type=filter_type, **kwds)
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
return self.copy()
return self
# be subclass-friendly
new_values = algorithms.take_1d(self.get_values(), indexer)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
""" check if we do need a multi reindex; this is for compat with
higher dims
"""
return False
@Appender(generic._shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(Series, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, **kwargs):
kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
'inplace')
non_mapping = is_scalar(index) or (is_list_like(index) and
not is_dict_like(index))
if non_mapping:
return self._set_name(index, inplace=kwargs.get('inplace'))
return super(Series, self).rename(index=index, **kwargs)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, **kwargs):
return super(Series, self).reindex(index=index, **kwargs)
@Appender(generic._shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Series, self).fillna(value=value, method=method,
axis=axis, inplace=inplace,
limit=limit, downcast=downcast,
**kwargs)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(Series, self).shift(periods=periods, freq=freq, axis=axis)
def reindex_axis(self, labels, axis=0, **kwargs):
""" for compatibility with higher dims """
if axis != 0:
raise ValueError("cannot reindex series on non-zero axis!")
return self.reindex(index=labels, **kwargs)
def memory_usage(self, index=True, deep=False):
"""Memory usage of the Series
Parameters
----------
index : bool
Specifies whether to include memory usage of Series index
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
scalar bytes of memory consumed
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
v = super(Series, self).memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
def take(self, indices, axis=0, convert=True, is_copy=False, **kwargs):
"""
return Series corresponding to requested indices
Parameters
----------
indices : list / array of ints
convert : translate negative to positive indices (default)
Returns
-------
taken : Series
See also
--------
numpy.ndarray.take
"""
if kwargs:
nv.validate_take(tuple(), kwargs)
# check/convert indicies here
if convert:
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
indices = _ensure_platform_int(indices)
new_index = self.index.take(indices)
new_values = self._values.take(indices)
return (self._constructor(new_values, index=new_index, fastpath=True)
.__finalize__(self))
def isin(self, values):
"""
Return a boolean :class:`~pandas.Series` showing whether each element
in the :class:`~pandas.Series` is exactly contained in the passed
sequence of ``values``.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
``list`` of one element.
.. versionadded:: 0.18.1
Support for values as a set
Returns
-------
isin : Series (bool dtype)
Raises
------
TypeError
* If ``values`` is a string
See Also
--------
pandas.DataFrame.isin
Examples
--------
>>> s = pd.Series(list('abc'))
>>> s.isin(['a', 'c', 'e'])
0 True
1 False
2 True
dtype: bool
Passing a single string as ``s.isin('a')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['a'])
0 True
1 False
2 False
dtype: bool
"""
result = algorithms.isin(_values_from_object(self), values)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
"""
Return boolean Series equivalent to left <= series <= right. NA values
will be treated as False
Parameters
----------
left : scalar
Left boundary
right : scalar
Right boundary
Returns
-------
is_between : Series
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
@classmethod
def from_csv(cls, path, sep=',', parse_dates=True, header=None,
index_col=0, encoding=None, infer_datetime_format=False):
"""
Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a time Series.
This method only differs from :func:`pandas.read_csv` in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `header` is ``None`` instead of ``0`` (the first row is not used as
the column names)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
With :func:`pandas.read_csv`, the option ``squeeze=True`` can be used
to return a Series like ``from_csv``.
Parameters
----------
path : string file path or file handle / StringIO
sep : string, default ','
Field delimiter
parse_dates : boolean, default True
Parse dates. Different default from read_table
header : int, default None
Row to use as header (skip prior rows)
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : Series
"""
from pandas.core.frame import DataFrame
df = DataFrame.from_csv(path, header=header, index_col=index_col,
sep=sep, parse_dates=parse_dates,
encoding=encoding,
infer_datetime_format=infer_datetime_format)
result = df.iloc[:, 0]
if header is None:
result.index.name = result.name = None
return result
def to_csv(self, path=None, index=True, sep=",", na_rep='',
float_format=None, header=False, index_label=None,
mode='w', encoding=None, date_format=None, decimal='.'):
"""
Write Series to a comma-separated values (csv) file
Parameters
----------
path : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
header : boolean, default False
Write out series name
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
mode : Python write mode, default 'w'
sep : character, default ","
Field delimiter for the output file.
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
date_format: string, default None
Format string for datetime objects.
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
from pandas.core.frame import DataFrame
df = DataFrame(self)
# result is only a string if no path provided, otherwise None
result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,
float_format=float_format, header=header,
index_label=index_label, mode=mode,
encoding=encoding, date_format=date_format,
decimal=decimal)
if path is None:
return result
@Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
df = self.to_frame()
df.to_excel(excel_writer=excel_writer, sheet_name=sheet_name,
na_rep=na_rep, float_format=float_format, columns=columns,
header=header, index=index, index_label=index_label,
startrow=startrow, startcol=startcol, engine=engine,
merge_cells=merge_cells, encoding=encoding,
inf_rep=inf_rep, verbose=verbose)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(Series, self).isna()
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(Series, self).isnull()
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(Series, self).notna()
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(Series, self).notnull()
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return Series without null values
Returns
-------
valid : Series
inplace : boolean, default False
Do operation in place.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
kwargs.pop('how', None)
if kwargs:
raise TypeError('dropna() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axis = self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
if inplace:
self._update_inplace(result)
else:
return result
else:
if inplace:
# do nothing
pass
else:
return self.copy()
valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,
**kwargs)
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
mask = isna(self._values)
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[i]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
mask = isna(self._values[::-1])
i = mask.argmin()
if mask[i]:
return None
else:
return self.index[len(self) - i - 1]
# ----------------------------------------------------------------------
# Time series-oriented methods
def to_timestamp(self, freq=None, how='start', copy=True):
"""
Cast to datetimeindex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
Returns
-------
ts : Series with DatetimeIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
"""
Convert Series from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
Returns
-------
ts : Series with PeriodIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values,
index=new_index).__finalize__(self)
# -------------------------------------------------------------------------
# Datetimelike delegation methods
dt = base.AccessorProperty(CombinedDatetimelikeProperties)
# -------------------------------------------------------------------------
# Categorical methods
cat = base.AccessorProperty(CategoricalAccessor)
def _dir_deletions(self):
return self._accessors
def _dir_additions(self):
rv = set()
for accessor in self._accessors:
try:
getattr(self, accessor)
rv.add(accessor)
except AttributeError:
pass
return rv
# ----------------------------------------------------------------------
# Add plotting methods to Series
plot = base.AccessorProperty(gfx.SeriesPlotMethods,
gfx.SeriesPlotMethods)
hist = gfx.hist_series
Series._setup_axes(['index'], info_axis=0, stat_axis=0, aliases={'rows': 0})
Series._add_numeric_operations()
Series._add_series_only_operations()
Series._add_series_or_dataframe_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)
ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)
# -----------------------------------------------------------------------------
# Supplementary functions
def _sanitize_index(data, index, copy=False):
""" sanitize an index type to return an ndarray of the underlying, pass
thru a non-Index
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError('Length of values does not match length of ' 'index')
if isinstance(data, PeriodIndex):
data = data.asobject
elif isinstance(data, DatetimeIndex):
data = data._to_embed(keep_tz=True)
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ['M', 'm']:
data = _sanitize_array(data, index, copy=copy)
return data
def _sanitize_array(data, index, dtype=None, copy=False,
raise_cast_failure=False):
""" sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
def _try_cast(arr, take_fast_path):
# perf shortcut as this is the most common case
if take_fast_path:
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
subarr = maybe_cast_to_datetime(arr, dtype)
if not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
subarr = Categorical(arr)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# GH #846
if isinstance(data, (np.ndarray, Index, Series)):
if dtype is not None:
subarr = np.array(data, copy=False)
# possibility of nan -> garbage
if is_float_dtype(data.dtype) and is_integer_dtype(dtype):
if not isna(data).any():
subarr = _try_cast(data, True)
elif copy:
subarr = data.copy()
else:
subarr = _try_cast(data, True)
elif isinstance(data, Index):
# don't coerce Index types
# e.g. indexes can have different conversions (so don't fast path
# them)
# GH 6140
subarr = _sanitize_index(data, index, copy=True)
else:
subarr = _try_cast(data, True)
if copy:
subarr = data.copy()
elif isinstance(data, Categorical):
subarr = data
if copy:
subarr = data.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, False)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
else:
subarr = _try_cast(data, False)
def create_from_value(value, index, dtype):
# return a new empty value suitable for the dtype
if is_datetimetz(dtype):
subarr = DatetimeIndex([value] * len(index), dtype=dtype)
elif is_categorical_dtype(dtype):
subarr = Categorical([value] * len(index))
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
return subarr
# scalar like, GH
if getattr(subarr, 'ndim', 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = create_from_value(value, index, dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = create_from_value(subarr[0], index,
subarr.dtype)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception('Data must be 1-dimensional')
else:
subarr = _asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, compat.string_types):
subarr = np.array(data, dtype=object, copy=copy)
return subarr
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/frame/methods/test_update.py | 4 | 4259 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestDataFrameUpdate:
def test_update_nan(self):
# #15593 #15617
# test 1
df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = df1.copy()
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
# test 2
df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)})
df2 = DataFrame({"A": [None, 2, 3]})
expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)})
df1.update(df2, overwrite=False)
tm.assert_frame_equal(df1, expected)
def test_update(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
df.update(other)
expected = DataFrame(
[[45.0, 45.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
tm.assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]]
)
tm.assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame(
[[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"bad_kwarg, exception, msg",
[
# errors must be 'ignore' or 'raise'
({"errors": "something"}, ValueError, "The parameter errors must.*"),
({"join": "inner"}, NotImplementedError, "Only left join is supported"),
],
)
def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg):
df = DataFrame([[1.5, 1, 3.0]])
with pytest.raises(exception, match=msg):
df.update(df, **bad_kwarg)
def test_update_raise_on_overlap(self):
df = DataFrame(
[[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2])
with pytest.raises(ValueError, match="Data overlaps"):
df.update(other, errors="raise")
def test_update_from_non_df(self):
d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])}
df = DataFrame(d)
d["a"] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}
df = DataFrame(d)
d["a"] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
tm.assert_frame_equal(df, expected)
def test_update_datetime_tz(self):
# GH 25807
result = DataFrame([pd.Timestamp("2019", tz="UTC")])
result.update(result)
expected = DataFrame([pd.Timestamp("2019", tz="UTC")])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
krebeljk/openInjMoldSim | tutorials/demo/dogbone/plot_co.py | 3 | 1072 | import numpy as np
from io import StringIO
import matplotlib.pyplot as plt
import re
import sys
'''
example usage:
python plot_dt.py log.openInjMoldSimFimaaIbac
'''
# get data
# Accumulate the matched log fields as newline-separated strings so they can
# all be parsed in one pass by np.loadtxt below.
cas = ''
coMean = ''
coMax = ''
with open(str(sys.argv[1])) as origin_file:
    data = origin_file.read()
    # Simulation time stamps: solver log lines of the form "Time = <value>".
    for match in re.findall(r'(?m)^Time\s=.*', data):
        cas = cas + match.split('=')[1] + '\n'
    # Courant number lines: "Courant Number mean: <m> max: <M> ..." — fields
    # 3 and 5 after a plain space split are the mean and max values.
    for match in re.findall(r'(?m)^Courant\sNumber\smean:.*', data):
        coMean = coMean + match.split(' ')[3] + '\n'
        coMax = coMax + match.split(' ')[5] + '\n'
cas = np.loadtxt(StringIO(cas))
coMean = np.loadtxt(StringIO(coMean))[1:]#skip first because calculated at time 0
coMax = np.loadtxt(StringIO(coMax))[1:]
# The Courant number is reported once before the first written time step, so
# the time axis may carry one extra leading entry; trim it to align lengths.
if cas.size == coMean.size+1:
    cas = cas[1:]
# plot
fig, ax = plt.subplots()
ax.set(xlabel='t [s]'
        ,ylabel='Co [1]'
        ,title='Courant number')
ax.grid()
ax.plot(cas, coMean, 'b', label='mean')
ax.plot(cas, coMax, 'r', label='max')
# legend = ax.legend(loc='upper right', shadow=True, fontsize='x-large')
plt.show()
# fig.savefig("kappa.png")
| gpl-3.0 |
lauralwatkins/voronoi | example/test.py | 1 | 1615 | """
Demo by G. Brammer
"""
import numpy as np
from voronoi import bin2d
import matplotlib.pyplot as plt
# Noisy gaussian
yp, xp = np.indices((100,100))
R = np.sqrt((xp-50)**2+(yp-50)**2)
sigma = 10
g = 10*np.exp(-R**2/2/sigma**2)
s = 1
noise = np.random.normal(size=R.shape)*s
pix_bin, bin_x, bin_y, bin_sn, bin_npix, scale = bin2d.bin2d(xp.flatten(), yp.flatten(), (g+noise).flatten(), g.flatten()*0+s, 20., cvt=True, wvt=False, graphs=False, quiet=False)
# Bin stats
bad = bin_sn < 5
masked = pix_bin*1
mean_bins = pix_bin*0.
median_bins = pix_bin*0.
mea = bin_x*0.
med = bin_x*0.
bx = bin_x*0.
by = bin_y*0.
bin_ids = np.unique(pix_bin)
for i in range(len(bin_ids)):
bin_mask = pix_bin == bin_ids[i]
mea[i] = (g+noise).flatten()[bin_mask].mean()
mean_bins[bin_mask] = mea[i]
med[i] = np.median((g+noise).flatten()[bin_mask])
median_bins[bin_mask] = med[i]
bx[i] = np.sum(xp.flatten()*bin_mask)/bin_mask.sum()
by[i] = np.sum(yp.flatten()*bin_mask)/bin_mask.sum()
for bin in np.where(bad)[0]:
bin_mask = pix_bin == bin
masked[bin_mask] = -99
# Plot
plt.rcParams['image.origin'] = 'lower'
fig = plt.figure(figsize=[9, 2.8])
ax = fig.add_subplot(131)
ax.imshow(pix_bin.reshape(R.shape))
ax.scatter(bin_x, bin_y, marker='.', color='k', alpha=0.1)
ax = fig.add_subplot(132)
ax.imshow(g+noise, vmin=-0.1, vmax=10, cmap='gray_r')
ax = fig.add_subplot(133)
ax.imshow(median_bins.reshape(R.shape), vmin=-0.1, vmax=10, cmap='gray_r')
for ax in fig.axes:
ax.set_xticklabels([]); ax.set_yticklabels([])
fig.tight_layout(pad=0.1)
fig.savefig('test.png')
| bsd-2-clause |
ojustino/test-demo | 3ps/bayes.py | 1 | 1826 | import numpy as np
import matplotlib.pyplot as plt
'''data should be whether or not a star had a companion.
so d is an a array with three 1s and 18 0s.'''
'''f = .8
d_i = np.array([1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]) # for 21 stars
N = 0; k = 0
j = 0
while(j < len(d_i)):
N += 1
if d_i[j] == 1:
k += 1
j += 1
print k, N
prob_d = f**k * (1-f)**(N-k)
#probability of this data is f**3 * (1-f)**18
x = np.linspace(0,1,500) # the fraction of stars with a companion
sigma = 1./15
prior = np.exp(-1./2*((.8-x)/sigma)**2)
#prior = np.ones(len(x))
bayes = prob_d * prior
#plt.plot(x,bayes)
#plt.plot(x,prior)
#plt.show()'''
# f is your fraction, k is # of relevant events, N is total # of events
def thehood(f, k, N):
    """Binomial likelihood kernel: f**k * (1 - f)**(N - k).

    The binomial coefficient is omitted; it is constant in ``f`` and drops
    out when the likelihood is normalised over a grid of ``f`` values.
    """
    successes = f ** k
    failures = (1 - f) ** (N - k)
    return successes * failures
f1 = np.genfromtxt('ps3_q4data.txt')
# Columns: presumably [?, metallicity, has-planet flag] — TODO confirm
# against the data file's documentation.
mets = f1[:,1]
planet = f1[:,2]
metsP = [] # has companion
metsNP = [] # does not have companion
d = 0.; M = 0.
j = 0
# Split metallicities by whether the star has a companion; d counts
# companions, M counts all stars.
while(j < len(planet)):
    M += 1
    if planet[j] == 1:
        d += 1
        metsP.append(mets[j])
    else:
        metsNP.append(mets[j])
    j += 1
metsP = np.array(metsP)
metsNP = np.array(metsNP)
# Parameter grid for the model f = alpha * 10**(beta * metallicity).
alpha = np.linspace(0,100,501); lena = len(alpha)
beta = np.linspace(0,100,501); lenb = len(beta)
likely = np.zeros([lena,lenb])
j = 0; k = 0
# NOTE(review): ``k`` is never reset to 0 inside the outer loop, so the inner
# loop only executes for j == 0; all later rows of ``likely`` remain zero.
while(j < lena):
    while(k < lenb):
        f = alpha[j] * 10**(beta[k]*metsP)
        f_not = alpha[j] * 10**(beta[k]*metsNP)
        # THEY'RE SEPARATE THINGS
        # NOTE(review): thehood() requires (f, k, N) arguments; calling it
        # with none raises TypeError — this script is unfinished here, and
        # ``f``/``f_not`` computed above are never used.
        likely[j][k] = thehood()
        k += 1
    j += 1
prior_a = 1
prior_b = 1 # i guess we're ignoring these for now -- uninformative priors
# uninformative, drunk priors
#alpha = ; beta =
#params = [alpha,beta]
'''flaco = np.array(sorted(f1[:,1]))
plt.plot(10**flaco,)
plt.plot(np.linspace(0,len(f1[:,1]),len(f1[:,1])),)
plt.show()'''
| mit |
liuzhaoguo/FreeROI-1 | froi/gui/component/unused/volumedintensitydialog.py | 6 | 2368 | __author__ = 'zhouguangfu'
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class VolumeIntensityDialog(QDialog):
    """
    Dialog showing a histogram of the non-zero voxel intensities of the
    currently selected volume, refreshed whenever the model's time point
    changes.
    """
    def __init__(self, model,parent=None):
        # ``model`` must expose data()/currentIndex()/get_current_time_point()
        # and a ``time_changed`` signal (see usages below).
        super(VolumeIntensityDialog, self).__init__(parent)
        self._model = model
        self._init_gui()
        self._create_actions()
        self._plot()
    def _init_gui(self):
        """
        Initialize GUI: embed a matplotlib canvas plus its navigation
        toolbar in a vertical layout.
        """
        # a figure instance to plot on
        self.figure = plt.figure()
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        # this is the Navigation widget,it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # set the layout
        layout = QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        self.setLayout(layout)
    def _create_actions(self):
        # Redraw the histogram whenever the model's time point changes.
        self._model.time_changed.connect(self._plot)
    def _plot(self):
        '''Plot the intensity histogram of the current volume.'''
        # Qt.UserRole + 5 appears to fetch the volume array and
        # Qt.UserRole + 8 whether the volume is 4-D (time series) —
        # TODO confirm against the model implementation.
        volume_data = self._model.data(self._model.currentIndex(),Qt.UserRole + 5)
        if self._model.data(self._model.currentIndex(),Qt.UserRole + 8):
            data = volume_data[:,:,:,self._model.get_current_time_point()]
            self.points = data[data!=0]
        # self.points = volume_data[volume_data[:,:,:,self._model.get_current_time_point()]!=0l,
        #                           self._model.get_current_time_point()]
        else:
            self.points = volume_data[volume_data!=0]
        # create an axis
        ax = self.figure.add_subplot(111)
        ax.hold(False)
        # 50-bin histogram of the non-zero voxel intensities.
        ax.hist(self.points,50)
        plt.xlabel("Intensity")
        plt.ylabel("Number")
        plt.grid()
        self.canvas.draw()
    def closeEvent(self, QCloseEvent):
        # Disconnect so a closed dialog no longer receives model updates.
        self._model.time_changed.disconnect(self._plot)
leewujung/ooi_sonar | misc_source_files/decomp_plot.py | 1 | 5722 | import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def separate_transform_result(D, ori_data, ping_per_day_mvbs, log_opt=1):
    '''
    Split the transform output ``D`` per frequency channel and return both the
    4-D separated array and a flattened per-frequency layout for use with
    `plot_cmp_data_decomp` and `plot_single_day`.

    When ``log_opt`` is 1 the plotting array is converted to dB (10*log10).
    ``ori_data`` is only used for its second-axis length (presumably the
    per-ping sample count — TODO confirm).
    '''
    n_comp = D.shape[0]
    n_depth = ori_data.shape[1]
    by_comp = D.reshape((n_comp, -1, n_depth)).swapaxes(1, 2)
    # Split the last axis into (frequency, ping-of-day) and bring the
    # frequency axis to the front.
    D_sep = by_comp.reshape(
        (n_comp, by_comp.shape[1], -1, ping_per_day_mvbs)
    ).transpose((2, 0, 1, 3))
    flat = D_sep.transpose((0, 2, 1, 3)).reshape(
        (D_sep.shape[0], D_sep.shape[2], -1)
    )
    D_plot = 10 * np.log10(flat) if log_opt == 1 else flat
    return D_sep, D_plot
def plot_single_day(V,plot_day,ping_per_day_mvbs):
    """Plot one day of ``V`` as three side-by-side images (38/120/200 kHz).

    ``V`` has shape (3, depth, n_days*ping_per_day_mvbs); ``plot_day`` is a
    1-based day index. Color limits are shared across panels and clipped to
    mean-3*std / mean+6*std of the selected day.
    """
    fig,ax = plt.subplots(1,3,figsize=(18,3))
    # Get color axis limits from the selected day only.
    v_mtx = V[:,1:-2,ping_per_day_mvbs*(plot_day-1)+np.arange(ping_per_day_mvbs)] # don't plot surface/bottom rows
    cmean = np.mean(v_mtx.reshape((-1,1)))
    cstd = np.std(v_mtx.reshape((-1,1)))
    cmax = np.max(v_mtx.reshape((-1,1)))
    for iX in range(3):
        # Flip the depth axis so the surface appears at the top of the image.
        im = ax[iX].imshow(v_mtx[iX,::-1,:],aspect='auto',vmax=cmean+cstd*6,vmin=cmean-cstd*3)#,cmap=e_cmap,norm=e_norm)
        divider = make_axes_locatable(ax[iX])
        cax = divider.append_axes("right", size="5%", pad=0.1)
        cbar = plt.colorbar(im,cax=cax)
        if iX==0:
            ax[iX].set_title('38 kHz')
        elif iX==1:
            ax[iX].set_title('120 kHz')
        else:
            ax[iX].set_title('200 kHz')
    #plt.savefig(os.path.join(save_path,save_fname))
def plot_comp(V,n_comp,ping_per_day_mvbs,figsize_input,log_opt=1,cax_all=0,cax=np.nan):
    """Plot each of the ``n_comp`` components of ``V`` as a stacked image panel.

    Parameters
    ----------
    V : ndarray/masked array, shape (n_comp, depth, 3*ping_per_day_mvbs)
        Component matrices; the last axis holds three frequency blocks.
    n_comp : int
        Number of components (rows of subplots).
    ping_per_day_mvbs : int
        Pings per frequency block; used to center the x tick labels.
    figsize_input : tuple
        Figure size forwarded to ``plt.subplots``.
    log_opt : int, optional
        If 1, convert ``V`` to dB (10*log10) once before plotting.
    cax_all : int, optional
        If 1, use one shared color scale for all panels.
    cax : (vmin, vmax) or nan, optional
        Explicit shared color limits; when nan they are derived from ``V``.
    """
    if log_opt==1:
        V = 10*np.ma.log10(V)
    if np.any(np.isnan(cax)):
        # Derive robust shared limits, clipped to mean-2*std / mean+3*std.
        cmean_all = np.mean(V)
        cstd_all = np.std(V)
        cmin_all = max((np.min(V),cmean_all-2*cstd_all))
        cmax_all = min((np.max(V),cmean_all+3*cstd_all))
    else:
        cmin_all = cax[0]
        cmax_all = cax[1]
    fig,ax=plt.subplots(n_comp,1,sharex=True,figsize=figsize_input)
    for c in range(n_comp):
        # Fix: the original also computed ``vlog = 10*np.ma.log10(V[c,:,:])``
        # here, which was never used and would have double-logged data already
        # converted to dB above; the dead computation is removed.
        cmean = np.mean(V[c,:,:])
        cstd = np.std(V[c,:,:])
        if cax_all==1:
            cmin = cmin_all
            cmax = cmax_all
        else:
            # Per-panel limits with the same robust clipping.
            cmin = max((np.min(V[c,:,:]),cmean-2*cstd))
            cmax = min((np.max(V[c,:,:]),cmean+3*cstd))
        # Flip the depth axis so the surface appears at the top.
        im = ax[c].imshow(V[c,::-1,:],aspect='auto',vmin=cmin,vmax=cmax)
        divider = make_axes_locatable(ax[c])
        # Distinct name so the ``cax`` parameter is not shadowed.
        cbar_ax = divider.append_axes("right", size="5%", pad=0.1)
        plt.colorbar(im,cax=cbar_ax)
        ax[c].set_xticks([x*ping_per_day_mvbs+ping_per_day_mvbs/2 for x in range(3)])
        ax[c].set_xticklabels(['38k','120k','200k'])
        ax[c].tick_params('both', length=0)
    #plt.savefig(os.path.join(save_path,save_fname))
def plot_coef(W,n_comp,figsize_input=(22,3),log_opt=0):
    """Plot each of the first ``n_comp`` columns of ``W`` as a labeled line.

    NOTE(review): zero entries of ``W`` are replaced *in place* with machine
    epsilon (so a log y-scale never sees 0) — the caller's array is mutated.
    """
    plt.figure(figsize=figsize_input)
    W[W==0] = sys.float_info.epsilon
    labels = [str(x) for x in range(n_comp)]
    # One curve per component column, x running over sample index (1-based).
    for w, label in zip(W.T, labels):
        plt.plot(range(1,len(w)+1),w, label=label,linewidth=2)
    plt.legend()
    plt.xticks(range(W.shape[0]))
    if log_opt==1:
        plt.yscale('log')
    plt.xlim([0,W.shape[0]])
    #plt.savefig(os.path.join(save_path,save_fname))
    plt.show()
def plot_cmp_data_decomp(V,X,plot_day,ping_per_day_mvbs,figsize_input,same_cax_opt=1):
    """Compare data ``V`` (top row) with its reconstruction ``X`` (bottom row)
    for one day, one column per frequency (38/120/200 kHz).

    ``V`` and ``X`` have shape (3, depth, n_days*ping_per_day_mvbs);
    ``plot_day`` is a 1-based day index. With ``same_cax_opt`` the color
    limits (derived from ``V`` only) are shared by all six panels.
    """
    fig,ax = plt.subplots(2,3,figsize=figsize_input)
    for iY in range(2):
        # Get color axis limits from V's selected day (same both rows).
        v_mtx = V[:,:,ping_per_day_mvbs*(plot_day-1)+np.arange(ping_per_day_mvbs)].reshape((-1,1))
        cmean = np.mean(v_mtx)
        cstd = np.std(v_mtx)
        cmax = np.max(v_mtx)
        for iX in range(3):
            # Row 0 shows the data, row 1 the reconstruction; the depth axis
            # is flipped so the surface appears at the top.
            if iY==0:
                v = V[iX,::-1,ping_per_day_mvbs*(plot_day-1)+np.arange(ping_per_day_mvbs)] # data to be plotted
            else:
                v = X[iX,::-1,ping_per_day_mvbs*(plot_day-1)+np.arange(ping_per_day_mvbs)] # data to be plotted
            if same_cax_opt==1:
                im = ax[iY,iX].imshow(v.T,aspect='auto',vmax=cmean+cstd*6,vmin=cmean-cstd*3)
            else:
                im = ax[iY,iX].imshow(v.T,aspect='auto')
            divider = make_axes_locatable(ax[iY,iX])
            cax = divider.append_axes("right", size="2%", pad=0.1)
            cbar = plt.colorbar(im,cax=cax)
            if iX==0:
                ax[iY,iX].set_title('38 kHz')
            elif iX==1:
                ax[iY,iX].set_title('120 kHz')
            else:
                ax[iY,iX].set_title('200 kHz')
    #plt.savefig(os.path.join(save_path,save_fname))
def plot_original_echogram(MVBS,plot_start_day,plot_range_day):
    """Plot raw MVBS echograms for the three frequencies over a day range.

    NOTE(review): this function reads ``ping_per_day_mvbs`` from module
    globals rather than taking it as a parameter, so it only works after
    that name is defined at module level.

    NOTE(review): the slice ``1:-2:-1`` on the depth axis is empty for arrays
    with more than 3 rows (start 1, negative step, stop n-2); the intent was
    probably the reversed ``1:-2`` range used by the sibling plot helpers —
    verify before relying on this function.
    """
    fig,ax = plt.subplots(3,1,figsize=(15,6))
    # One panel per frequency slab of MVBS.
    ax[0].imshow(MVBS[0,1:-2:-1,ping_per_day_mvbs*(plot_start_day-1)\
            +np.arange(ping_per_day_mvbs*plot_range_day)].T,\
            aspect='auto',vmin=-80,vmax=-30)
    ax[1].imshow(MVBS[1,1:-2:-1,ping_per_day_mvbs*(plot_start_day-1)\
            +np.arange(ping_per_day_mvbs*plot_range_day)].T,\
            aspect='auto',vmin=-80,vmax=-30)
    ax[2].imshow(MVBS[2,1:-2:-1,ping_per_day_mvbs*(plot_start_day-1)\
            +np.arange(ping_per_day_mvbs*plot_range_day)].T,\
            aspect='auto',vmin=-80,vmax=-30)
    # Tick once per day, labeled with the absolute day number.
    ax[2].set_xticks(np.arange(plot_range_day)*ping_per_day_mvbs+ping_per_day_mvbs/2)
    ax[2].set_xticklabels(np.arange(plot_range_day)+plot_start_day)
    ax[2].set_xlabel('Day',fontsize=14)
    plt.show()
| apache-2.0 |
thientu/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
    """Fit ``clf`` on (X, y) and verify it reproduces the labels exactly,
    with well-formed class probabilities."""
    expected_classes = np.unique(y)
    clf.fit(X, y)
    predicted = clf.predict(X)
    assert_array_equal(clf.classes_, expected_classes)
    assert_equal(predicted.shape, (len(y),))
    assert_array_equal(predicted, y)
    proba = clf.predict_proba(X)
    assert_equal(proba.shape, (len(y), expected_classes.shape[0]))
    # Rows of predict_proba must sum to one and peak at the true class.
    assert_array_almost_equal(proba.sum(axis=1), np.ones(len(y)))
    assert_array_equal(proba.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset
    # Make sure it predicts the correct result on simple datasets,
    # for dense and sparse input, with default and strong regularization
    # (C=100), and with/without an intercept term.
    check_predictions(LogisticRegression(random_state=0), X, Y1)
    check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
    check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
    check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X, Y1)
    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X_sp, Y1)
def test_error():
    # Test for appropriate exception on errors
    # Non-positive or non-numeric C must be rejected.
    msg = "Penalty term must be positive"
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C=-1).fit, X, Y1)
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C="test").fit, X, Y1)
    # The same validation applies to both LogisticRegression and its CV
    # counterpart for tol and max_iter.
    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = "Tolerance for stopping criteria must be positive"
        assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
        msg = "Maximum number of iteration must be positive"
        assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
    # Same sanity check as the 2-class case, on dense and sparse input.
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    # A negative penalty C must be rejected by every multinomial-capable
    # solver at fit time.
    bad_X, bad_y = [[0, 1], [1, 0]], [0, 1]
    for solver in ('lbfgs', 'newton-cg'):
        clf = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
        assert_raises(ValueError, clf.fit, bad_X, bad_y)
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
    # Test that we can write to coef_ and intercept_
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    # With all coefficients and intercepts zeroed in place, the decision
    # function must be identically zero for any input.
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    # Fitting on data containing NaN must raise ValueError (asserted by the
    # @raises decorator) rather than hang.
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test the liblinear fails when class_weight of type dict is
# provided, when it is multiclass. However it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that liblinear fails when sample weights are provided
clf_lib = LR(solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y,
sample_weight=np.ones(y.shape[0]))
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=y + 1)
clf_sw_sag = LR(solver='sag', fit_intercept=False,
max_iter=2000, tol=1e-7)
clf_sw_sag.fit(X, y, sample_weight=y + 1)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2
clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
    """Liblinear must emit a ConvergenceWarning when max_iter is exhausted."""
    features, target = make_classification(n_samples=20, n_features=20)
    estimator = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, estimator.fit, features, target)
    # n_iter_ must report the (exhausted) iteration budget.
    assert_equal(estimator.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    # lbfgs, with and without intercept: coef_ must be (n_classes, n_features)
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))

    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))

    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))

    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))

    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)

    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
    """Validate the analytic Hessian-vector product of the multinomial loss
    against a finite-difference estimate built from the gradient."""
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    Y = np.zeros((n_samples, n_classes))
    # One-hot targets chosen so each sample's class matches argmax(X.w^T).
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    # Center the gradients, then fit their slope along vec by least squares;
    # that slope is the Hessian column being checked.
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Samples whose decision_function is exactly zero must be predicted as
    the negative class (liblinear itself predicts the positive one).

    See https://github.com/scikit-learn/scikit-learn/issues/3600
    and https://github.com/scikit-learn/scikit-learn/pull/3623.
    """
    features, target = make_classification(n_samples=5, n_features=5)
    model = LogisticRegression(fit_intercept=False)
    model.fit(features, target)

    # Without an intercept, an all-zero input makes the decision values zero.
    zero_input = np.zeros((5, 5))
    assert_array_equal(model.predict(zero_input), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """LogisticRegressionCV(solver='liblinear') must accept sparse input."""
    dense_features, target = make_classification(n_samples=10, n_features=5)
    model = LogisticRegressionCV(solver='liblinear')
    model.fit(sparse.csr_matrix(dense_features), target)
def test_logreg_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=i)
        # The expected message embeds the offending value via %r.
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    """intercept_scaling must have no effect when fit_intercept=False."""
    model = LogisticRegression(fit_intercept=False)
    model.fit(X, Y1)
    assert_equal(model.intercept_, 0.)
def test_logreg_cv_penalty():
    """The penalty requested from LogisticRegressionCV must be used in the
    final refit, producing the same sparsity as a plain LogisticRegression."""
    features, target = make_classification(n_samples=50, n_features=20,
                                           random_state=0)
    cv_model = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    cv_model.fit(features, target)
    plain_model = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    plain_model.fit(features, target)
    assert_equal(np.count_nonzero(cv_model.coef_),
                 np.count_nonzero(plain_model.coef_))
def test_logreg_predict_proba_multinomial():
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    # Predicted probabilities using the true-entropy loss should give a
    # smaller loss than those using the ovr method.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)

    # Predicted probabilities using the soft-max function should give a
    # smaller loss than those using the logistic function
    # (_predict_proba_lr applies the per-class sigmoid instead of softmax).
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iteration is reached
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0

    solvers = ['newton-cg', 'liblinear', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for max_iter in range(1, 5):
        for solver in solvers:
            lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                    random_state=0, solver=solver)
            lr.fit(X, y_bin)
            # With an unreachably tight tol, the optimizer must stop exactly
            # at the requested iteration budget.
            assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    """Check the shape of ``n_iter_`` for every solver, in both the OvR and
    multinomial settings, for LogisticRegression and LogisticRegressionCV.

    Bug fix: the multinomial section used ``break`` to skip the solvers that
    do not support ``multi_class='multinomial'`` ('liblinear' and 'sag').
    Since 'liblinear' is not the last entry of the solver list, ``break``
    aborted the whole loop, so neither the OvR nor the multinomial checks
    ever ran for 'sag' and 'lbfgs'.  ``continue`` skips only the unsupported
    combination, as intended.
    """
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
        # OvR case: liblinear reports a single iteration count.
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        # Binary problems collapse the class dimension to 1.
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))

        # multinomial case: a single optimization problem, so one count.
        n_classes = 1
        if solver in ('liblinear', 'sag'):
            # These solvers do not support the multinomial loss; skip them
            # but keep testing the remaining solvers (previously: break).
            continue

        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    """A 1-iteration refit on the same data must barely move the coefficients
    with warm_start=True, and move them substantially with warm_start=False.
    Warm starting does not work with the liblinear solver.

    Robustness fix: the unsupported (solver='sag', multi_class='multinomial')
    combination was skipped with ``break``, which only behaves like
    ``continue`` because 'multinomial' happens to be the last entry of the
    multi_class list.  ``continue`` states the intent and stays correct if
    the list order ever changes.
    """
    X, y = iris.data, iris.target
    solvers = ['newton-cg', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')

    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    if solver == 'sag' and multi_class == 'multinomial':
                        # sag does not implement the multinomial loss
                        # (previously: break).
                        continue
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    clf.fit(X, y)
                    coef_1 = clf.coef_

                    # Refit for a single iteration from the previous solution
                    # (or from scratch when warm_start=False).
                    clf.max_iter = 1
                    with ignore_warnings():
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    if warm_start:
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/kernel_approximation.py | 7 | 18505 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
    """Approximates feature map of an RBF kernel by Monte Carlo approximation
    of its Fourier transform.

    It implements a variant of Random Kitchen Sinks.[1]

    Read more in the :ref:`User Guide <rbf_kernel_approx>`.

    Parameters
    ----------
    gamma : float
        Parameter of RBF kernel: exp(-gamma * x^2)

    n_components : int
        Number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Notes
    -----
    See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
    Benjamin Recht.

    [1] "Weighted Sums of Random Kitchen Sinks: Replacing
    minimization with randomization in learning" by A. Rahimi and
    Benjamin Recht.
    (http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
    """

    def __init__(self, gamma=1., n_components=100, random_state=None):
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X.

        Samples random projection according to n_features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X, accept_sparse='csr')
        random_state = check_random_state(self.random_state)
        n_features = X.shape[1]

        # Random projection weights, Gaussian draws scaled by sqrt(2 * gamma).
        self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
            size=(n_features, self.n_components)))

        # Random phase offsets drawn uniformly from [0, 2*pi).
        self.random_offset_ = random_state.uniform(0, 2 * np.pi,
                                                   size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')

        X = check_array(X, accept_sparse='csr')
        projection = safe_sparse_dot(X, self.random_weights_)
        projection += self.random_offset_
        # cos is applied in place to avoid allocating a second array.
        np.cos(projection, projection)
        # sqrt(2 / n_components) normalization of the random features.
        projection *= np.sqrt(2.) / np.sqrt(self.n_components)
        return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximates feature map of the "skewed chi-squared" kernel by Monte
    Carlo approximation of its Fourier transform.

    Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.

    Parameters
    ----------
    skewedness : float
        "skewedness" parameter of the kernel. Needs to be cross-validated.

    n_components : int
        number of Monte Carlo samples per original feature.
        Equals the dimensionality of the computed feature space.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    References
    ----------
    See "Random Fourier Approximations for Skewed Multiplicative Histogram
    Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.

    See also
    --------
    AdditiveChi2Sampler : A different approach for approximating an additive
        variant of the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
    """

    def __init__(self, skewedness=1., n_components=100, random_state=None):
        self.skewedness = skewedness
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X.

        Samples random projection according to n_features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the transformer.
        """
        X = check_array(X)
        random_state = check_random_state(self.random_state)
        n_features = X.shape[1]
        uniform = random_state.uniform(size=(n_features, self.n_components))
        # transform by inverse CDF of sech
        self.random_weights_ = (1. / np.pi
                                * np.log(np.tan(np.pi / 2. * uniform)))
        self.random_offset_ = random_state.uniform(0, 2 * np.pi,
                                                   size=self.n_components)
        return self

    def transform(self, X, y=None):
        """Apply the approximate feature map to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples in the number of samples
            and n_features is the number of features. All values of X must be
            strictly greater than "-skewedness".

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'random_weights_')

        # Copy first so the in-place shift/log below never mutate the input.
        X = as_float_array(X, copy=True)
        X = check_array(X, copy=False)
        if (X <= -self.skewedness).any():
            raise ValueError("X may not contain entries smaller than"
                             " -skewedness.")

        # Shift into the kernel's domain and move to log-space (in place).
        X += self.skewedness
        np.log(X, X)
        projection = safe_sparse_dot(X, self.random_weights_)
        projection += self.random_offset_
        np.cos(projection, projection)
        projection *= np.sqrt(2.) / np.sqrt(self.n_components)
        return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
    """Approximate feature map for additive chi2 kernel.

    Uses sampling the fourier transform of the kernel characteristic
    at regular intervals.

    Since the kernel that is to be approximated is additive, the components of
    the input vectors can be treated separately.  Each entry in the original
    space is transformed into 2*sample_steps+1 features, where sample_steps is
    a parameter of the method. Typical values of sample_steps include 1, 2 and
    3.

    Optimal choices for the sampling interval for certain data ranges can be
    computed (see the reference). The default values should be reasonable.

    Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.

    Parameters
    ----------
    sample_steps : int, optional
        Gives the number of (complex) sampling points.

    sample_interval : float, optional
        Sampling interval. Must be specified when sample_steps not in {1,2,3}.

    Notes
    -----
    This estimator approximates a slightly different version of the additive
    chi squared kernel then ``metric.additive_chi2`` computes.

    See also
    --------
    SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
        the chi squared kernel.

    sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.

    sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
        squared kernel.

    References
    ----------
    See `"Efficient additive kernels via explicit feature maps"
    <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
    A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
    2011
    """

    def __init__(self, sample_steps=2, sample_interval=None):
        self.sample_steps = sample_steps
        self.sample_interval = sample_interval

    def fit(self, X, y=None):
        """Set parameters."""
        # fit only validates X and resolves the sampling interval; no
        # statistics of X are stored.
        X = check_array(X, accept_sparse='csr')
        if self.sample_interval is None:
            # See reference, figure 2 c)
            if self.sample_steps == 1:
                self.sample_interval_ = 0.8
            elif self.sample_steps == 2:
                self.sample_interval_ = 0.5
            elif self.sample_steps == 3:
                self.sample_interval_ = 0.4
            else:
                raise ValueError("If sample_steps is not in [1, 2, 3],"
                                 " you need to provide sample_interval")
        else:
            self.sample_interval_ = self.sample_interval
        return self

    def transform(self, X, y=None):
        """Apply approximate feature map to X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)

        Returns
        -------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array of sparse matrix depends on
            the type of the input X.
        """
        msg = ("%(name)s is not fitted. Call fit to set the parameters before"
               " calling transform")
        check_is_fitted(self, "sample_interval_", msg=msg)

        X = check_array(X, accept_sparse='csr')
        sparse = sp.issparse(X)

        # check if X has negative values. Doesn't play well with np.log.
        if ((X.data if sparse else X) < 0).any():
            raise ValueError("Entries of X must be non-negative.")
        # zeroth component
        # 1/cosh = sech
        # cosh(0) = 1.0
        transf = self._transform_sparse if sparse else self._transform_dense
        return transf(X)

    def _transform_dense(self, X):
        # Dense counterpart of _transform_sparse; zero entries stay zero in
        # every generated component.
        non_zero = (X != 0.0)
        X_nz = X[non_zero]

        # Zeroth component: sqrt(x * interval).
        X_step = np.zeros_like(X)
        X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)

        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X_nz)
        step_nz = 2 * X_nz * self.sample_interval_

        # Each further step j contributes a cos and a sin component.
        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
            X_new.append(X_step)

            X_step = np.zeros_like(X)
            X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
            X_new.append(X_step)

        return np.hstack(X_new)

    def _transform_sparse(self, X):
        # Sparse counterpart of _transform_dense: reuse the CSR structure
        # (indices/indptr) and only recompute the data array per component.
        indices = X.indices.copy()
        indptr = X.indptr.copy()

        data_step = np.sqrt(X.data * self.sample_interval_)
        X_step = sp.csr_matrix((data_step, indices, indptr),
                               shape=X.shape, dtype=X.dtype, copy=False)
        X_new = [X_step]

        log_step_nz = self.sample_interval_ * np.log(X.data)
        step_nz = 2 * X.data * self.sample_interval_

        for j in range(1, self.sample_steps):
            factor_nz = np.sqrt(step_nz /
                                np.cosh(np.pi * j * self.sample_interval_))

            data_step = factor_nz * np.cos(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

            data_step = factor_nz * np.sin(j * log_step_nz)
            X_step = sp.csr_matrix((data_step, indices, indptr),
                                   shape=X.shape, dtype=X.dtype, copy=False)
            X_new.append(X_step)

        return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
    """Approximate a kernel map using a subset of the training data.

    Constructs an approximate feature map for an arbitrary kernel
    using a subset of the data as basis.

    Read more in the :ref:`User Guide <nystroem_kernel_approx>`.

    Parameters
    ----------
    kernel : string or callable, default="rbf"
        Kernel map to be approximated. A callable should accept two arguments
        and the keyword arguments passed to this object as kernel_params, and
        should return a floating point number.

    n_components : int
        Number of features to construct.
        How many data points will be used to construct the mapping.

    gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
        and sigmoid kernels. Interpretation of the default value is left to
        the kernel; see the documentation for sklearn.metrics.pairwise.
        Ignored by other kernels.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, optional
        Additional parameters (keyword arguments) for kernel function passed
        as callable object.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Subset of training points used to construct the feature map.

    component_indices_ : array, shape (n_components)
        Indices of ``components_`` in the training set.

    normalization_ : array, shape (n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.

    References
    ----------
    * Williams, C.K.I. and Seeger, M.
      "Using the Nystroem method to speed up kernel machines",
      Advances in neural information processing systems 2001

    * T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
      "Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
      Comparison",
      Advances in Neural Information Processing Systems 2012

    See also
    --------
    RBFSampler : An approximation to the RBF kernel using random Fourier
        features.

    sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
    """

    def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
                 kernel_params=None, n_components=100, random_state=None):
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        self.kernel_params = kernel_params
        self.n_components = n_components
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit estimator to data.

        Samples a subset of training points, computes kernel
        on these and computes normalization matrix.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_feature)
            Training data.
        """
        X = check_array(X, accept_sparse='csr')
        rnd = check_random_state(self.random_state)
        n_samples = X.shape[0]

        # get basis vectors
        if self.n_components > n_samples:
            # XXX should we just bail?
            n_components = n_samples
            warnings.warn("n_components > n_samples. This is not possible.\n"
                          "n_components was set to n_samples, which results"
                          " in inefficient evaluation of the full kernel.")
        else:
            n_components = self.n_components
        n_components = min(n_samples, n_components)
        inds = rnd.permutation(n_samples)
        basis_inds = inds[:n_components]
        basis = X[basis_inds]

        basis_kernel = pairwise_kernels(basis, metric=self.kernel,
                                        filter_params=True,
                                        **self._get_kernel_params())

        # sqrt of kernel matrix on basis vectors
        U, S, V = svd(basis_kernel)
        # Clamp tiny singular values so the inverse square root below stays
        # numerically stable.
        S = np.maximum(S, 1e-12)
        self.normalization_ = np.dot(U / np.sqrt(S), V)
        self.components_ = basis
        # NOTE(review): this stores the *full* permutation (length n_samples),
        # while the class docstring documents shape (n_components); only the
        # first n_components entries index the selected basis.
        self.component_indices_ = inds
        return self

    def transform(self, X):
        """Apply feature map to X.

        Computes an approximate feature map using the kernel
        between some training points and X.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Data to transform.

        Returns
        -------
        X_transformed : array, shape=(n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X, accept_sparse='csr')

        kernel_params = self._get_kernel_params()
        embedded = pairwise_kernels(X, self.components_,
                                    metric=self.kernel,
                                    filter_params=True,
                                    **kernel_params)
        return np.dot(embedded, self.normalization_.T)

    def _get_kernel_params(self):
        # Collect keyword arguments for pairwise_kernels; gamma/degree/coef0
        # only apply to the named (non-callable) kernels.
        params = self.kernel_params
        if params is None:
            params = {}
        if not callable(self.kernel):
            params['gamma'] = self.gamma
            params['degree'] = self.degree
            params['coef0'] = self.coef0
        return params
| bsd-3-clause |
sbonner0/DeepTopologyClassification | src/legacy/DataGeneration/GenFingerPrint.py | 1 | 1814 | from graph_tool.all import *
import os, csv, time, datetime, sys
import GFP
import pickle
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
def loadDataAndGenPKL(inputdir, filename):
    """Walk `inputdir`, build an undirected graph from every edge-list file
    found, compute its fingerprint, and append a [label, filename, fingerprint]
    record to a single pickle file at `filename`.

    NOTE(review): this module uses Python 2 `print` statements; it will not
    run under Python 3 without porting.
    """
    # NOTE(review): not closed if an exception occurs below; a try/finally
    # or `with` block would be safer.
    filehandler = open(filename, 'wb')
    # Load the graph data. Need to think about the directory structure and balance of the datasets
    for subdir, dirs, files in os.walk(inputdir):
        # NOTE(review): the loop variable shadows the `filename` parameter
        # (harmless here only because the pickle is already open).
        for filename in files:
            # The class label is the innermost directory name.
            label = subdir.split("/")
            label = label[len(label)-1]
            g = Graph()
            edges = []
            filepath = subdir + os.sep + filename
            date = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
            print date + ": " + filepath
            sys.stdout.flush()
            # Edge lists are whitespace-separated pairs; '#' lines are comments.
            with open(filepath) as networkData:
                datareader = csv.reader(networkData, delimiter=" ")
                for row in datareader:
                    if not row[0].startswith("#"):
                        edges.append([int(row[0]), int(row[1])])
            # Redundant: the with-statement above already closed the file.
            networkData.close()
            g.add_edge_list(edges, hashed=True) # Very important to hash the values here otherwise it creates too many nodes
            g.set_directed(False)
            # Pass the graph to the single fingerprint generation method and return the fingerprint vector
            fp = GFP.GFPSingleFingerprint(g)
            res = [label, filename, fp]
            # One pickle record per graph; read back with repeated pickle.load.
            pickle.dump(res, filehandler)
    filehandler.close()
    return 0
def usage():
    # Print command-line usage (Python 2 print-statement syntax).
    # NOTE(review): "Piclke" and "absloute" are typos in the user-facing
    # string; left untouched here since the string is runtime output.
    print """Usage:\n%s </path/to/input> <pickle filename>\nNB: Piclke created @ launch location unless absloute path used""" % (sys.argv[0])
if __name__ == "__main__":
    # Require exactly two CLI arguments: input directory and output pickle.
    if len (sys.argv) != 3 :
        usage()
        sys.exit (1)
    loadDataAndGenPKL(sys.argv[1],sys.argv[2])
| gpl-3.0 |
marcua/qurk_experiments | qurkexp/join/bestfit.py | 1 | 1419 | #!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.stats.stats import ss
#setup_environ(settings)
from scipy import stats
def sigmoid(x, x0, k):
    """Logistic curve 1 / (1 + exp(-k * (x - x0))): midpoint x0, slope k."""
    return 1.0 / (1.0 + np.exp(-k * (x - x0)))
def regress(x, y, howmuch):
    """Fit a sigmoid to the first `howmuch` points of (x, y) and return the
    optimal parameters (x0, k) found by scipy's curve_fit."""
    # polynomials don't work that well
    # p1 = np.poly1d(np.polyfit(x[:howmuch],y[:howmuch],1))
    # p2 = np.poly1d(np.polyfit(x[:howmuch],y[:howmuch],2))
    # p3 = np.poly1d(np.polyfit(x[:howmuch],y[:howmuch],3))
    # sigmoid of the form 1/(1+e^(-k*(x-x0))), so we can change its slope
    # (k) and intercept (x0)
    # (the covariance estimate pcov is discarded)
    popt, pcov = curve_fit(sigmoid, x[:howmuch], y[:howmuch])
    return popt
def plot(x, y, popt):
    """Show the raw points (red dots) with the fitted sigmoid (blue line).

    Blocks until the matplotlib window is closed (plt.show).
    """
    xp = np.linspace(0, 100, 1000)
    plt.plot(x, y, 'ro', xp, sigmoid(xp, *popt), 'b-')
    plt.ylim(.8,1.05)
    plt.show()
def sum_squared_error(x, y, popt):
    """Return the sum of squared residuals between y and sigmoid(x, *popt).

    Computes the sum of squares directly with NumPy instead of calling
    ``scipy.stats.stats.ss``, which was deprecated in SciPy 0.15 and
    removed in SciPy 0.17, so this function no longer fails on modern SciPy.
    """
    residuals = y - sigmoid(x, *popt)
    return np.sum(residuals ** 2)
def load(filename):
    """Read one float per line from `filename`.

    Returns
    -------
    x : numpy.ndarray
        Integer sample indices 0 .. n-1.
    y : numpy.ndarray
        The float values read from the file, in order.
    """
    # `with` guarantees the handle is closed (the original leaked it), and
    # np.arange replaces the Python-2-only xrange with the same result.
    with open(filename) as fh:
        vals = [float(line) for line in fh]
    y = np.array(vals)
    x = np.arange(len(vals))
    return x, y
if __name__ == '__main__':
    if len(sys.argv) != 2:
        # NOTE(review): Python 2 print-statement syntax in this script.
        print "arguments: filename to do regression on"
        sys.exit(-1)
    x,y = load(sys.argv[1])
    # Fit on the first 40 points and visualise the resulting sigmoid.
    plot(x, y, regress(x, y, 40))
    # Report how the fit error evolves as more points are included.
    for i in [5, 10, 20, 30, 40, 50, 60, 70, 75]:
        print i, sum_squared_error(x, y, regress(x, y, i))
| bsd-3-clause |
vybstat/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
    """optparse coercion helper: keep the literal 'auto', else cast to float."""
    return "auto" if val == "auto" else float(val)
def type_auto_or_int(val):
    """optparse coercion helper: keep the literal 'auto', else cast to int."""
    return "auto" if val == "auto" else int(val)
def compute_time(t_start, delta):
    """Convert a ``datetime.timedelta`` to seconds as a float.

    Parameters
    ----------
    t_start : datetime
        Unused; kept so existing call sites keep working.
    delta : datetime.timedelta
        Elapsed time to convert.

    Returns
    -------
    float
        ``delta`` in seconds.  ``total_seconds()`` includes the days
        component, which the previous ``seconds + microseconds / 1e6``
        formula silently dropped (wrong for runs longer than 24 hours).
    """
    return delta.total_seconds()
def bench_scikit_transformer(X, transfomer):
    """Time fit and transform of a (cloned) transformer on X.

    Returns (time_to_fit, time_to_transform) in seconds.  The input
    estimator is cloned first so the benchmark never reuses a fitted state.
    Note: the misspelled parameter name `transfomer` is kept as-is because
    it is part of the public signature.
    """
    gc.collect()
    clf = clone(transfomer)

    # start time
    t_start = datetime.now()
    clf.fit(X)
    delta = (datetime.now() - t_start)
    # stop time
    time_to_fit = compute_time(t_start, delta)

    # start time
    t_start = datetime.now()
    clf.transform(X)
    delta = (datetime.now() - t_start)
    # stop time
    time_to_transform = compute_time(t_start, delta)

    return time_to_fit, time_to_transform
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
                            random_state=None):
    """Make random data with uniformly located non-zero entries carrying
    Gaussian-distributed values.

    Returns the same (n_samples, n_features) matrix twice: as a dense
    ndarray and as a CSR sparse matrix.
    """
    rng = np.random.RandomState(random_state)
    # Draw in the same order as before: values, then row and column indices.
    values = rng.randn(n_nonzeros)
    rows = rng.randint(n_samples, size=n_nonzeros)
    cols = rng.randint(n_features, size=n_nonzeros)
    coo = sp.coo_matrix((values, (rows, cols)),
                        shape=(n_samples, n_features))
    return coo.toarray(), coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
    """Print one aligned result row: transformer name, fit and transform times."""
    name_col = clf_type.ljust(30)
    fit_col = ("%.4fs" % time_fit).center(12)
    transform_col = ("%.4fs" % time_transform).center(12)
    print("%s | %s | %s" % (name_col, fit_col, transform_col))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
AlertaDengue/InfoDenguePredict | infodenguepredict/models/visualizations/metrics_viz.py | 1 | 7870 | import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
from decouple import config
from infodenguepredict.data.infodengue import get_cluster_data, get_city_names
from infodenguepredict.models.random_forest import build_lagged_features
def loss_colormap(state, models, metric='mean_squared_error', predict_n=1):
"""
Colormap viz for model losses.
:param state: State to plot
:param models: List of models to show -> ['lstm', 'rf', 'tpot']
:param metric: Metric for y-axis -> ['mean_absolute_error', 'explained_variance_score', 'mean_squared_error',
'mean_squared_log_error', 'median_absolute_error', 'r2_score']
:param predict_n: Which window to compare
:return: Plot
"""
clusters = pd.read_pickle('../../analysis/clusters_{}.pkl'.format(state))
clusters = [y for x in clusters for y in x]
df = pd.DataFrame(columns=models, index=clusters)
for city in clusters:
if 'rf' in models:
rf = pd.read_pickle('../saved_models/random_forest/{}/rf_metrics_{}.pkl'.format(state, city))
df['rf'][city] = rf[predict_n][metric]
if 'lstm' in models:
lstm = pd.read_pickle('../saved_models/lstm/{}/lstm_metrics_{}.pkl'.format(state, city))
df['lstm'][city] = lstm[predict_n][metric]
if 'tpot' in models:
tpot = pd.read_pickle('../saved_models/tpot/{}/tpot_metrics_{}.pkl'.format(state, city))
df['tpot'][city] = tpot[predict_n][metric]
if 'rqf' in models:
rqf = pd.read_pickle('../saved_models/quantile_forest/{}/qf_metrics_{}.pkl'.format(state, city))
df['rqf'][city] = rqf[predict_n][metric]
df = df[df.columns].astype('float')
# falta normalizar a data?
sns_plot = sns.heatmap(df, cmap='vlag')
plt.savefig('{}_losses_heatmap.png'.format(state), dpi=400)
plt.show()
return None
def calculate_mape(state, lookback, horizon):
clusters = pd.read_pickle('../analysis/clusters_{}.pkl'.format(state))
for cluster in clusters:
data_full, group = get_cluster_data(geocode=cluster[0], clusters=clusters,
data_types=['alerta'], cols=['casos_est', 'casos'])
for city in cluster:
print(city)
target = 'casos_est_{}'.format(city)
casos_est_columns = ['casos_est_{}'.format(i) for i in group]
casos_columns = ['casos_{}'.format(i) for i in group]
data = data_full.drop(casos_columns, axis=1)
data_lag = build_lagged_features(data, lookback)
data_lag.dropna()
targets = {}
for d in range(1, horizon + 1):
if d == 1:
targets[d] = data_lag[target].shift(-(d - 1))
else:
targets[d] = data_lag[target].shift(-(d - 1))[:-(d - 1)]
X_data = data_lag.drop(casos_est_columns, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X_data, data_lag[target],
train_size=0.7, test_size=0.3, shuffle=False)
try:
metrics = pd.read_pickle('~/Documentos/resultados_infodengue/lasso/{}/lasso_metrics_{}.pkl'.format(state, city))
except EOFError:
print('---------------------------------')
print('ERROR', 'eof', city)
print('----------------------------------')
if metrics.shape[1] != 4:
print('---------------------------------')
print('ERROR', 'shape', city)
print('----------------------------------')
continue
values = []
for d in range(1, horizon + 1):
mae = metrics[d]['mean_absolute_error']
tgtt = targets[d][len(X_train):]
factor = (len(tgtt) / (len(tgtt) - 1)) * sum([abs(i - (tgtt[pos])) for pos, i in enumerate(tgtt[1:])])
if factor == 0:
values.append(np.nan)
else:
values.append(mae / factor)
metrics.loc['mean_absolute_scaled_error'] = values
metrics.to_pickle('~/Documentos/resultados_infodengue/lasso/{}/lasso_metrics_{}.pkl'.format(state, city))
return None
def loss_scatter(state, models, metric='mean_squared_error', predict_n=1):
"""
Scatter viz for model losses.
:param state: State to plot
:param models: List of models to show -> ['lstm', 'rf', 'tpot']
:param xaxis: List o xaxis possibilites -> ['cluster_size', 'pop_size', 'total_cases', 'latitude']
:param metric: Metric for y-axis -> ['mean_absolute_error', 'explained_variance_score', 'mean_squared_error',
'mean_squared_log_error', 'median_absolute_error', 'r2_score']
:param predict_n: Which window to compare
:return: Plot
"""
conexao = create_engine("postgresql://{}:{}@{}/{}".format(config('PSQL_USER'),
config('PSQL_PASSWORD'),
config('PSQL_HOST'),
config('PSQL_DB')))
if state == 'CE':
s = 'CE'
if state == 'RJ':
s = 'Rio de Janeiro'
if state == 'PR':
s = 'Paraná'
sql = 'select geocodigo,nome,populacao,casos_est from "Dengue_global"."Municipio" m JOIN "Municipio"."Historico_alerta" h ON m.geocodigo=h.municipio_geocodigo where uf=\'{}\';'.format(
s)
data = pd.read_sql_query(sql, conexao)
grouped = data.groupby('geocodigo')
clusters = pd.read_pickle('infodenguepredict/analysis/clusters_{}.pkl'.format(state))
cities = [y for x in clusters for y in x]
df = pd.DataFrame(columns=models + ['cluster_size', 'pop_size', 'total_casos', 'n_epidemia'], index=cities)
for city in cities:
group = grouped.get_group(city)
df['total_casos'][city] = group['casos_est'].sum()
city_pop = group['populacao'].iloc[0]
df['pop_size'][city] = city_pop
df['n_epidemia'][city] = len(group[group['casos_est'] > int(city_pop / 1000)])
if 'rf' in models:
rf = pd.read_pickle('~Documentos/resultados_infodengue/random_forest/{}/rf_metrics_{}.pkl'.format(state, city))
df['rf'][city] = rf[predict_n][metric]
if 'lstm' in models:
lstm = pd.read_pickle('~Documentos/resultados_infodengue/lstm/{}/metrics_lstm_{}.pkl'.format(state, city))
df['lstm'][city] = lstm[predict_n][metric]
if 'lasso' in models:
lasso = pd.read_pickle('~Documentos/resultados_infodengue/lasso/{}/lasso_metrics_{}.pkl'.format(state, city))
try:
df['lasso'][city] = lasso[predict_n][metric]
except KeyError:
df['lasso'][city] = np.nan
for cluster in clusters:
df['cluster_size'].loc[cluster] = len(cluster)
df = df[df.columns].astype('float')
# df = df[df.total_casos > 100]
df = df[df.pop_size < df.pop_size.mean() + 4 * df.pop_size.std()]
df = df[df.total_casos < df.total_casos.mean() + 4 * df.total_casos.std()]
fig, axs = plt.subplots(1, 3, figsize=(30, 7))
colors = ['b', 'r', 'g']
for pos, m in enumerate(models):
df_m = df[df[m] < df[m].mean() + 1 * df[m].std()]
df_m.plot.scatter(x='total_casos', y=m, ax=axs[0], alpha=0.3, grid=True, c=colors[pos])
df_m.plot.scatter(x='pop_size', y=m, ax=axs[1], alpha=0.3, grid=True, c=colors[pos])
df_m.plot.scatter(x='n_epidemia', y=m, ax=axs[2], alpha=0.3, grid=True, c=colors[pos])
plt.legend(models)
return df
if __name__ == "__main__":
loss_colormap('RJ', ['rqf'])
| gpl-3.0 |
hickerson/bbn | fable/fable_sources/libtbx/auto_build/package_defs.py | 1 | 1935 |
"""
Listing of current dependencies for CCTBX and related applications (including
LABELIT, xia2, DIALS, and Phenix with GUI). Not all of these can be downloaded
via the web (yet).
"""
from __future__ import division
BASE_CCI_PKG_URL = "http://cci.lbl.gov/third_party"
BASE_XIA_PKG_URL = "http://www.ccp4.ac.uk/xia"
# from CCI
PYTHON_PKG = "Python-2.7.6_cci.tar.gz"
# XXX we maintain a patched copy to avoid an ICE with gcc 3.4
NUMPY_PKG = "numpy-1.6.2.tar.gz" # used many places
IMAGING_PKG = "Imaging-1.1.7.tar.gz" # for labelit, gltbx
REPORTLAB_PKG = "reportlab-2.6.tar.gz" # for labelit
ZLIB_PKG = "zlib-1.2.7.tar.gz"
SCIPY_PKG = "scipy-0.11.0.tar.gz" # not used by default
PYRTF_PKG = "PyRTF-0.45.tar.gz" # for phenix.table_one, etc.
BIOPYTHON_PKG = "biopython-1.58.tar.gz" # used in iotbx
# from xia2 page
HDF5_PKG = "hdf5-1.8.8.tar.bz2" # dxtbx
H5PY_PKG = "h5py-2.0.1-edit.tar.gz" # dxtbx
# GUI dependencies
LIBPNG_PKG = "libpng-1.2.32.tar.gz"
FREETYPE_PKG = "freetype-2.4.2.tar.gz"
# Linux-only
GETTEXT_PKG = "gettext-0.18.2.tar.gz"
GLIB_PKG = "glib-2.12.11.tar.gz"
EXPAT_PKG = "expat-1.95.8.tar.gz"
FONTCONFIG_PKG = "fontconfig-2.3.95.tar.gz"
RENDER_PKG = "render-0.8.tar.gz"
XRENDER_PKG = "xrender-0.8.3.tar.gz"
XFT_PKG = "xft-2.1.2.tar.gz"
PIXMAN_PKG = "pixman-0.19.2.tar.gz"
CAIRO_PKG = "cairo-1.8.10.tar.gz"
PANGO_PKG = "pango-1.16.1.tar.gz"
ATK_PKG = "atk-1.9.1.tar.gz"
TIFF_PKG = "tiff-v3.6.1.tar.gz"
GTK_PKG = "gtk+-2.10.11.tar.gz"
GTK_ENGINE_PKG = "clearlooks-0.5.tar.gz"
GTK_THEME_PKG = "gtk_themes.tar.gz"
# end Linux-only
FONT_PKG = "fonts.tar.gz"
WXPYTHON_DEV_PKG = "wxPython-src-3.0.0.0_cci.tar.gz" # Mac 64-bit
WXPYTHON_PKG = "wxPython-src-2.8.12.1.tar.gz" # Linux, Mac 32-bit
WEBKIT_PKG = "wxwebkit.tar.gz" # not currently used
MATPLOTLIB_PKG = "matplotlib-1.3.0.tar.gz"
PY2APP_PKG = "py2app-0.7.3.tar.gz" # Mac only
| mit |
gviejo/ThalamusPhysio | python/figure_article/main_article_fig_supp_1.py | 1 | 16135 | #!/usr/bin/env python
import sys
sys.path.append("../")
import numpy as np
import pandas as pd
import scipy.io
from functions import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import sys
import scipy.ndimage.filters as filters
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from functools import reduce
from multiprocessing import Pool
import h5py as hd
from scipy.stats import zscore
from sklearn.manifold import TSNE, SpectralEmbedding
from skimage import filters
import os
from scipy.misc import imread
from skimage.filters import gaussian
space = pd.read_hdf("../../figures/figures_articles/figure1/space.hdf5")
burst = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
burst = burst.loc[space.index]
# autocorr = pd.read_hdf("../../figures/figures_articles/figure1/autocorr.hdf5")
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
# carte38_mouse17 = imread('../../figures/mapping_to_align/paxino/paxino_38_mouse17.png')
# carte38_mouse17_2 = imread('../../figures/mapping_to_align/paxino/paxino_38_mouse17_2.png')
# bound_map_38 = (-2336/1044, 2480/1044, 0, 2663/1044)
# cut_bound_map = (-86/1044, 2480/1044, 0, 2663/1044)
carte_adrien = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_ALL-01.png')
carte_adrien2 = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_Contour-01.png')
bound_adrien = (-398/1254, 3319/1254, -(239/1254 - 20/1044), 3278/1254)
tmp = cPickle.load(open("../../figures/figures_articles/figure1/shifts.pickle", 'rb'))
angles = tmp['angles']
shifts = tmp['shifts']
hd_index = space.index.values[space['hd'] == 1]
neurontoplot = [np.intersect1d(hd_index, space.index.values[space['cluster'] == 1])[0],
burst.loc[space.index.values[space['cluster'] == 0]].sort_values('sws').index[3],
burst.sort_values('sws').index.values[-20]]
# specific to mouse 17
subspace = pd.read_hdf("../../figures/figures_articles/figure1/subspace_Mouse17.hdf5")
data = cPickle.load(open("../../figures/figures_articles/figure1/rotated_images_Mouse17.pickle", 'rb'))
rotated_images = data['rotated_images']
new_xy_shank = data['new_xy_shank']
bound = data['bound']
data = cPickle.load(open("../../data/maps/Mouse17.pickle", 'rb'))
x = data['x']
y = data['y']*-1.0+np.max(data['y'])
headdir = data['headdir']
xy_pos = new_xy_shank.reshape(len(y), len(x), 2)
###############################################################################################################
###############################################################################################################
# PLOT
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean*1.8 # height in inches
fig_size = [fig_width,fig_height]
return fig_size
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 8, # LaTeX default is 10pt font.
"font.size": 7,
"legend.fontsize": 7, # Make the legend/label fonts a little smaller
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 0.2,
"axes.linewidth" : 0.8,
"ytick.major.size" : 1.5,
"xtick.major.size" : 1.5
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
colors = ['red', 'green', 'blue', 'orange']
cmaps = ['Reds', 'Greens', 'Blues', 'Purples', 'Oranges']
markers = ['o', '^', '*', 's']
fig = figure(figsize = figsize(1.0))
outergs = gridspec.GridSpec(5,3, figure = fig, height_ratios = [0.5,0.5,0.25,0.25,0.25])
#############################################
# A. TOTAL MATRIX NEURON COUNT MOUSE 17
#############################################
gsm = gridspec.GridSpecFromSubplotSpec(1,2, subplot_spec = outergs[0,0], width_ratios = [0.5, 0.5])
count_cmap = 'jet'
# axA = fig.add_subplot(outergs[0,0])
axA = fig.add_subplot(gsm[0,0])
simpleaxis(axA)
axA.text(-0.20, 1.01, "A", transform = axA.transAxes, fontsize = 10)
im = imshow(data['total'], aspect = 'equal', cmap = count_cmap)
ylabel("Session number")
xlabel("Shank number")
# arrow
cax = inset_axes(axA, "100%", "50%",
bbox_to_anchor=(1.4, 0.35, 1, 1),
bbox_transform=axA.transAxes,
loc = 'lower left')
cax.arrow(0,0.2,0.8,0, linewidth = 4, head_width = 0.1, facecolor = 'black', head_length = 0.05)
noaxis(cax)
cax.set_xticks([])
cax.set_yticks([])
# gaussian
cax = inset_axes(axA, "50%", "20%",
bbox_to_anchor=(1.6, 0.1, 1, 1),
bbox_transform=axA.transAxes,
loc = 'lower left')
window = scipy.signal.gaussian(51, std=7)
simpleaxis(cax)
cax.plot(window)
cax.set_xticks([])
cax.set_yticks([])
cax.set_xlabel("2. Smoothing", fontsize = 8)
cax.set_title("1. Interpolation", fontsize = 8, pad = 30.0)
#############################################
# B. COUT NEURONS MOUSE 17 SMOOTHED
#############################################
axB = fig.add_subplot(outergs[0,1])
simpleaxis(axB)
axB.text(-0.5, 1.01, "B", transform = axB.transAxes, fontsize = 10)
xnew, ynew, tmp = interpolate(data['total'], data['x'], data['y'], 0.010)
total2 = gaussian(tmp, sigma = 15.0, mode = 'reflect')
imshow(total2, aspect = 'equal', extent = (x[0], x[-1], y[0], y[-1]), cmap = count_cmap)
xlabel("Shank position (mm)")
ylabel("Session position (mm)")
xl = ['' for _ in range(len(x))]
for i in np.arange(0 ,len(x), 2): xl[i] = str(np.round(data['x'][i], 2))
yl = ['' for _ in range(len(y))]
for i in np.arange(0 ,len(y), 4): yl[i] = str(np.round(data['y'][i], 3))
xticks(data['x'], xl)
yticks(data['y'], yl)
#############################################
# C. RECORDINGS SITE MOUSE 17
#############################################
axC = fig.add_subplot(outergs[1,0])
noaxis(axC)
axC.text(-0.15, 0.95, "C", transform = axC.transAxes, fontsize = 10)
sc = scatter(new_xy_shank[:,0], new_xy_shank[:,1], c = data['total'].flatten(), s = data['total'].flatten()*0.6, cmap = count_cmap)
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
#colorbar
cax = inset_axes(axC, "30%", "5%",
bbox_to_anchor=(0.85, 0.2, 1, 1),
bbox_transform=axC.transAxes,
loc = 'lower left')
cb = colorbar(sc, cax = cax, orientation = 'horizontal')
cb.set_label('Neuron count', labelpad = 0)
cb.ax.xaxis.set_tick_params(pad = 2)
cax.set_title("Mouse 1", fontsize = 9, pad = 2.5)
#############################################
# D. DENSITY NEURONS MOUSE 17 SMOOTHED
#############################################
axD = fig.add_subplot(outergs[1,1])
noaxis(axD)
axD.text(-0.2, 0.95, "D", transform = axD.transAxes, fontsize = 10)
h, w = total2.shape
total3 = np.zeros((h*3, w*3))*np.nan
total3[h:h*2,w:w*2] = total2.copy() + 1.0
total3 = rotateImage(total3, -angles[1])
total3[total3 == 0.0] = np.nan
total3 -= 1.0
tocrop = np.where(~np.isnan(total3))
total3 = total3[tocrop[0].min()-1:tocrop[0].max()+1,tocrop[1].min()-1:tocrop[1].max()+1]
xlength, ylength = getXYshapeofRotatedMatrix(data['x'].max(), data['y'].max(), angles[1])
bound = (shifts[1][0],xlength+shifts[1][0],shifts[1][1],ylength+shifts[1][1])
imshow(total3, extent = bound, alpha = 0.8, aspect = 'equal', cmap = count_cmap)
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
#############################################
# E. SQUARE ALL NEURONS
#############################################
axE = fig.add_subplot(outergs[0,2])
noaxis(axE)
axE.text(-0.2, 1.0, "E", transform = axE.transAxes, fontsize = 10)
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
leghandles = []
xbins = np.arange(-0.4, 2.1, 0.2)
ybins = np.arange(0.4, 2.8, 0.2)[::-1]
all_count = np.zeros((len(ybins), len(xbins)))
for i, m, l in zip([1,0,2,3], ['Mouse17', 'Mouse12', 'Mouse20', 'Mouse32'], [1,2,3,4]):
data = cPickle.load(open("../../figures/figures_articles/figure1/rotated_images_"+m+".pickle", 'rb'))
new_xy_shank = data['new_xy_shank']
xidx = np.digitize(new_xy_shank[:,0], xbins)
yidx = np.digitize(new_xy_shank[:,1], ybins)
data = cPickle.load(open("../../data/maps/"+m+".pickle", 'rb'))
xx, yy = np.meshgrid(np.arange(len(data['x'])), np.arange(len(data['y'])))
for j, x, y in zip(np.arange(len(xidx)), xidx, yidx):
all_count[y,x] += data['total'][yy.flatten()[j],xx.flatten()[j]]
xx = new_xy_shank[:,0].reshape(len(data['y']), len(data['x']))
yy = new_xy_shank[:,1].reshape(len(data['y']), len(data['x']))
lower_left = (xx[-1,0], yy[-1,0])
rect = Rectangle(lower_left, data['x'].max(), data['y'].max(), -angles[i], fill = False, edgecolor = colors[i])
axE.add_patch(rect)
leghandles.append(Line2D([], [], color = colors[i], marker = '', label = 'Mouse '+str(l)))
legend(handles = leghandles, loc = 'lower left', bbox_to_anchor=(-0.2, -0.1))
ylim(0, 2.7)
#############################################
# F. DENSITY NEURONS ALL MOUSE HISTOFRAM
#############################################
axF = fig.add_subplot(outergs[1,2])
noaxis(axF)
axF.text(-0.1, 0.95, "F", transform = axF.transAxes, fontsize = 10)
# all_count[all_count <= 0.0] = np.nan
carte_adrien3 = np.copy(carte_adrien2)
carte_adrien3[:,:,-1][carte_adrien3[:,:,-1]<150] = 0.0
im = imshow(all_count, extent = (xbins[0], xbins[-1], ybins[-1], ybins[0]), aspect = 'equal', alpha = 0.8, cmap = count_cmap, interpolation = 'bilinear')
imshow(carte_adrien3, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
# xlim(-2.2, 2.2)
# ylim(0, 2.7)
#colorbar
cax = inset_axes(axF, "25%", "5%",
bbox_to_anchor=(0.9, 0.15, 1, 1),
bbox_transform=axF.transAxes,
loc = 'lower left')
cb = colorbar(im, cax = cax, orientation = 'horizontal')
cb.set_label('Neuron count', labelpad = 0)
cb.ax.xaxis.set_tick_params(pad = 2)
cax.set_title("All Mice", fontsize = 9, pad = 2.5)
#############################################
# G. H. I MOUSE 12 20 32
#############################################
lb = ['G', 'H', 'I']
mn = ['Mouse 2', 'Mouse 3', 'Mouse 4']
idn = [0,2,3]
gs = gridspec.GridSpecFromSubplotSpec(3,4, subplot_spec = outergs[2:,:], hspace = 0.1, wspace = 0.2)
for i, m in enumerate(['Mouse12', 'Mouse20', 'Mouse32']):
for j in range(4):
ax = fig.add_subplot(gs[i,j])
noaxis(ax)
if j == 0: ax.text(-0.0, 0.95, lb[i], transform = ax.transAxes, fontsize = 10)
data1 = cPickle.load(open("../../figures/figures_articles/figure1/rotated_images_"+m+".pickle", 'rb'))
data2 = cPickle.load(open("../../data/maps/"+m+".pickle", 'rb'))
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
xlim(-0.5,3.0)
ylim(0, 2.8)
if j == 0: # POSITION HEAD DIR
new_xy_shank = data1['new_xy_shank']
tmp2 = data2['headdir']
tmp2[tmp2<0.05] = 0.0
scatter(new_xy_shank[:,0], new_xy_shank[:,1], s = tmp2*5., label = 'HD',
color = 'red', marker = 'o', alpha = 1.0)
scatter(new_xy_shank[:,0], new_xy_shank[:,1], s = 1, color = 'black', marker = '.',
alpha = 1.0, linewidths = 0.5, label = 'shanks')
title(mn[i], loc = 'right', fontsize = 12, pad = -0.01)
if i == 0: leg = legend(loc = 'lower left', fontsize = 7, frameon = False, bbox_to_anchor=(0.8, 0))
elif j == 1: # NEURON COUNT
xnew, ynew, tmp = interpolate(data2['total'], data2['x'], data2['y'], 0.010)
total2 = gaussian(tmp, sigma = 15.0, mode = 'reflect')
h, w = total2.shape
total3 = np.zeros((h*3, w*3))*np.nan
total3[h:h*2,w:w*2] = total2.copy() + 1.0
total3 = rotateImage(total3, -angles[idn[i]])
total3[total3 == 0.0] = np.nan
total3 -= 1.0
tocrop = np.where(~np.isnan(total3))
total3 = total3[tocrop[0].min()-1:tocrop[0].max()+1,tocrop[1].min()-1:tocrop[1].max()+1]
xlength, ylength = getXYshapeofRotatedMatrix(data2['x'].max(), data2['y'].max(), angles[idn[i]])
bound = (shifts[idn[i]][0],xlength+shifts[idn[i]][0],shifts[idn[i]][1],ylength+shifts[idn[i]][1])
total3 -= np.nanmin(total3)
total3 /= np.nanmax(total3)
total3 *= np.max(data2['total'])
im = imshow(total3, extent = bound, alpha = 0.8, aspect = 'equal', cmap = count_cmap)
#colorbar
cax = inset_axes(ax, "30%", "7%",
bbox_to_anchor=(0.7, 0.12, 1, 1),
bbox_transform=ax.transAxes,
loc = 'lower left')
cb = colorbar(im, cax = cax, orientation = 'horizontal', ticks = [0, int(np.max(data2['total']))])
cb.set_label('Neuron count', labelpad = 0)
cb.ax.xaxis.set_tick_params(pad = 2)
elif j == 3:
# cluster 2 burstiness
tmp = data1['rotated_images'][-1]
bound = data1['bound']
imshow(tmp, extent = bound, alpha = 0.9, aspect = 'equal', cmap = 'viridis')
if i == 0:
title("Cluster 2 (Burstiness)")
#colorbar
cax = inset_axes(ax, "30%", "8%",
bbox_to_anchor=(0.7, -0.1, 1, 1),
bbox_transform=ax.transAxes,
loc = 'lower left')
cb = matplotlib.colorbar.ColorbarBase(cax, cmap='viridis', ticks = [0, 1], orientation = 'horizontal')
cb.set_label('Burstiness', labelpad = -25)
cb.ax.xaxis.set_tick_params(pad = 1)
elif j == 2: # Cluster `
# cluster 1 HD
tmp = data1['rotated_images'][1]
tmp[tmp<0.0] = 0.0
bound = data1['bound']
imshow(tmp, extent = bound, alpha = 0.9, aspect = 'equal', cmap = 'Reds')
if i == 0:
title('Cluster 1 (HD)')
#colorbar
cax = inset_axes(ax, "30%", "8%",
bbox_to_anchor=(0.7, -0.1, 1, 1),
bbox_transform=ax.transAxes,
loc = 'lower left')
cb = matplotlib.colorbar.ColorbarBase(cax, cmap='Reds', ticks = [0, 1], orientation = 'horizontal')
cb.set_label('Density', labelpad = -25)
cb.ax.xaxis.set_tick_params(pad = 1)
# subplots_adjust(left = 0.01, right = 0.98, top = 0.98, bottom = 0.1, wspace = 0.2, hspace = 0.9)
subplots_adjust(wspace = -0.1, left = 0.02, right = 0.98, bottom = 0.01, top = 0.98, hspace = 0.5)
savefig("../../figures/figures_articles/figart_supp_1.pdf", dpi = 900, facecolor = 'white')
os.system("evince ../../figures/figures_articles/figart_supp_1.pdf &")
| gpl-3.0 |
makelove/OpenCV-Python-Tutorial | ch46-机器学习-K近邻/2-使用kNN对手写数字OCR.py | 1 | 1987 | # -*- coding: utf-8 -*-
# @Time : 2017/7/13 下午7:32
# @Author : play4fun
# @File : 2-使用kNN对手写数字OCR.py
# @Software: PyCharm
"""
2-使用kNN对手写数字OCR.py:
"""
# 准备数据
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('../data/digits.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
# Make it into a Numpy array. It size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:, :50].reshape(-1, 400).astype(np.float32) # Size = (2500,400)
test = x[:, 50:100].reshape(-1, 400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k, 250)[:, np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.ml.KNearest_create()
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
ret, result, neighbours, dist = knn.findNearest(test, k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result == test_labels
correct = np.count_nonzero(matches)
accuracy = correct * 100.0 / result.size
print('准确率', accuracy) # 准确率91.76%
# save the data
np.savez('knn_data.npz', train=train, train_labels=train_labels,test=test,test_labels=test_labels)
# Now load the data
with np.load('knn_data_num.npz') as data:
print(data.files)
train = data['train']
train_labels = data['train_labels']
test = data['test']
test_labels = data['test_labels']
#TODO 怎样预测数字?
retval, results=knn.predict(test[1003:1005])
# Docstring: predict(samples[, results[, flags]]) -> retval, results
print(retval, results)#(4.0, array([[ 4.],[ 4.]], dtype=float32))
#对比
cv2.imwrite('test[1005].jpg',test[1005].reshape((20,20))) | mit |
bmazin/SDR | Projects/FirmwareTests/darkDebug/a2gPhaseStreamTest.py | 1 | 17461 | """
File: a2gPhaseStreamTest.py
Author: Matt Strader
Date: Jun 29, 2016
Firmware: darksc2_2016_Jun_28_1925.fpg
This script inserts a phase pulse in the qdr dds table. It checks snap blocks for each stage of the channelization process. In the end the phase pulse should be recovered in the phase timestream of the chosen channel.
"""
import matplotlib, time, struct
import numpy as np
import matplotlib.pyplot as plt
import casperfpga
import corr
import logging
from myQdr import Qdr as myQdr
import types
import sys
import functools
from loadWavePulseLut import loadWaveToMem,loadDdsToMem
from loadWaveLut import writeBram
from Utils.binTools import castBin
def snapDdc(fpga,bSnapAll=False,bPlot=False,selBinIndex=0,selChanIndex=0,selChanStream=0,ddsAddrTrig=0):
"""trigger and read snapshots of aligned input and data values in the firmware
INPUTS:
bSnapAll: If True, snapshot will record values for all channels, not just one
bPlot: If True, will popup a plot of snapped values
selBinIndex: the fft bin to be inspected
selChanIndex: the channel within a stream (after channel selection) to be inspected
selChanStream: which of the four simultaneous streams of channels to inspect
ddsAddrTrig: trigger when the address for the DDS look up table reaches this value (out of 2**20)
OUTPUT:
dict with keys:
'bin': complex values seen in a chosen fft bin
'chan': complex values in a chosen channel
'dds': complex values coming from the QDR look-up table
'mix': complex values after the dds mixer but before the low pass filter
'ddcOut': complex values after the DDC low pass filter and downsampling
'chanCtr': the channel numbers associated with values in 'chan','dds','mix','ddcOut'.
If bSnapAll=False, these should all equal selChanIndex
'expectedMix': the values of 'chan' multiplied by 'dds'. Hopefully this matches the values in
'mix'.
"""
#set up the snapshots to record the selected bin/channel
fpga.write_int('sel_bin',selBinIndex)
fpga.write_int('sel_bch',selChanIndex)
#fpga.write_int('sel_stream',selChanStream)
fpga.write_int('sel_ctr',ddsAddrTrig)
snapshotNames = ['snp2_bin_ss','snp2_ch_ss','snp2_dds_ss','snp2_mix_ss','snp2_ctr_ss','snp3_ddc_ss','snp3_cap_ss']
for name in snapshotNames:
fpga.snapshots[name].arm(man_valid=bSnapAll)
time.sleep(.1)
fpga.write_int('trig_buf',1)#trigger snapshots
time.sleep(.1) #wait for other trigger conditions to be met
fpga.write_int('trig_buf',0)#release trigger
#in most of the snapshots, we get two IQ values per cycle (I[t=0],Q[t=0]) and (I[t=1],Q[t=1])
#Retrieve them separately and then interleave them
binData = fpga.snapshots['snp2_bin_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
i0 = np.array(binData['i0'])
i1 = np.array(binData['i1'])
q0 = np.array(binData['q0'])
q1 = np.array(binData['q1'])
#interleave values from alternating cycles (I0,Q0) and (I1,Q1)
bi = np.vstack((i0,i1)).flatten('F')
bq = np.vstack((q0,q1)).flatten('F')
chanData = fpga.snapshots['snp2_ch_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
ci0 = np.array(chanData['i0'])
ci1 = np.array(chanData['i1'])
cq0 = np.array(chanData['q0'])
cq1 = np.array(chanData['q1'])
ci = np.vstack((ci0,ci1)).flatten('F')
cq = np.vstack((cq0,cq1)).flatten('F')
ddsData = fpga.snapshots['snp2_dds_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
di0 = np.array(ddsData['i0'])
di1 = np.array(ddsData['i1'])
dq0 = np.array(ddsData['q0'])
dq1 = np.array(ddsData['q1'])
#interleave i0 and i1 values
di = np.vstack((di0,di1)).flatten('F')
dq = np.vstack((dq0,dq1)).flatten('F')
expectedMix = (ci+1.j*cq)*(di-1.j*dq)
mixerData = fpga.snapshots['snp2_mix_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
mi0 = np.array(mixerData['i0'])
mi1 = np.array(mixerData['i1'])
mq0 = np.array(mixerData['q0'])
mq1 = np.array(mixerData['q1'])
#interleave i0 and i1 values
mi = np.vstack((mi0,mi1)).flatten('F')
mq = np.vstack((mq0,mq1)).flatten('F')
#The low-pass filter in the DDC stage downsamples by 2, so we only get one sample per cycle here
ddcData = fpga.snapshots['snp3_ddc_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
li = np.array(ddcData['i0'])
lq = np.array(ddcData['q0'])
rawPhase = np.array(ddcData['raw_phase'])
phaseData = fpga.snapshots['snp3_cap_ss'].read(timeout=5,arm=False,man_valid=bSnapAll)['data']
filtPhase = np.array(phaseData['phase'])
#basePhase = np.array(phaseData['base'])
trig = np.array(phaseData['trig'],dtype=np.bool)
#trig2 = np.array(phaseData['trig_raw'],dtype=np.bool)
ctrData = fpga.snapshots['snp2_ctr_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
ctr = np.array(ctrData['ctr']) #the channel counter (0-256)
dctr = np.array(ctrData['dctr']) #the dds lut address counter (0-2**20)
if bPlot:
#we have the same number of samples from the lpf/downsample as everything else, but the each one
#corresponds to every other timesample in the others. So leave off the second half of lpf samples
#so the samples we have correspond to the same time period as the others, at least when plotting.
liSample = li[0:len(mi)/2]
fig,ax = plt.subplots(1,1)
ax.plot(di,'r.-',label='dds')
ax.plot(bi,'bv-',label='bin')
ax.plot(ci,'go-',label='channel')
ax.plot(mi,'m^-',label='mix')
ddcTimes = 2.*np.arange(0,len(liSample))
ax.plot(ddcTimes,liSample,'k.-',label='ddcOut')
ax.set_title('I')
ax.legend(loc='best')
return {'bin':(bi+1.j*bq),'chan':(ci+1.j*cq),'dds':(di+1.j*dq),'mix':(mi+1.j*mq),'ddcOut':(li+1.j*lq),'chanCtr':ctr,'ddsCtr':dctr,'expectedMix':expectedMix,'rawPhase':rawPhase,'filtPhase':filtPhase,'trig':trig}#,'trig2':trig2}#,'basePhase':basePhase}
def setSingleChanSelection(fpga,selBinNums=[0,0,0,0],chanNum=0):
    """assigns bin numbers to a single channel (in each stream), to configure chan_sel block
    INPUTS:
        fpga: the casperfpga instance to program
        selBinNums: 4 bin numbers (for 4 streams) to be assigned to chanNum
        chanNum: the channel number to be assigned
    """
    #NOTE(review): mutable default argument is shared across calls; harmless
    #here because it is never mutated, but worth keeping in mind.
    nStreams = 4
    if len(selBinNums) != nStreams:
        raise TypeError,'selBinNums must have number of elements matching number of streams in firmware'
    fpga.write_int('chan_sel_load',0) #set to zero so nothing loads while we set other registers.
    #assign the bin number to be loaded to each stream
    fpga.write_int('chan_sel_ch_bin0',selBinNums[0])
    fpga.write_int('chan_sel_ch_bin1',selBinNums[1])
    fpga.write_int('chan_sel_ch_bin2',selBinNums[2])
    fpga.write_int('chan_sel_ch_bin3',selBinNums[3])
    time.sleep(.1) #let the registers settle before initiating the load
    #in the register chan_sel_load, the lsb initiates the loading of the above bin numbers into memory
    #the 8 bits above the lsb indicate which channel is being loaded (for all streams)
    loadVal = (chanNum << 1) + 1
    fpga.write_int('chan_sel_load',loadVal)
    time.sleep(.1) #give it a chance to load
    fpga.write_int('chan_sel_load',0) #stop loading
def startStream(fpga,selChanIndex=0):
    """initiates streaming of phase timestream (after prog_fir) to the 1Gbit ethernet
    INPUTS:
        fpga: the casperfpga instance to program
        selChanIndex: which channel to stream
    """
    dest_ip =167772210 #corresponds to IP 10.0.0.50 (167772210 == 0x0A000032)
    fabric_port=50000
    pktsPerFrame = 100 #how many 8byte words to accumulate before sending a frame
    #configure the gbe core,
    print 'restarting'
    fpga.write_int('gbe64_dest_ip',dest_ip)
    fpga.write_int('gbe64_dest_port',fabric_port)
    fpga.write_int('gbe64_words_per_frame',pktsPerFrame)
    #reset the core to make sure it's in a clean state
    fpga.write_int('gbe64_rst',1)
    time.sleep(.1) #hold reset briefly before releasing
    fpga.write_int('gbe64_rst',0)
    #choose what channel to stream
    fpga.write_int('phase_dmp_ch_we',selChanIndex)
    #turn it on
    fpga.write_int('start_cap',0)#make sure we're not streaming photons
    fpga.write_int('phase_dmp_on',1)
def setThresh(fpga,thresholdDeg = -15.,chanNum=0):
"""Sets the phase threshold and baseline filter for photon pulse detection triggers in each channel
INPUTS:
thresholdDeg: The threshold in degrees. The phase must drop below this value to trigger a photon
event
"""
#convert deg to radians
thresholdRad = thresholdDeg * np.pi/180.
#format it as a fix16_13 to be placed in a register
binThreshold = castBin(thresholdRad,quantization='Round',nBits=16,binaryPoint=13,format='uint')
sampleRate = 1.e6
#for the baseline, we apply a second order state variable low pass filter to the phase
#See http://www.earlevel.com/main/2003/03/02/the-digital-state-variable-filter/
#The filter takes two parameters based on the desired Q factor and cutoff frequency
criticalFreq = 200 #Hz
Q=.7
baseKf=2*np.sin(np.pi*criticalFreq/sampleRate)
baseKq=1./Q
#format these paramters as fix18_16 values to be loaded to registers
binBaseKf=castBin(baseKf,quantization='Round',nBits=18,binaryPoint=16,format='uint')
binBaseKq=castBin(baseKq,quantization='Round',nBits=18,binaryPoint=16,format='uint')
print 'threshold',thresholdDeg,binThreshold
print 'Kf:',baseKf,binBaseKf
print 'Kq:',baseKq,binBaseKq
#load the values in
fpga.write_int('capture0_base_kf',binBaseKf)
fpga.write_int('capture0_base_kq',binBaseKq)
fpga.write_int('capture0_threshold',binThreshold)
fpga.write_int('capture0_load_thresh',1+chanNum<<1)
time.sleep(.1)
fpga.write_int('capture0_load_thresh',0)
def stopStream(fpga):
    """stops streaming of phase timestream (after prog_fir) to the 1Gbit ethernet
    INPUTS:
        fpga: the casperfpga instance
    """
    fpga.write_int('phase_dmp_on',0) #clear the enable bit set by startStream
if __name__=='__main__':
#Get the IP of the casperfpga from the command line
if len(sys.argv) > 1:
ip = '10.0.0.'+sys.argv[1]
else:
ip='10.0.0.112'
print ip
fpga = casperfpga.katcp_fpga.KatcpFpga(ip,timeout=50.)
time.sleep(1)
if not fpga.is_running():
print 'Firmware is not running. Start firmware, calibrate, and load wave into qdr first!'
exit(0)
fpga.get_system_information()
bLoadAddr = False #set up chan_sel block
bLoadDds = False #compute and load dds table into qdr memory
bLoadFir = False #load fir coefficients into prog_fir block for each channel
bLoadDac = False #load probe tones into bram for dac/adc simulation block
bSetThresh = False #set the photon phase trigger threshold in the capture block
bStreamPhase = False #initiate stream of phase timestream to ethernet for selected channel
instrument = 'darkness'
startRegisterName = 'run'
#collect the names of bram blocks in firmware for the dac/adc simulator block
memNames = ['dac_lut_mem0','dac_lut_mem1','dac_lut_mem2']
memType='bram'
nBins = 2048
nChannels = 1024
nChannelsPerStream = 256
MHz = 1.e6
#parameters for dac look-up table (lut)
sampleRate = 2.e9
nSamplesPerCycle = 8
nLutRowsToUse = 2**11
nBytesPerMemSample = 8
nBitsPerSamplePair = 24
dynamicRange = .05
nSamples=nSamplesPerCycle*nLutRowsToUse
binSpacing = sampleRate/nBins
dacFreqResolution = sampleRate/nSamples
#set the frequency of what the resonator would be. We will set the ddc to target this frequency
#resFreq = 7.32421875e6 #already quantized
resFreq = 135620117.1875 #Hz, freqIndex=1111 in pps0.xpr
quantizedResFreq = np.round(resFreq/dacFreqResolution)*dacFreqResolution
genBinIndex = resFreq/binSpacing
selBinIndex = np.round(genBinIndex)
selChanIndex = 33
selChanStream = 0
ddsAddrTrig = 0
binCenterFreq = selBinIndex*binSpacing
#parameters for dds look-up table (lut)
nDdsSamplesPerCycle = 2
fpgaClockRate = 250.e6
nCyclesToLoopToSameChannel = nChannelsPerStream
nQdrRows = 2**20
nBytesPerQdrSample = 8
nBitsPerDdsSamplePair = 32
ddsSampleRate = nDdsSamplesPerCycle * fpgaClockRate / nCyclesToLoopToSameChannel
nDdsSamples = nDdsSamplesPerCycle*nQdrRows/nCyclesToLoopToSameChannel
print 'N dds samples per channel',nDdsSamples
ddsFreqResolution = 1.*ddsSampleRate/nDdsSamples
ddsFreq = quantizedResFreq - binCenterFreq
print 'unrounded dds freq',ddsFreq/MHz
#quantize the dds freq according to its resolution
ddsFreq = np.round(ddsFreq/ddsFreqResolution)*ddsFreqResolution
ddsFreqs = np.zeros(nChannels)
ddsFreqs[selChanIndex] = ddsFreq
ddsPhases = np.zeros(nChannels)
print 'dac freq resoluton',dacFreqResolution
print 'resonator freq',resFreq/MHz
print 'quantized resonator freq',quantizedResFreq/MHz
print 'bin center freq',binCenterFreq/MHz
print 'dds sampleRate',ddsSampleRate/MHz,'MHz. res',ddsFreqResolution/MHz,'MHz'
print 'dds freq',ddsFreq
print 'gen bin index',genBinIndex
print 'bin index',selBinIndex
print 'channel',selChanIndex
#set the delay between the dds lut and the end of the fft block (firmware dependent)
ddsShift = 76+256-8
fpga.write_int('dds_shift',ddsShift)
#set list of bins to save in the channel selection block
if bLoadAddr:
setSingleChanSelection(fpga,selBinNums=[selBinIndex,0,0,0],chanNum=selChanIndex)
if bLoadDds:
injectPulseDict = {'ampDeg':30.,'arrivalTime':50.e-6,'decayTime':40.e-6}
nullPulseDict = {'arrivalTime':0}
pulseDicts = [nullPulseDict]*(len(ddsFreqs))
pulseDicts[selChanIndex] = injectPulseDict
print 'loading dds freqs'
fpga.write_int(startRegisterName,0) #do not read from qdr while writing
loadDdsDict = loadDdsToMem(fpga,waveFreqs=ddsFreqs,phases=ddsPhases,sampleRate=ddsSampleRate,nSamplesPerCycle=nDdsSamplesPerCycle,nBitsPerSamplePair=nBitsPerDdsSamplePair,nSamples=nDdsSamples,phasePulseDicts=pulseDicts)
nTaps = 30
nFirBits = 12
firBinPt = 9
if bLoadFir:
print 'loading programmable FIR filter coefficients'
for iChan in xrange(nChannelsPerStream):
print iChan
fpga.write_int('prog_fir0_load_chan',0)
time.sleep(.1)
fir = np.loadtxt('/home/kids/SDR/Projects/Filters/matched30_50.0us.txt')
#fir = np.arange(nTaps,dtype=np.uint32)
#firInts = np.left_shift(fir,5)
#fir = np.zeros(nTaps)
#fir = np.ones(nTaps)
#fir[1] = 1./(1.+iChan)
#fir[1] = 1.
#nSmooth=4
#fir[-nSmooth:] = 1./nSmooth
firInts = np.array(fir*(2**firBinPt),dtype=np.int32)
writeBram(fpga,'prog_fir0_single_chan_coeffs',firInts,nRows=nTaps,nBytesPerSample=4)
time.sleep(.1)
loadVal = (1<<8) + iChan #first bit indicates we will write, next 8 bits is the chan number
fpga.write_int('prog_fir0_load_chan',loadVal)
time.sleep(.1)
fpga.write_int('prog_fir0_load_chan',selChanIndex)
toneFreq = quantizedResFreq #resFreq + dacFreqResolution
if bLoadDac:
print 'loading dac lut'
waveFreqs = [toneFreq]
phases = [1.39]
loadDict = loadWaveToMem(fpga,waveFreqs=waveFreqs,phases=phases,sampleRate=sampleRate,nSamplesPerCycle=nSamplesPerCycle,nBytesPerMemSample=nBytesPerMemSample,nBitsPerSamplePair=nBitsPerSamplePair,memNames = memNames,nSamples=nSamples,memType=memType,dynamicRange=dynamicRange)
if bSetThresh:
setThresh(fpga,chanNum=selChanIndex)
if bStreamPhase:
startStream(fpga,selChanIndex=selChanIndex)
fpga.write_int(startRegisterName,1)
'setting bch',selChanIndex
fpga.write_int('sel_bch',selChanIndex)
snapDict = snapDdc(fpga,bSnapAll=False,selBinIndex=selBinIndex,selChanIndex=selChanIndex,selChanStream=selChanStream,ddsAddrTrig=ddsAddrTrig,bPlot=True)
rawPhase = snapDict['rawPhase']
mix = snapDict['mix']
filtPhase = snapDict['filtPhase']
#basePhase = snapDict['basePhase']
trig = np.roll(snapDict['trig'],-2) #there is an extra 2 cycle delay in firmware between we_out and phase
#trig2 = np.roll(snapDict['trig2'],-2)
print 'photon triggers',np.sum(trig)#,np.sum(trig2)
mixPhaseDeg = 180./np.pi*np.angle(mix)
rawPhaseDeg = 180./np.pi*rawPhase
filtPhaseDeg = 180./np.pi*filtPhase
#basePhaseDeg = 180./np.pi*basePhase
trigPhases = filtPhaseDeg[trig]
#trig2Phases = filtPhaseDeg[trig2]
dt = nChannelsPerStream/fpgaClockRate
t = dt*np.arange(len(rawPhase))
t2 = (dt/2.)*np.arange(len(mixPhaseDeg)) #two IQ points per cycle are snapped
t3 = dt*np.arange(len(filtPhase))
trigTimes = t3[trig]
#trig2Times = t3[trig2]
fig,ax = plt.subplots(1,1)
ax.plot(t/1.e-6,rawPhaseDeg,'k.-',label='raw')
ax.plot(t2/1.e-6,mixPhaseDeg,'r.-',label='mix')
ax.plot(t3/1.e-6,filtPhaseDeg,'b.-',label='filt')
#ax.plot(t3/1.e-6,basePhaseDeg,'m.--',label='filt')
ax.plot(trigTimes/1.e-6,trigPhases,'mo',label='trig')
#ax.plot(trig2Times/1.e-6,trig2Phases,'gv')
ax.set_ylabel('phase ($^{\circ}$)')
ax.set_xlabel('time ($\mu$s)')
ax.legend(loc='best')
plt.show()
stopStream(fpga)
print 'done!'
| gpl-2.0 |
BhallaLab/moose-examples | traub_2005/py/display_morphology.py | 1 | 5931 | # display_morphology.py ---
#
# Filename: display_morphology.py
# Description:
# Author:
# Maintainer:
# Created: Fri Mar 8 11:26:13 2013 (+0530)
# Version:
# Last-Updated: Sun Jun 25 15:09:55 2017 (-0400)
# By: subha
# Update #: 390
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Draw the schematic diagram of cells using networkx
#
#
# Change log:
#
#
#
#
# Code:
"""
Display/save the topology of one or all cells in traub_2005 demo.
command line options (all are optional):
-c celltype : display topology of cell type 'celltype'. If unspecified,
all cell types are displayed
-p filename : save output to file specified by 'filename'
-l : show labels of the compartments
-h,--help : show this help
"""
from __future__ import print_function
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import moose
import cells
def node_sizes(g):
    """Calculate the 2D projection area of each compartment.

    g: graph whose nodes are paths of moose Compartment objects.

    Returns a numpy array with compartment areas in 2D projection
    normalized by the maximum.
    """
    comps = [moose.element(n) for n in g.nodes()]
    # projected area of a cylindrical compartment is length * diameter
    # (the dead `sizes = []` initializer that used to precede this was removed)
    sizes = np.array([c.length * c.diameter for c in comps])
    # the soma ('comp_1') is modelled with length == diameter, so its
    # projection is a circle: diameter**2 * pi / 4
    soma_i = [ii for ii, c in enumerate(comps) if c.path.endswith('comp_1')]
    sizes[soma_i] *= np.pi / 4
    return sizes / sizes.max()
def cell_to_graph(cell, label=False):
    """Convert a MOOSE compartmental neuron into a graph describing
    the topology of the compartments

    cell: MOOSE cell element whose child compartments are graphed.
    label: if True, store each node's short name in a 'label' node attribute.

    Returns a networkx Graph whose nodes are compartment paths and whose
    edge weights are the axial conductances 2 / (Ra1 + Ra2).
    """
    p = '%s/comp_1' % cell.path
    soma = moose.element(p) if moose.exists(p) else moose.Compartment(p)
    #decide which message field links compartments in this model
    #NOTE(review): the existence check probes 'axialOut' but the traversal
    #below uses 'raxialOut' -- confirm this asymmetry is intentional.
    if len(soma.neighbors['axialOut']) > 0:
        msg = 'raxialOut'
    elif len(soma.neighbors['distalOut']) > 0:
        msg = 'distalOut'
    else:
        raise Exception('No neighbors on raxial or distal')
    #one edge per (compartment, axial neighbour) pair; the weight is the
    #conductance between the two compartments
    es = [(c1.path, c2[0].path, {'weight': 2/ (c1.Ra + c2[0].Ra)}) \
          for c1 in moose.wildcardFind('%s/##[ISA=CompartmentBase]' % (cell.path)) \
          for c2 in moose.element(c1).neighbors[msg]]
    g = nx.Graph()
    g.add_edges_from(es)
    if label:
        for v in g.nodes():
            g.node[v]['label'] = v.rpartition('/')[-1]
    return g
def axon_dendrites(g):
    """Split the nodes of `g` into axonal and somatodendritic compartments.

    g: graph whose nodes are compartment paths.

    Returns a 2-tuple (axon, soma_dendrites): compartments that have no
    CaPool child element are classified as axonal, the rest as
    soma/dendrite.
    """
    axon = []
    soma_dendrites = []
    for node in g.nodes():
        # compartments with a calcium pool belong to the soma/dendrite group
        bucket = soma_dendrites if moose.exists('%s/CaPool' % (node)) else axon
        bucket.append(node)
    return (axon, soma_dendrites)
def plot_cell_topology(cell, label=False):
    """Plot the compartmental topology of `cell` as a graph.

    cell: MOOSE cell element to display.
    label: if True, draw the compartment names on the nodes.

    Axonal compartments are drawn black and soma/dendrites gray; node area
    scales with each compartment's projected area and edge width with the
    axial conductance between compartments.
    """
    g = cell_to_graph(cell, label=label)
    axon, sd = axon_dendrites(g)
    node_size = node_sizes(g)
    weights = np.array([g[e[0]][e[1]]['weight'] for e in g.edges()])
    try:
        pos = nx.graphviz_layout(g,prog='twopi',root=cell.path + '/comp_1')
    except (NameError, AttributeError) as e:
        # this is the best networkx can do by itself. Its Fruchterman
        # Reingold layout ends up with overlapping edges even for a
        # tree. igraph does much better.
        pos = nx.spectral_layout(g)
    nx.draw_networkx_edges(g, pos, width=10*weights/max(weights), edge_color='gray', alpha=0.8)
    # BUGFIX: the size keyword was misspelt `nnode_size`, which networkx
    # silently ignored (falling back to the default node size); `with_labels`
    # is not an argument of draw_networkx_nodes either -- labels are drawn
    # separately below.
    nx.draw_networkx_nodes(g, pos,
                           node_size=node_size * 500,
                           node_color=['k' if x in axon else 'gray' for x in g.nodes()],
                           linewidths=[1 if n.endswith('comp_1') else 0 for n in g.nodes()],
                           alpha=0.8)
    if label:
        labels = dict([(n, g.node[n]['label']) for n in g.nodes()])
        nx.draw_networkx_labels(g, pos, labels=labels)
    plt.title(cell.__class__.__name__)
from matplotlib.backends.backend_pdf import PdfPages
import sys
from getopt import getopt
if __name__ == '__main__':
print(sys.argv)
optlist, args = getopt(sys.argv[1:], 'lhp:c:', ['help'])
celltype = ''
pdf = ''
label = False
for arg in optlist:
if arg[0] == '-c':
celltype = arg[1]
elif arg[0] == '-p':
pdf = arg[1]
elif arg[0] == '-l':
label = True
elif arg[0] == '-h' or arg[0] == '--help':
print('Usage: %s [-c CellType [-p filename]]' % (sys.argv[0]))
print('Display/save the morphology of cell type "CellType".')
print('Options:')
print('-c celltype (optional) display only an instance of the specified cell type. If CellType is empty or not specified, all prototype cells are displayed.')
print('-l label the compartments')
print('-p filename (optional) save outputin a pdf file named "filename".')
print('-h,--help print this help')
sys.exit(0)
print('args', optlist, args)
figures = []
if len(celltype) > 0:
try:
fig = plt.figure()
figures.append(fig)
cell = cells.init_prototypes()[celltype]
# print 'Label', label
plot_cell_topology(cell, label=label)
except KeyError:
print('%s: no such cell type. Available are:' % (celltype))
for ii in list(cells.init_prototypes().keys()):
print(ii, end=' ')
print()
sys.exit(1)
else:
for cell, proto in list(cells.init_prototypes().items()):
figures.append(plt.figure())
plot_cell_topology(proto, label=label)
plt.axis('off')
if len(pdf) > 0:
pdfout = PdfPages(pdf)
for fig in figures:
pdfout.savefig(fig)
pdfout.close()
else:
plt.show()
#
# display_morphology.py ends here
| gpl-2.0 |
ephes/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
    """Infer the intrinsic dimension of a dataset of shape
    (n_samples, n_features) whose covariance spectrum is `spectrum`.

    Returns the rank that maximizes Minka's log-likelihood.
    """
    likelihoods = np.array(
        [_assess_dimension_(spectrum, rank, n_samples, n_features)
         for rank in range(len(spectrum))])
    return likelihoods.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making there data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
computed the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
    def _fit(self, X):
        """Fit the model on X
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        Returns
        -------
        U, s, V : ndarrays
            The SVD of the input data, copied and centered when
            requested.
        """
        X = check_array(X)
        n_samples, n_features = X.shape
        X = as_float_array(X, copy=self.copy)
        # Center data
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_
        # thin (economy) SVD; S holds the singular values in decreasing order
        U, S, V = linalg.svd(X, full_matrices=False)
        # eigenvalues of the empirical covariance matrix are S**2 / n_samples
        explained_variance_ = (S ** 2) / n_samples
        explained_variance_ratio_ = (explained_variance_ /
                                     explained_variance_.sum())
        components_ = V
        # resolve the three accepted forms of n_components:
        # None -> keep all, 'mle' -> Minka's MLE, float in (0, 1) -> variance
        # threshold, int -> explicit count
        n_components = self.n_components
        if n_components is None:
            n_components = n_features
        elif n_components == 'mle':
            if n_samples < n_features:
                raise ValueError("n_components='mle' is only supported "
                                 "if n_samples >= n_features")
            n_components = _infer_dimension_(explained_variance_,
                                             n_samples, n_features)
        elif not 0 <= n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d"
                             % (n_components, n_features))
        if 0 < n_components < 1.0:
            # number of components for which the cumulated explained variance
            # percentage is superior to the desired threshold
            ratio_cumsum = explained_variance_ratio_.cumsum()
            n_components = np.sum(ratio_cumsum < n_components) + 1
        # Compute noise covariance using Probabilistic PCA model
        # The sigma2 maximum likelihood (cf. eq. 12.46)
        if n_components < n_features:
            self.noise_variance_ = explained_variance_[n_components:].mean()
        else:
            self.noise_variance_ = 0.
        # store n_samples to revert whitening when getting covariance
        self.n_samples_ = n_samples
        # truncate everything to the selected number of components
        self.components_ = components_[:n_components]
        self.explained_variance_ = explained_variance_[:n_components]
        explained_variance_ratio_ = explained_variance_ratio_[:n_components]
        self.explained_variance_ratio_ = explained_variance_ratio_
        self.n_components_ = n_components
        return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components. \
k is not set then all components are stored and the sum of explained \
variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples in the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
datapythonista/pandas | pandas/tests/tslibs/test_fields.py | 3 | 1124 | import numpy as np
from pandas._libs.tslibs import fields
import pandas._testing as tm
def test_fields_readonly():
    """Regression test: the tslib ``fields`` accessors must accept
    read-only (non-writeable) int64 arrays without raising.
    """
    # https://github.com/vaexio/vaex/issues/357
    # fields functions shouldn't raise when we pass read-only data
    # Epoch-nanosecond offsets spaced 32 days apart, then frozen.
    dtindex = np.arange(5, dtype=np.int64) * 10 ** 9 * 3600 * 24 * 32
    dtindex.flags.writeable = False
    result = fields.get_date_name_field(dtindex, "month_name")
    expected = np.array(["January", "February", "March", "April", "May"], dtype=object)
    tm.assert_numpy_array_equal(result, expected)
    # "Y" extracts the year component; all offsets fall within 1970.
    result = fields.get_date_field(dtindex, "Y")
    expected = np.array([1970, 1970, 1970, 1970, 1970], dtype=np.int32)
    tm.assert_numpy_array_equal(result, expected)
    # Only the first timestamp (the epoch itself) starts a month.
    result = fields.get_start_end_field(dtindex, "is_month_start", None)
    expected = np.array([True, False, False, False, False], dtype=np.bool_)
    tm.assert_numpy_array_equal(result, expected)
    # treat dtindex as timedeltas for this next one
    result = fields.get_timedelta_field(dtindex, "days")
    expected = np.arange(5, dtype=np.int32) * 32
    tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
jpautom/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
              return_as=sparse.coo_matrix, dtype=None):
    """Auxiliary function for img_to_graph and grid_to_graph

    Builds the symmetric adjacency matrix of the voxel grid, optionally
    restricted to `mask` and weighted by the gradient of `img`.
    """
    edges = _make_edges_3d(n_x, n_y, n_z)
    if dtype is None:
        # Use builtin `int`: the `np.int` alias is deprecated and removed
        # in recent numpy releases.
        if img is None:
            dtype = int
        else:
            dtype = img.dtype
    if img is not None:
        img = np.atleast_3d(img)
        # Edge weights are absolute intensity differences.
        weights = _compute_gradient_3d(edges, img)
        if mask is not None:
            edges, weights = _mask_edges_weights(mask, edges, weights)
            diag = img.squeeze()[mask]
        else:
            diag = img.ravel()
        n_voxels = diag.size
    else:
        if mask is not None:
            # Builtin `bool` instead of the removed `np.bool` alias.
            mask = astype(mask, dtype=bool, copy=False)
            mask = np.asarray(mask, dtype=bool)
            edges = _mask_edges_weights(mask, edges)
            n_voxels = np.sum(mask)
        else:
            n_voxels = n_x * n_y * n_z
        # Unweighted graph: every edge and diagonal entry is 1.
        weights = np.ones(edges.shape[1], dtype=dtype)
        diag = np.ones(n_voxels, dtype=dtype)
    diag_idx = np.arange(n_voxels)
    # Duplicate each edge in both directions to make the matrix symmetric.
    i_idx = np.hstack((edges[0], edges[1]))
    j_idx = np.hstack((edges[1], edges[0]))
    graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
                              (np.hstack((i_idx, diag_idx)),
                               np.hstack((j_idx, diag_idx)))),
                              (n_voxels, n_voxels),
                              dtype=dtype)
    if return_as is np.ndarray:
        return graph.toarray()
    return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
    """Graph of the pixel-to-pixel gradient connections

    Edges are weighted with the gradient values.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    img : ndarray, 2D or 3D
        2D or 3D image
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : None or dtype, optional
        The data of the returned sparse matrix. By default it is the
        dtype of img

    Notes
    -----
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.

    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    # Promote 2D images to a trivial 3D grid so the generic helper applies.
    img = np.atleast_3d(img)
    shape = img.shape
    return _to_graph(shape[0], shape[1], shape[2], mask, img,
                     return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
                  dtype=int):
    """Graph of the pixel-to-pixel connections

    Edges exist if 2 voxels are connected.

    Parameters
    ----------
    n_x : int
        Dimension in x axis
    n_y : int
        Dimension in y axis
    n_z : int, optional, default 1
        Dimension in z axis
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : dtype, optional, default int
        The data of the returned sparse matrix. By default it is int.
        Note: the default used to be spelled ``np.int``, a pure alias of
        the builtin ``int`` that was removed in numpy >= 1.24; using the
        builtin keeps the module importable with modern numpy.

    Notes
    -----
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.

    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
                     dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image with
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
    """Extracts patches of any n-dimensional array in place using strides.

    Given an n-dimensional array it will return a 2n-dimensional array with
    the first n dimensions indexing patch position and the last n indexing
    the patch content. This operation is immediate (O(1)). A reshape
    performed on the first n dimensions will cause numpy to copy data, leading
    to a list of extracted patches.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    arr : ndarray
        n-dimensional array of which patches are to be extracted

    patch_shape : integer or tuple of length arr.ndim
        Indicates the shape of the patches to be extracted. If an
        integer is given, the shape will be a hypercube of
        sidelength given by its value.

    extraction_step : integer or tuple of length arr.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    patches : strided ndarray
        2n-dimensional array indexing patches on first n dimensions and
        containing patches on the last n dimensions. These dimensions
        are fake, but this way no data is copied. A simple reshape invokes
        a copying operation to obtain a list of patches:
        result.reshape([-1] + list(patch_shape))
    """
    arr_ndim = arr.ndim

    # Broadcast scalar shape/step to every dimension.
    if isinstance(patch_shape, numbers.Number):
        patch_shape = tuple([patch_shape] * arr_ndim)
    if isinstance(extraction_step, numbers.Number):
        extraction_step = tuple([extraction_step] * arr_ndim)

    patch_strides = arr.strides

    # Index with a *tuple* of slices: indexing an ndarray with a list of
    # slices is deprecated and removed in recent numpy releases.
    slices = tuple(slice(None, None, st) for st in extraction_step)
    indexing_strides = arr[slices].strides

    patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
                           np.array(extraction_step)) + 1

    shape = tuple(list(patch_indices_shape) + list(patch_shape))
    strides = tuple(list(indexing_strides) + list(patch_strides))

    # Zero-copy view: patches share memory with `arr`.
    patches = as_strided(arr, shape=shape, strides=strides)
    return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
    """Reshape a 2D image into a collection of patches

    The resulting patches are allocated in a dedicated array.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    image : array, shape = (image_height, image_width) or
        (image_height, image_width, n_channels)
        The original image data. For color images, the last dimension specifies
        the channel: a RGB image would have `n_channels=3`.

    patch_size : tuple of ints (patch_height, patch_width)
        the dimensions of one patch

    max_patches : integer or float, optional default is None
        The maximum number of patches to extract. If max_patches is a float
        between 0 and 1, it is taken to be a proportion of the total number
        of patches.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling to use if
        `max_patches` is not None.

    Returns
    -------
    patches : array, shape = (n_patches, patch_height, patch_width) or
         (n_patches, patch_height, patch_width, n_channels)
         The collection of patches extracted from the image, where `n_patches`
         is either `max_patches` or the total number of patches that can be
         extracted.

    Examples
    --------
    >>> from sklearn.feature_extraction import image
    >>> one_image = np.arange(16).reshape((4, 4))
    >>> one_image
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> patches = image.extract_patches_2d(one_image, (2, 2))
    >>> print(patches.shape)
    (9, 2, 2)
    >>> patches[0]
    array([[0, 1],
           [4, 5]])
    >>> patches[1]
    array([[1, 2],
           [5, 6]])
    >>> patches[8]
    array([[10, 11],
           [14, 15]])
    """
    i_h, i_w = image.shape[:2]
    p_h, p_w = patch_size
    # Reject patches that cannot fit inside the image.
    if p_h > i_h:
        raise ValueError("Height of the patch should be less than the height"
                         " of the image.")
    if p_w > i_w:
        raise ValueError("Width of the patch should be less than the width"
                         " of the image.")
    image = check_array(image, allow_nd=True)
    # Normalize to 3D: grayscale images get a trailing channel axis of 1.
    image = image.reshape((i_h, i_w, -1))
    n_colors = image.shape[-1]
    # Strided (zero-copy) view of all patches at every offset.
    extracted_patches = extract_patches(image,
                                        patch_shape=(p_h, p_w, n_colors),
                                        extraction_step=1)
    n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
    if max_patches:
        # Sample patch top-left corners uniformly at random.
        rng = check_random_state(random_state)
        i_s = rng.randint(i_h - p_h + 1, size=n_patches)
        j_s = rng.randint(i_w - p_w + 1, size=n_patches)
        patches = extracted_patches[i_s, j_s, 0]
    else:
        patches = extracted_patches
    # Copying reshape: materializes the selected patches contiguously.
    patches = patches.reshape(-1, p_h, p_w, n_colors)
    # remove the color dimension if useless
    if patches.shape[-1] == 1:
        return patches.reshape((n_patches, p_h, p_w))
    else:
        return patches
def reconstruct_from_patches_2d(patches, image_size):
    """Reconstruct the image from all of its patches.

    Patches are assumed to overlap and the image is constructed by filling in
    the patches from left to right, top to bottom, averaging the overlapping
    regions.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour information,
        channels are indexed along the last dimension: RGB patches would
        have `n_channels=3`.

    image_size : tuple of ints (image_height, image_width) or
        (image_height, image_width, n_channels)
        the size of the image that will be reconstructed

    Returns
    -------
    image : array, shape = image_size
        the reconstructed image
    """
    i_h, i_w = image_size[:2]
    p_h, p_w = patches.shape[1:3]
    img = np.zeros(image_size)
    # One patch per valid top-left corner, in row-major order.
    n_h = i_h - p_h + 1
    n_w = i_w - p_w + 1
    corners = product(range(n_h), range(n_w))
    for patch, (r, c) in zip(patches, corners):
        img[r:r + p_h, c:c + p_w] += patch
    # Average each pixel by the number of patches that covered it.
    for r in range(i_h):
        for c in range(i_w):
            overlap = (min(r + 1, p_h, i_h - r) *
                       min(c + 1, p_w, i_w - c))
            img[r, c] /= float(overlap)
    return img
class PatchExtractor(BaseEstimator):
    """Extracts patches from a collection of images

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    patch_size : tuple of ints (patch_height, patch_width)
        the dimensions of one patch

    max_patches : integer or float, optional default is None
        The maximum number of patches per image to extract. If max_patches is a
        float in (0, 1), it is taken to mean a proportion of the total number
        of patches.

    random_state : int or RandomState
        Pseudo number generator state used for random sampling.
    """

    def __init__(self, patch_size=None, max_patches=None, random_state=None):
        self.patch_size = patch_size
        self.max_patches = max_patches
        self.random_state = random_state

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged

        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        return self

    def transform(self, X):
        """Transforms the image samples in X into a matrix of patch data.

        Parameters
        ----------
        X : array, shape = (n_samples, image_height, image_width) or
            (n_samples, image_height, image_width, n_channels)
            Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: a RGB image would have
            `n_channels=3`.

        Returns
        -------
        patches : array, shape = (n_patches, patch_height, patch_width) or
             (n_patches, patch_height, patch_width, n_channels)
             The collection of patches extracted from the images, where
             `n_patches` is either `n_samples * max_patches` or the total
             number of patches that can be extracted.
        """
        # Use a *local* RNG instead of overwriting self.random_state:
        # mutating a constructor parameter in transform breaks the
        # scikit-learn estimator contract (cloning / repeated transforms).
        random_state = check_random_state(self.random_state)
        n_images, i_h, i_w = X.shape[:3]
        X = np.reshape(X, (n_images, i_h, i_w, -1))
        n_channels = X.shape[-1]
        if self.patch_size is None:
            # Default patch: one tenth of the image in each direction.
            patch_size = i_h // 10, i_w // 10
        else:
            patch_size = self.patch_size

        # compute the dimensions of the patches array
        p_h, p_w = patch_size
        n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
        patches_shape = (n_images * n_patches,) + patch_size
        if n_channels > 1:
            patches_shape += (n_channels,)

        # extract the patches, one image at a time, sharing the RNG so
        # different images yield different random patch positions.
        patches = np.empty(patches_shape)
        for ii, image in enumerate(X):
            patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
                image, patch_size, self.max_patches, random_state)
        return patches
| bsd-3-clause |
dpaiton/OpenPV | pv-core/analysis/python/plot_fourier_activity.py | 1 | 40569 | """
Plot the highest activity of four different bar positionings
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
import radialProfile
import pylab as py
def format_coord(x, y):
    """Format a matplotlib cursor position as an 'x=…, y=…, z=…' string.

    Relies on module-level globals: `coord` (display mode selector),
    `P` (the 2D data array being displayed), and `numcols`/`numrows`
    (its dimensions).  NOTE(review): these are assumed to be set by the
    surrounding plotting script before this callback runs — confirm.
    """
    # Nearest integer cell under the cursor.
    col = int(x+0.5)
    row = int(y+0.5)
    if coord == 3:
        # Mode 3: 16-pixel tiling with a position-dependent sub-offset.
        # The magic constants 4/10 and 7/7.5/8 presumably describe the
        # tile layout of the rendered weight mosaic — TODO confirm.
        check = ((x - 0.5) % 16)
        if check < 4:
            x2 = ((x - 0.5) % 16) - 7 + (x / 16.0)
            y2 = ((y - 0.5) % 16) - 7 + (y / 16.0)
        elif check < 10:
            x2 = ((x - 0.5) % 16) - 7.5 + (x / 16.0)
            y2 = ((y - 0.5) % 16) - 7.5 + (y / 16.0)
        else:
            x2 = ((x - 0.5) % 16) - 8 + (x / 16.0)
            y2 = ((y - 0.5) % 16) - 8 + (y / 16.0)
        # Rescale the cursor position to tile units.
        x = (x / 16.0)
        y = (y / 16.0)
        if col>=0 and col<numcols and row>=0 and row<numrows:
            # Cursor inside the data array: include the cell value.
            z = P[row,col]
            return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
        else:
            return 'x=%1.4d, y=%1.4d, x2=%1.4d, y2=%1.4d'%(int(x), int(y), int(x2), int(y2))
    if coord == 1:
        # Mode 1: fixed rescaling (factors 20 and 5 — layout-specific,
        # meaning not derivable from this block alone).
        x2 = (x / 20.0)
        y2 = (y / 20.0)
        x = (x / 5.0)
        y = (y / 5.0)
        if col>=0 and col<numcols and row>=0 and row<numrows:
            z = P[row,col]
            return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
        else:
            return 'x=%1.4d, y=%1.4d, x2=%1.4d, y2=%1.4d'%(int(x), int(y), int(x2), int(y2))
"""
Show how to modify the coordinate formatter to report the image "z"
value of the nearest pixel given x and y
"""
extended = False
vmax = 100.0 # Hz
if len(sys.argv) < 22:
print "usage: plot_avg_activity filename1, filename2, filename3, filename4, filename5, filename6, filename7, filename8, filename9, filename10, filename11, filename12, filename13, filename14, filename15, filename16 [end_time step_time begin_time], test filename, On-weigh filename, Off-weight filename"
sys.exit()
#if len(sys.argv) >= 6:
# vmax = float(sys.argv[5])
a1 = rs.PVReadSparse(sys.argv[1], extended)
a2 = rs.PVReadSparse(sys.argv[2], extended)
a3 = rs.PVReadSparse(sys.argv[3], extended)
a4 = rs.PVReadSparse(sys.argv[4], extended)
a5 = rs.PVReadSparse(sys.argv[5], extended)
a6 = rs.PVReadSparse(sys.argv[6], extended)
a7 = rs.PVReadSparse(sys.argv[7], extended)
a8 = rs.PVReadSparse(sys.argv[8], extended)
a9 = rs.PVReadSparse(sys.argv[9], extended)
a10 = rs.PVReadSparse(sys.argv[10], extended)
a11 = rs.PVReadSparse(sys.argv[11], extended)
a12 = rs.PVReadSparse(sys.argv[12], extended)
a13 = rs.PVReadSparse(sys.argv[13], extended)
a14 = rs.PVReadSparse(sys.argv[14], extended)
a15 = rs.PVReadSparse(sys.argv[15], extended)
a16 = rs.PVReadSparse(sys.argv[16], extended)
end = int(sys.argv[17])
step = int(sys.argv[18])
begin = int(sys.argv[19])
endtest = end
steptest = step
begintest = begin
atest = rs.PVReadSparse(sys.argv[20], extended)
w = rw.PVReadWeights(sys.argv[21])
wO = rw.PVReadWeights(sys.argv[22])
zerange = end
count1 = 0
count2 = 0
count3 = 0
count4 = 0
count5 = 0
count6 = 0
count7 = 0
count8 = 0
count9 = 0
count10 = 0
count11 = 0
count12 = 0
count13 = 0
count14 = 0
count15 = 0
count16 = 0
count17 = 0
count18 = 0
margin = 15
histo = np.zeros((1, 16))
pa = []
print "(begin, end, step, max) == ", begin, end, step, vmax
for endtest in range(begintest+steptest, steptest+1, steptest):
Atest = atest.avg_activity(begintest, endtest)
lenofo = len(Atest)
for i in range(lenofo):
for j in range(lenofo):
pa = np.append(pa, Atest[i,j])
median = np.median(pa)
avg = np.mean(pa)
AW = np.zeros((lenofo, lenofo))
AWO = np.zeros((lenofo, lenofo))
SUMAW = np.zeros((lenofo, lenofo))
countpos = 0
space = 1
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
nf = w.nf
d = np.zeros((4,4))
coord = 1
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
numpat = w.numPatches
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
countnum = 0
A1pos = np.array([0,0])
im2 = np.zeros((nx_im, ny_im))
im2[:,:] = (w.max - w.min) / 2.
print "avg = ", avg
print "median = ", median
#a2.rewind()
co = 0
for g in range(2):
if g == 0:
for end in range(begin+step, step+1, step):
countpos = 0
A1 = a1.avg_activity(begin, end)
A2 = a2.avg_activity(begin, end)
A3 = a3.avg_activity(begin, end)
A4 = a4.avg_activity(begin, end)
A5 = a5.avg_activity(begin, end)
A6 = a6.avg_activity(begin, end)
A7 = a7.avg_activity(begin, end)
A8 = a8.avg_activity(begin, end)
A9 = a9.avg_activity(begin, end)
A10 = a10.avg_activity(begin, end)
A11 = a11.avg_activity(begin, end)
A12 = a12.avg_activity(begin, end)
A13 = a13.avg_activity(begin, end)
A14 = a14.avg_activity(begin, end)
A15 = a15.avg_activity(begin, end)
A16 = a16.avg_activity(begin, end)
AF = np.zeros((lenofo, lenofo))
lenofo = len(A1)
lenofb = lenofo * lenofo
beingplotted = []
for i in range(lenofo):
for j in range(lenofo):
#print A1[i, j]
check = [A1[i,j], A2[i,j], A3[i,j], A4[i,j], A5[i,j], A6[i,j], A7[i,j], A8[i,j], A9[i,j], A10[i,j], A11[i,j], A12[i,j], A13[i,j], A14[i,j], A15[i,j], A16[i,j]]
checkmax = np.max(check)
wheremax = np.argmax(check)
half = checkmax / 2.0
sort = np.sort(check)
co = 0
if wheremax == 0:
AW[i, j] = 1
if wheremax == 1:
AW[i, j] = 2
if wheremax == 2:
AW[i, j] = 3
if wheremax == 3:
AW[i, j] = 4
if wheremax == 4:
AW[i, j] = 5
if wheremax == 5:
AW[i, j] = 6
if wheremax == 6:
AW[i, j] = 7
if wheremax == 7:
AW[i, j] = 8
if wheremax == 8:
AW[i, j] = 9
if wheremax == 9:
AW[i, j] = 10
if wheremax == 10:
AW[i, j] = 11
if wheremax == 11:
AW[i, j] = 12
if wheremax == 12:
AW[i, j] = 13
if wheremax == 13:
AW[i, j] = 14
if wheremax == 14:
AW[i, j] = 15
if wheremax == 15:
AW[i, j] = 16
#print AF[i, j]
#print "check = ", sort
#print "half = ", half
for e in range(len(check)):
if check[e] >= half:
co += 1
if co == 1:
AF[i, j] = 0.0
count1 += 1
AWO[i, j] = 1.0
if wheremax == 0:
countnum += 1
if i > margin and i < (w.nx - margin):
if j > margin and j < (w.ny - margin):
print "3rd Leap!"
if countpos == 0:
A1pos = [i, j]
else:
A1pos = np.vstack((A1pos, [i, j]))
countpos+=1
elif co == 2:
AF[i, j] = 0.06
count2 += 1
AWO[i, j] = 2.0
elif co == 3:
AF[i, j] = 0.12
count3 += 1
AWO[i, j] = 3.0
elif co == 4:
AF[i, j] = 0.18
count4 += 1
AWO[i, j] = 4.0
elif co == 5:
AF[i, j] = 0.24
count5 += 1
AWO[i, j] = 5.0
elif co == 6:
AF[i, j] = 0.3
count6 += 1
AWO[i, j] = 6.0
#######
#if A1[i ,f]
#######
elif co == 7:
AF[i, j] = 0.36
count7 += 1
AWO[i, j] = 7.0
elif co == 8:
AF[i, j] = 0.42
count8 += 1
AWO[i, j] = 8.0
elif co == 9:
AF[i, j] = 0.48
count9 += 1
AWO[i, j] = 9.0
elif co == 10:
AF[i, j] = 0.54
count10 += 1
AWO[i, j] = 10.0
elif co == 11:
AF[i, j] = 0.60
count11 += 1
AWO[i, j] = 11.0
elif co == 12:
AF[i, j] = 0.66
count12 += 1
AWO[i, j] = 12.0
elif co == 13:
AF[i, j] = 0.72
count13 += 1
AWO[i, j] = 13.0
elif co == 14:
AF[i, j] = 0.78
count14 += 1
AWO[i, j] = 14.0
elif co == 15:
AF[i, j] = 0.84
count15 += 1
AWO[i, j] = 15.0
elif co == 16:
AF[i, j] = 0.9
count16 += 1
AWO[i, j] = 16.0
else:
AF[i, j] = 1.0
count18 += 1
#print "ELSE"
#print "co = ", co
#print
#print AF[i ,j]
#print
#print "13", count13
#print "14", count14
#print "15", count15
#print "16", count16
print AW
F1 = np.fft.fft2(AW)
F2 = np.fft.fftshift(F1)
psd2D = np.abs(F2)**2
psd1D = radialProfile.azimuthalAverage(psd2D)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(np.log10(AW), cmap=py.cm.Greys)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(np.log10(psd2D))
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.semilogy(psd1D)
ax3.set_xlabel('Spatial Frequency')
ax3.set_ylabel('Power Spectrum')
plt.show()
sys.exit()
if 1 ==1:
if 1 == 1:
for i in range(128):
for j in range(128):
if AWO[i, j] == 1:
histo[0, 0] += 1
if AWO[i, j] == 2:
histo[0, 1] += 1
if AWO[i, j] == 3:
histo[0, 2] += 1
if AWO[i, j] == 4:
histo[0, 3] += 1
if AWO[i, j] == 5:
histo[0, 4] += 1
if AWO[i, j] == 6:
histo[0, 5] += 1
if AWO[i, j] == 7:
histo[0, 6] += 1
if AWO[i, j] == 8:
histo[0, 7] += 1
if AWO[i, j] == 9:
histo[0, 8] += 1
if AWO[i, j] == 10:
histo[0, 9] += 1
if AWO[i, j] == 11:
histo[0, 10] += 1
if AWO[i, j] == 12:
histo[0, 11] += 1
if AWO[i, j] == 13:
histo[0, 12] += 1
if AWO[i, j] == 14:
histo[0, 13] += 1
if AWO[i, j] == 15:
histo[0, 14] += 1
if AWO[i, j] == 16:
histo[0, 15] += 1
ahist = []
for i in range(16):
ahist = np.append(ahist, histo[0,i])
print "ahist = ", ahist
a1.rewind()
a2.rewind()
a3.rewind()
a4.rewind()
a5.rewind()
a6.rewind()
a7.rewind()
a8.rewind()
a9.rewind()
a10.rewind()
a11.rewind()
a12.rewind()
a13.rewind()
a14.rewind()
a15.rewind()
a16.rewind()
countg = 0
testgraph = []
test = []
numofsteps = 10
print "AW = ", AW
print "A1pos = ", A1pos
for k in range(zerange): ####### range(step)
if k%1000 == 0:
print "at ", k
A1t = []
A2t = []
A3t = []
A4t = []
A5t = []
A6t = []
A7t = []
A8t = []
A9t = []
A10t = []
A11t = []
A12t = []
A13t = []
A14t = []
A15t = []
A16t = []
countg += 1
A1A = a1.next_record()
#A2A = a2.next_record()
#A3A = a3.next_record()
#A4A = a4.next_record()
#A5A = a5.next_record()
#A6A = a6.next_record()
#A7A = a7.next_record()
#A8A = a8.next_record()
#A9A = a9.next_record()
#A10A = a10.next_record()
#A11A = a11.next_record()
#A12A = a12.next_record()
#A13A = a13.next_record()
#A14A = a14.next_record()
#A15A = a15.next_record()
#A16A = a16.next_record()
A1t = 0
for g in range(np.shape(A1pos)[0]):
w = A1pos[g]
i = w[0]
j = w[1]
for h in range(len(A1A)):
if A1A[h] == ((lenofo*i) + j):
A1t += 1
"""
if AW[i, j] == 2:
t = 0
for h in range(len(A2A)):
if A2A[h] == ((lenofo * i) + j):
t = 1
if t ==1:
A2t = np.append(A2t,1)
else:
A2t = np.append(A2t, 0)
if AW[i, j] == 3:
t = 0
for h in range(len(A3A)):
if A3A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A3t = np.append(A3t,1)
else:
A3t = np.append(A3t, 0)
if AW[i, j] == 4:
t = 0
for h in range(len(A4A)):
if A4A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A4t = np.append(A4t,1)
else:
A4t = np.append(A4t, 0)
if AW[i, j] == 5:
t = 0
for h in range(len(A5A)):
if A5A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A5t = np.append(A5t,1)
else:
A5t = np.append(A5t, 0)
if AW[i, j] == 6:
t = 0
for h in range(len(A6A)):
if A6A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A6t = np.append(A6t,1)
else:
A6t = np.append(A6t, 0)
if AW[i, j] == 7:
t = 0
for h in range(len(A7A)):
if A7A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A7t = np.append(A7t,1)
else:
A7t = np.append(A7t, 0)
if AW[i, j] == 8:
t = 0
for h in range(len(A8A)):
if A8A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A8t = np.append(A8t,1)
else:
A8t = np.append(A8t, 0)
if AW[i, j] == 9:
t = 0
for h in range(len(A9A)):
if A9A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A9t = np.append(A9t,1)
else:
A9t = np.append(A9t, 0)
if AW[i, j] == 10:
t = 0
for h in range(len(A10A)):
if A10A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A10t = np.append(A10t,1)
else:
A10t = np.append(A10t, 0)
if AW[i, j] == 11:
t = 0
for h in range(len(A11A)):
if A11A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A11t = np.append(A11t,1)
else:
A11t = np.append(A11t, 0)
if AW[i, j] == 12:
t = 0
for h in range(len(A12A)):
if A12A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A12t = np.append(A12t,1)
else:
A12t = np.append(A12t, 0)
if AW[i, j] == 13:
t = 0
for h in range(len(A13A)):
if A13A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A13t = np.append(A13t,1)
else:
A13t = np.append(A13t, 0)
if AW[i, j] == 14:
t = 0
for h in range(len(A14A)):
if A14A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A14t = np.append(A14t,1)
else:
A14t = np.append(A14t, 0)
if AW[i, j] == 15:
t = 0
for h in range(len(A15A)):
if A15A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A15t = np.append(A15t,1)
else:
A15t = np.append(A15t, 0)
if AW[i, j] == 16:
t = 0
for h in range(len(A16A)):
if A16A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A16t = np.append(A16t,1)
else:
A16t = np.append(A16t, 0)
"""
#if np.sum(test) > 0:
# print "test = ", test
# print "sum = ", sum(test)
d = k / numofsteps
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A1p = np.sum(A1t)
else:
A1p = np.append(A1p,np.sum(A1t))
if k == (numofsteps-1):
A1q = np.average(A1p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A1q = np.append(A1q, np.average(A1p))
"""
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A2p = np.sum(A2t)
else:
A2p = np.append(A2p,np.sum(A2t))
if k == (numofsteps-1):
A2q = np.average(A2p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A2q = np.append(A2q, np.average(A2p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A3p = np.sum(A3t)
else:
A3p = np.append(A3p,np.sum(A3t))
if k == (numofsteps-1):
A3q = np.average(A3p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A3q = np.append(A3q, np.average(A3p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A4p = np.sum(A4t)
else:
A4p = np.append(A4p,np.sum(A4t))
if k == (numofsteps-1):
A4q = np.average(A4p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A4q = np.append(A4q, np.average(A4p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A5p = np.sum(A5t)
else:
A5p = np.append(A5p,np.sum(A5t))
if k == (numofsteps-1):
A5q = np.average(A5p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A5q = np.append(A5q, np.average(A5p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A6p = np.sum(A6t)
else:
A6p = np.append(A6p,np.sum(A6t))
if k == (numofsteps-1):
A6q = np.average(A6p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A6q = np.append(A6q, np.average(A6p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A7p = np.sum(A7t)
else:
A7p = np.append(A7p,np.sum(A7t))
if k == (numofsteps-1):
A7q = np.average(A7p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A7q = np.append(A7q, np.average(A7p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A8p = np.sum(A8t)
else:
A8p = np.append(A8p,np.sum(A8t))
if k == (numofsteps-1):
A8q = np.average(A8p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A8q = np.append(A8q, np.average(A8p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A9p = np.sum(A9t)
else:
A9p = np.append(A9p,np.sum(A9t))
if k == (numofsteps-1):
A9q = np.average(A9p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A9q = np.append(A9q, np.average(A9p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A10p = np.sum(A10t)
else:
A10p = np.append(A10p,np.sum(A10t))
if k == (numofsteps-1):
A10q = np.average(A10p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A10q = np.append(A10q, np.average(A10p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A11p = np.sum(A11t)
else:
A11p = np.append(A11p,np.sum(A11t))
if k == (numofsteps-1):
A11q = np.average(A11p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A11q = np.append(A11q, np.average(A11p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A12p = np.sum(A12t)
else:
A12p = np.append(A12p,np.sum(A12t))
if k == (numofsteps-1):
A12q = np.average(A12p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A12q = np.append(A12q, np.average(A12p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A13p = np.sum(A13t)
else:
A13p = np.append(A13p,np.sum(A13t))
if k == (numofsteps-1):
A13q = np.average(A13p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A13q = np.append(A13q, np.average(A13p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A14p = np.sum(A14t)
else:
A14p = np.append(A14p,np.sum(A14t))
if k == (numofsteps-1):
A14q = np.average(A14p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A14q = np.append(A14q, np.average(A14p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A15p = np.sum(A15t)
else:
A15p = np.append(A15p,np.sum(A15t))
if k == (numofsteps-1):
A15q = np.average(A15p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A15q = np.append(A15q, np.average(A15p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A16p = np.sum(A16t)
else:
A16p = np.append(A16p,np.sum(A16t))
if k == (numofsteps-1):
A16q = np.average(A16p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A16q = np.append(A16q, np.average(A16p))
"""
#for i in range(4):
# testq = np.append(testq, 0)
#if AW[i, j] == 2:
# for g in range(len(A2A)):
# if A2A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 3:
# for g in range(len(A3A)):
# if A3A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 4:
# for g in range(len(A4A)):
# if A4A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 5:
# for g in range(len(A5A)):
# if A5A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 6:
# for g in range(len(A6A)):
# if A6A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 7:
# for g in range(len(A7A)):
# if A7A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 8:
# for g in range(len(A8A)):
# if A8A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 9:
# for g in range(len(A9A)):
# if A9A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 10:
# for g in range(len(A10A)):
# if A10A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 11:
# for g in range(len(A11A)):
# if A11A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 12:
# for g in range(len(A12A)):
# if A12A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 13:
# for g in range(len(A13A)):
# if A13A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 14:
# for g in range(len(A14A)):
# if A14A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#f AW[i, j] == 15:
# for g in range(len(A15A)):
# if A15A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 16:
# for g in range(len(A16A)):
# if A16A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.set_title("SUMAW")
#ax.imshow(SUMAW, cmap=cm.binary, interpolation='nearest')
#test = SUMAW / countg
# Normalize A1's accumulated activity into an average rate: divide by the
# number of samples, then by the step count per 1000 ms bin.
# NOTE(review): presumably converts summed spikes to Hz -- confirm units.
A1q = ((A1q / countnum) / (numofsteps / 1000.0))
#A2q = (A2q / len(A2t)) / (numofsteps / 100.0)
#A3q = (A3q / len(A3t)) / (numofsteps / 100.0)
#A4q = (A4q / len(A4t)) / (numofsteps / 100.0)
#A5q = (A5q / len(A5t)) / (numofsteps / 100.0)
#A6q = (A6q / len(A6t)) / (numofsteps / 100.0)
#A7q = (A7q / len(A7t)) / (numofsteps / 100.0)
#A8q = (A8q / len(A8t)) / (numofsteps / 100.0)
#A9q = (A9q / len(A9t)) / (numofsteps / 100.0)
#A10q = (A10q / len(A10t)) / (numofsteps / 100.0)
#A11q = (A11q / len(A11t)) / (numofsteps / 100.0)
#A12q = (A12q / len(A12t)) / (numofsteps / 100.0)
#A13q = (A13q / len(A13t)) / (numofsteps / 100.0)
#A14q = (A14q / len(A14t)) / (numofsteps / 100.0)
#A15q = (A15q / len(A15t)) / (numofsteps / 100.0)
#A16q = (A16q / len(A16t)) / (numofsteps / 100.0)
#f = open('averaged-activity.txt', 'w')
#for l in range(200): #((len(A1q)/2)):
# f.write('1; %1.1f; %1.1f\n' %(A1q[l], A1q[l+400]))
#for l in range(200): #((len(A1q)/2)):
# f.write('0; %1.1f; %1.1f\n' %(A1q[l+200], A1q[l+600]))
#print "len = ", len(A1q)
#print "half = ", (len(A1q) / 2)
#sys.exit()
# Build a square-wave "stimulus on" trace: a frame onset every `fpm` ms,
# each "on" window lasting 1000 ms.
# NOTE(review): hz = 0.5 gives fpm = 2000 ms between onsets -- confirm this
# matches the stimulus presentation parameters used upstream.
hz = 0.5
fpm = 1000 / hz
activity = []
for i in range((zerange/2)):
    # At each frame onset, open a new window [w, e].
    # NOTE(review): this rebinds `w`, which elsewhere in the file appears to
    # hold a weights object (`w.numPatches` below) -- confirm nothing after
    # this point still needs the original `w` (the code below sys.exit()
    # appears unreachable anyway).
    if i%fpm == 0:
        w = i
        e = w + 1000
    if i >= w and i <= e:
        activity = np.append(activity, 1)
    else:
        activity = np.append(activity, 0)
# Two-panel figure: stimulus trace (bottom) and A1 average rate (top).
fig = plt.figure()
ax = fig.add_subplot(212)
ax.set_title('Image')
ax.set_xlabel("Time (ms)")
ax.set_autoscale_on(False)
ax.set_ylim(0,1.1)
ax.set_xlim(0, len(activity))
ax.plot(np.arange(len(activity)), activity, color='y', ls = '-')
#fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title("test")
ax.set_ylabel("Avg Firing Rate for A1")
ax.plot(np.arange(len(A1q)), A1q, color=cm.Paired(0.06) , ls = '-')
#ax.plot(np.arange(len(A2q)), A2q, color=cm.Paired(0.12) , ls = '-')
#ax.plot(np.arange(len(A3q)), A3q, color=cm.Paired(0.18) , ls = '-')
#ax.plot(np.arange(len(A4q)), A4q, color=cm.Paired(0.24) , ls = '-')
#ax.plot(np.arange(len(A5q)), A5q, color=cm.Paired(0.30) , ls = '-')
#ax.plot(np.arange(len(A6q)), A6q, color=cm.Paired(0.36) , ls = '-')
#ax.plot(np.arange(len(A7q)), A7q, color=cm.Paired(0.42) , ls = '-')
#ax.plot(np.arange(len(A8q)), A8q, color=cm.Paired(0.48) , ls = '-')
#ax.plot(np.arange(len(A9q)), A9q, color=cm.Paired(0.54) , ls = '-')
#ax.plot(np.arange(len(A10q)), A10q, color=cm.Paired(0.60) , ls = '-')
#ax.plot(np.arange(len(A11q)), A11q, color=cm.Paired(0.66) , ls = '-')
#ax.plot(np.arange(len(A12q)), A12q, color=cm.Paired(0.72) , ls = '-')
#ax.plot(np.arange(len(A13q)), A13q, color=cm.Paired(0.78) , ls = '-')
#ax.plot(np.arange(len(A14q)), A14q, color=cm.Paired(0.84) , ls = '-')
#ax.plot(np.arange(len(A15q)), A15q, color=cm.Paired(0.90) , ls = '-')
#ax.plot(np.arange(len(A16q)), A16q, color=cm.Paired(0.96) , ls = '-')
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set_title('Image')
ax2.set_xlabel("Time (ms)")
ax2.plot(np.arange(len(ahist)), ahist, color='y', ls = '-')
plt.show()
# NOTE(review): everything below this exit appears unreachable -- confirm
# sys.exit() is not nested inside a conditional (indentation was lost when
# this file was extracted).
sys.exit()
if 1 == 1:  # NOTE(review): always-true guard, presumably a debug toggle
    # Collect the ON/OFF weight patches of every neuron flagged in AW, and
    # tile all patches into composite images `im` (ON) and `im2` (OFF).
    kd = []
    AW = AW.reshape(lenofb, 1)
    AWO = AWO.reshape(lenofb, 1)
    count = 0
    for k in range(w.numPatches):
        p = w.next_patch()
        pO = wO.next_patch()
        # NOTE(review): both lines call kyPos; `kx` presumably should use
        # conv.kxPos.  kx/ky are unused below, so currently harmless.
        kx = conv.kyPos(k, nx, ny, nf)
        ky = conv.kyPos(k, nx, ny, nf)
        # Skip truncated edge patches that are not full nxp x nyp size.
        if len(p) != nxp * nyp:
            continue
        #print "p = ", p
        count += 1
        #print "count = ", count
        if AW[k] == 1:
            # First accepted patch seeds the stack; later ones are vstacked
            # as concatenated [ON, OFF] row vectors.
            if len(kd) == 0:
                don = p
                doff = pO
                kd = np.append(don, doff)
            else:
                don = p
                doff = pO
                e = np.append(don, doff)
                kd = np.vstack((kd, e))
            p = np.reshape(p, (nxp, nyp))
            pO = np.reshape(pO, (nxp, nyp))
        else:
            # Unselected neuron: draw the placeholder patch `d` instead.
            # NOTE(review): `d` must be defined earlier in the file -- confirm.
            p = d
            pO = d
        #print "post p", p
        x = space + (space + nxp) * (k % nx)
        y = space + (space + nyp) * (k / nx)
        im[y:y+nyp, x:x+nxp] = p
        im2[y:y+nyp, x:x+nxp] = pO
    # Cluster the stacked [ON, OFF] patch vectors into k = 16 groups and tile
    # the cluster centers into `im5` (ON patches in the first row of slots,
    # OFF patches in the second).
    k = 16
    wd = sp.whiten(kd)
    result = sp.kmeans2(wd, k)
    cluster = result[1]
    nx_im5 = 2 * (nxp + space) + space
    ny_im5 = k * (nyp + space) + space
    im5 = np.zeros((nx_im5, ny_im5))
    # Fill the background with the mid-point of the weight range.
    im5[:,:] = (w.max - w.min) / 2.
    # Split each center back into its ON and OFF halves.
    b = result[0]
    c = np.hsplit(b, 2)
    con = c[0]
    coff = c[1]
    for i in range(k):
        d = con[i].reshape(nxp, nyp)
        x = space + (space + nxp) * (i % k)
        y = space + (space + nyp) * (i / k)
        im5[y:y+nyp, x:x+nxp] = d
    for i in range(k):
        e = coff[i].reshape(nxp, nyp)
        # Shift into the second row of slots for the OFF centers.
        i = i + k
        x = space + (space + nxp) * (i % k)
        y = space + (space + nyp) * (i / k)
        im5[y:y+nyp, x:x+nxp] = e
kcount1 = 0.0
kcount2 = 0.0
kcount3 = 0.0
kcount4 = 0.0
kcount5 = 0.0
kcount6 = 0.0
kcount7 = 0.0
kcount8 = 0.0
kcount9 = 0.0
kcount10 = 0.0
kcount11 = 0.0
kcount12 = 0.0
kcount13 = 0.0
kcount14= 0.0
kcount15 = 0.0
kcount16 = 0.0
acount = len(kd)
for i in range(acount):
if cluster[i] == 0:
kcount1 = kcount1 + 1
if cluster[i] == 1:
kcount2 = kcount2 + 1
if cluster[i] == 2:
kcount3 = kcount3 + 1
if cluster[i] == 3:
kcount4 = kcount4 + 1
if cluster[i] == 4:
kcount5 = kcount5 + 1
if cluster[i] == 5:
kcount6 = kcount6 + 1
if cluster[i] == 6:
kcount7 = kcount7 + 1
if cluster[i] == 7:
kcount8 = kcount8 + 1
if cluster[i] == 8:
kcount9 = kcount9 + 1
if cluster[i] == 9:
kcount10 = kcount10 + 1
if cluster[i] == 10:
kcount11 = kcount11 + 1
if cluster[i] == 11:
kcount12 = kcount12 + 1
if cluster[i] == 12:
kcount13 = kcount13 + 1
if cluster[i] == 13:
kcount14 = kcount14 + 1
if cluster[i] == 14:
kcount15 = kcount15 + 1
if cluster[i] == 15:
kcount16 = kcount16 + 1
kcountper1 = kcount1 / acount
kcountper2 = kcount2 / acount
kcountper3 = kcount3 / acount
kcountper4 = kcount4 / acount
kcountper5 = kcount5 / acount
kcountper6 = kcount6 / acount
kcountper7 = kcount7 / acount
kcountper8 = kcount8 / acount
kcountper9 = kcount9 / acount
kcountper10 = kcount10 / acount
kcountper11 = kcount11 / acount
kcountper12 = kcount12 / acount
kcountper13 = kcount13 / acount
kcountper14 = kcount14 / acount
kcountper15 = kcount15 / acount
kcountper16 = kcount16 / acount
h = [count1, count2, count3, count4, count5, count6, count7, count8, count9, count10, count11, count12, count13, count14, count15, count16, count18]
h2 = [0, count1, count2, count3, count4, count5, count6, count7, count8, count9, count10, count11, count12, count13, count14, count15, count16, count18]
fig4 = plt.figure()
ax4 = fig4.add_subplot(111, axisbg='darkslategray')
loc = np.array(range(len(h)))+0.5
width = 1.0
ax4.bar(loc, h, width=width, bottom=0, color='y')
ax4.plot(np.arange(len(h2)), h2, ls = '-', marker = 'o', color='y')
ax4.set_title("Number of Neurons that Respond to Higher than .5 max firing rate")
ax4.set_ylabel("Number of Neurons")
ax4.set_xlabel("Number of Presented Lines")
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('1=%1.0i 2=%1.0i 3=%1.0i 4=%1.0i 5=%1.0i 6=%1.0i 7=%1.0i 8%1.0i\n 9=%1.0i 10=%1.0i 11=%1.0i 12=%1.0i 13=%1.0i 14=%1.0i 15=%1.0i 16=%1.0i none=%1.0i' %(count1, count2, count3, count4, count5, count6, count7, count8, count9, count10, count11, count12, count13, count14, count15, count16, count18))
ax.set_ylabel('Ky GLOBAL')
ax.set_title('Activity: min=%1.1f, max=%1.1f time=%d' %(0, 8, a1.time))
#ax.format_coord = format_coord
ax.imshow(AF, cmap=cm.binary, interpolation='nearest', vmin=0., vmax=1)
ax.text(140.0, 0.0, "How Many Above Half of Max")
ax.text(140.0, 5.0, "1", backgroundcolor = cm.binary(0.0))
ax.text(140.0, 10.0, "2", backgroundcolor = cm.binary(0.06))
ax.text(140.0, 15.0, "3", backgroundcolor = cm.binary(0.12))
ax.text(140.0, 20.0, "4", backgroundcolor = cm.binary(0.18))
ax.text(140.0, 25.0, "5", backgroundcolor = cm.binary(0.24))
ax.text(140.0, 30.0, "6", backgroundcolor = cm.binary(0.30))
ax.text(140.0, 35.0, "7", backgroundcolor = cm.binary(0.36))
ax.text(140.0, 40.0, "8", backgroundcolor = cm.binary(0.42))
ax.text(140.0, 45.0, "9", backgroundcolor = cm.binary(0.48))
ax.text(140.0, 50.0, "10", backgroundcolor = cm.binary(0.54))
ax.text(140.0, 55.0, "11", backgroundcolor = cm.binary(0.60))
ax.text(140.0, 60.0, "12", backgroundcolor = cm.binary(0.66))
ax.text(140.0, 66.0, "13", backgroundcolor = cm.binary(0.72))
ax.text(140.0, 70.0, "14", backgroundcolor = cm.binary(0.78))
ax.text(140.0, 75.0, "15", backgroundcolor = cm.binary(0.84))
ax.text(140.0, 80.0, "16", backgroundcolor = cm.binary(0.9))
ax.text(140.0, 85.0, "nothing", color = 'w', backgroundcolor = cm.binary(1.0))
#fig2 = plt.figure()
#ax2 = fig2.add_subplot(111)
#ax2.set_xlabel('Kx GLOBAL')
#ax2.set_ylabel('Ky GLOBAL')
#ax2.set_title('Weight On Patches')
#ax2.format_coord = format_coord
#ax2.imshow(im, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
#fig3 = plt.figure()
#ax3 = fig3.add_subplot(111)
#ax3.set_xlabel('Kx GLOBAL')
#ax3.set_ylabel('Ky GLOBAL')
#ax3.set_title('Weight Off Patches')
#ax3.format_coord = format_coord
#ax3.imshow(im2, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
fig = plt.figure()
ax = fig.add_subplot(111)
textx = (-7/16.0) * k
texty = (10/16.0) * k
ax.set_title('On and Off K-means')
ax.set_axis_off()
ax.text(textx, texty,'ON\n\nOff', fontsize='xx-large', rotation='horizontal')
ax.text( -5, 12, "Percent %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" %(kcountper1, kcountper2, kcountper3, kcountper4, kcountper5, kcountper6, kcountper7, kcountper8, kcountper9, kcountper10, kcountper11, kcountper12, kcountper13, kcountper14, kcountper15, kcountper16), fontsize='large', rotation='horizontal')
ax.text(-4, 14, "Patch 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", fontsize='x-large', rotation='horizontal')
ax.imshow(im5, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
plt.show()
#end fig loop
sys.exit()
| epl-1.0 |
kjung/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
    """Doctest-globals hook: create a temporary data home for the fetchers.

    Makes a fresh temporary directory containing an 'mldata' subfolder,
    records it in the module-level ``custom_data_home`` (so the teardown can
    delete it) and exposes it to the doctests via the returned globals dict.
    """
    global custom_data_home
    data_home = tempfile.mkdtemp()
    makedirs(join(data_home, 'mldata'))
    custom_data_home = data_home
    globs['custom_data_home'] = data_home
    return globs
def setup_module():
    """Install a mock of mldata.org so the doctests never hit the network.

    Registers canned (empty) payloads for the dataset names the doctests
    request: the original MNIST digits, iris, and the UCI iris mirror.
    """
    mock_datasets = {
        'mnist-original': {
            'data': np.empty((70000, 784)),
            'label': np.repeat(np.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': np.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
    }
    install_mldata_mock(mock_datasets)
def teardown_module():
    """Undo setup_module: remove the mldata mock, then the temp data folder."""
    uninstall_mldata_mock()
    # custom_data_home was created by globs() above.
    shutil.rmtree(custom_data_home)
| bsd-3-clause |
raymond91125/tissue_enrichment_tool_hypergeometric_test | tea_paper_docs/src/hgf_benchmark_script.py | 4 | 14884 | # -*- coding: utf-8 -*-
"""
A script to benchmark TEA.
@david angeles
dangeles@caltech.edu
"""
import tissue_enrichment_analysis as tea # the library to be used
import pandas as pd
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import re
import matplotlib as mpl
sns.set_context('paper')
# pd.set_option('display.float_format', lambda x:'%f'%x)
pd.set_option('precision', 3)
# this script generates a few directories.
dirOutput = '../output/'
dirSummaries = '../output/SummaryInformation/'
dirHGT25_any = '../output/HGT25_any_Results/'
dirHGT33_any = '../output/HGT33_any_Results/'
dirHGT50_any = '../output/HGT50_any_Results/'
dirHGT100_any = '../output/HGT100_any_Results/'
dirComp = '../output/comparisons/'
# BUGFIX: dirHGT33_any was missing from this list, so the HGT33 results
# folder was never created even though walker() and the main loop below
# both write into it.
DIRS = [dirOutput, dirSummaries, dirHGT25_any, dirHGT33_any,
        dirHGT50_any, dirHGT100_any, dirComp]
# open the relevant file
path_sets = '../input/genesets_golden/'
path_dicts = '../input/WS252AnatomyDictionary/'
# Make all the necessary dirs if they don't already exist
for d in DIRS:
    if not os.path.exists(d):
        os.makedirs(d)
# Make the file that will hold the summaries and make the columns.
with open(dirSummaries+'ExecutiveSummary.csv', 'w') as fSum:
    fSum.write('#Summary of results from all benchmarks\n')
    fSum.write('NoAnnotations,Threshold,Method,EnrichmentSetUsed,TissuesTested,GenesSubmitted,TissuesReturned,GenesUsed,AvgFold,AvgQ,GenesInDict\n')
# ==============================================================================
# ==============================================================================
# # Perform the bulk of the analysis, run every single dictionary on every set
# ==============================================================================
# ==============================================================================
i = 0
# look in the dictionaries
for folder in os.walk(path_dicts):
# open each one
for f_dict in folder[2]:
if f_dict == '.DS_Store':
continue
tissue_df = pd.read_csv(path_dicts+f_dict)
# tobedropped when tissue dictionary is corrected
annot, thresh = re.findall(r"[-+]?\d*\.\d+|\d+", f_dict)
annot = int(annot)
thresh = float(thresh) # typecasting
method = f_dict[-7:-4]
ntiss = len(tissue_df.columns)
ngenes = tissue_df.shape[0]
# open each enrichment set
for fodder in os.walk(path_sets):
for f_set in fodder[2]:
df = pd.read_csv(path_sets + f_set)
test = df.gene.values
ntest = len(test)
short_name = f_set[16:len(f_set)-16]
df_analysis, unused = tea.enrichment_analysis(test, tissue_df,
alpha=0.05,
show=False)
# save the analysis to the relevant folder
savepath = '../output/HGT'+annot + '_' + method + '_Results/'
df_analysis.to_csv(savepath + f_set+'.csv', index=False)
tea.plot_enrichment_results(df_analysis,
save='savepath'+f_set+'Graph',
ftype='pdf')
nana = len(df_analysis) # len of results
nun = len(unused) # number of genes dropped
avf = df_analysis['Enrichment Fold Change'].mean()
avq = df_analysis['Q value'].mean()
s = '{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'.format(
annot, thresh, method, f_set, ntiss, ntest, nana,
ntest-nun, avf, avq, ngenes)
with open(dirSummaries+'ExecutiveSummary.csv', 'a+') as fSum:
fSum.write(s)
fSum.write('\n')
# Print summary to csv
df_summary = pd.read_csv(dirSummaries+'ExecutiveSummary.csv', comment='#')
# some entries contain nulls. before I remove them, I can inspect them
df_summary.isnull().any()
indexFold = df_summary['AvgFold'].index[df_summary['AvgFold'].apply(np.isnan)]
indexQ = df_summary['AvgQ'].index[df_summary['AvgQ'].apply(np.isnan)]
# NOTE(review): `.ix` is deprecated/removed in modern pandas; these two lines
# only inspect specific null rows and discard the result.
df_summary.ix[indexFold[0]]
df_summary.ix[indexQ[5]]
# kill all nulls!
df_summary.dropna(inplace=True)
# calculate fraction of tissues that tested significant in each run
df_summary['fracTissues'] = df_summary['TissuesReturned']/df_summary[
    'TissuesTested']
df_summary.sort_values(['NoAnnotations', 'Threshold', 'Method'], inplace=True)
# ==============================================================================
# ==============================================================================
# # Plot summary graphs
# ==============================================================================
# ==============================================================================
# Row selector: one (annotation cutoff, threshold, method) combination.
sel = lambda x, y, z: ((df_summary.NoAnnotations == x) &
                       (df_summary.Threshold == y) & (df_summary.Method == z))
# KDE of the fraction of all tissues that tested significant
# one color per cutoff
cols = ['#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e']
ls = ['-', '--', ':']  # used with varying thresh
thresh = df_summary.Threshold.unique()
NoAnnotations = df_summary.NoAnnotations.unique()
def resplot(column, method='any'):
    """
    A method to quickly plot all combinations of cutoffs, thresholds.
    All cutoffs are same color
    All Thresholds are same line style

    Parameters:
    column -- the column to select
    method -- the method used to specify similarity metrics
    """
    for j, annots in enumerate(NoAnnotations):
        for i, threshold in enumerate(thresh):
            # Threshold 1 dictionaries are excluded from the KDE overlays.
            if threshold == 1:
                continue
            s = sel(annots, threshold, method)
            # NOTE(review): the label string below spans a backslash
            # continuation; its exact internal whitespace was lost when this
            # file was extracted -- confirm against the original source.
            df_summary[s][column].plot('kde', color=cols[j], ls=ls[i], lw=4,
                                       label='Annotation Cut-off: {0}, \
                                       Threshold: {1}'.format(annots,
                                                              threshold))
# --- Summary KDE figures, one per metric --------------------------------
# KDE of the fraction of all tissues that tested significant.
resplot('fracTissues')
plt.xlabel('Fraction of all tissues that tested significant')
plt.xlim(0, 1)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'fractissuesKDE_method=any.pdf')
plt.close()

resplot('AvgQ', method='avg')
# BUGFIX: this figure plots AvgQ, but its x-label was copy-pasted from the
# fracTissues plot above; label it like the other AvgQ figure below.
plt.xlabel('AvgQ value')
plt.xlim(0, 0.05)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgQKDE_method=avg.pdf')
plt.close()

resplot('AvgQ')
plt.xlabel('AvgQ value')
plt.xlim(0, .05)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgQKDE_method=any.pdf')
plt.close()

# KDE of the fraction of avgFold
resplot('AvgFold')
plt.xlabel('Avg Fold Change value')
plt.xlim(0, 15)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgFoldChangeKDE.pdf')
plt.close()
def line_prepender(filename, line):
    """Insert `line` (newline-normalized) at the top of the file `filename`.

    The rest of the file's content is preserved below the new first line.
    """
    with open(filename, 'r') as handle:
        original = handle.read()
    with open(filename, 'w') as handle:
        handle.write(line.rstrip('\r\n') + '\n' + original)
# ==============================================================================
# ==============================================================================
# # Detailed analysis of 25 and 50 genes per node dictionaries
# ==============================================================================
# ==============================================================================
def walker(tissue_df, directory, save=True):
    """Given the tissue dictionary and a directory to save to,
    open all the gene sets, analyze them and deposit the results in the
    specified directory.

    Parameters:
    -------------------
    tissue_df - pandas dataframe containing specified tissue dictionary
    directory - where to save to
    save - boolean indicating whether to save results or not.

    Gene sets with no enrichment at all are listed in `directory`/empty.txt.
    """
    with open(directory+'empty.txt', 'w') as f:
        f.write('Genesets with no enrichment:\n')
    # go through each file in the folder
    for fodder in os.walk(path_sets):
        for f_set in fodder[2]:
            # open df
            df = pd.read_csv(path_sets + f_set)
            # extract gene list and analyze
            short_name = f_set
            test = df.gene.values
            df_analysis, unused = tea.enrichment_analysis(test, tissue_df,
                                                          show=False)
            # BUGFIX: the original conditions used `empty is False & save`
            # and `empty & save`; `&` binds tighter than `is`, so the first
            # parsed as `empty is (False & save)` and saved results even
            # when save=False.  Use plain boolean logic instead.
            if not df_analysis.empty and save:
                # save without index
                df_analysis.to_csv(directory+short_name+'.csv', index=False)
                # add a comment naming the gene set
                line = '#' + short_name+'\n'
                line_prepender(directory+short_name+'.csv', line)
                # plot
                tea.plot_enrichment_results(df_analysis, title=short_name,
                                            dirGraphs=directory, ftype='pdf')
                plt.close()
            # if it's empty and you want to save, record it in empty.txt
            if df_analysis.empty and save:
                with open(directory+'empty.txt', 'a+') as f:
                    f.write(short_name+'\n')
def compare(resA, resB, l, r):
    """Outer-merge two TEA result files (.csv written by tea) on tissue name.

    Parameters:
    resA, resB -- filenames that store the result dataframes
    l, r -- suffixes appended to the columns coming from resA / resB

    Returns:
    A dataframe with columns Tissue, Q value<l>, Q value<r>,
    Enrichment Fold Change<l>, Enrichment Fold Change<r>, sorted by the two
    Q-value columns; tissues present in only one file get NaN on the other
    side.
    """
    left = pd.read_csv(resA, comment='#')
    right = pd.read_csv(resB, comment='#')
    # The Observed column is not compared; Tissue doubles as the merge key.
    for frame in (left, right):
        frame.drop('Observed', axis=1, inplace=True)
        frame['key'] = frame['Tissue']
    merged = pd.merge(left, right, on='key', suffixes=[l, r], how='outer')
    # Collapse the two suffixed Tissue columns back into a single one.
    merged.drop('Tissue%s' % (l), axis=1, inplace=True)
    merged.drop('Tissue%s' % (r), axis=1, inplace=True)
    merged['Tissue'] = merged['key']
    merged.drop('key', axis=1, inplace=True)
    # Most significant rows first; Expected columns are not reported.
    merged.sort_values(['Q value%s' % (l), 'Q value%s' % (r)], inplace=True)
    merged.drop(['Expected%s' % (l), 'Expected%s' % (r)], axis=1, inplace=True)
    keep = ['Tissue', 'Q value%s' % (l), 'Q value%s' % (r),
            'Enrichment Fold Change%s' % (l), 'Enrichment Fold Change%s' % (r)]
    return merged[keep]
# Run the detailed walkers on the four main dictionaries (25/50/100/33
# annotations per node, threshold 0.95, method 'any').
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff25_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT25_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff50_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT50_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff100_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT100_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff33_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT33_any)
# Emit one row per dictionary with the number of terms it contains.
grouped = df_summary.groupby(['NoAnnotations', 'Threshold', 'Method'])
with open('../doc/figures/TissueNumbers.csv', 'w') as f:
    f.write('Annotation Cutoff,Similarity Threshold,Method')
    f.write(',No. Of Terms in Dictionary\n')
    for key, group in grouped:
        f.write('{0},{1},{2},{3}\n'.format(key[0], key[1], key[2],
                                           group.TissuesTested.unique()[0]))
# NOTE(review): the table above is written to ../doc/figures/ but read back
# from ../output/SummaryInformation/ -- confirm that second copy exists.
tissue_data = pd.read_csv('../output/SummaryInformation/TissueNumbers.csv')
# Redefine sel: select rows of tissue_data by threshold (col 1), method (col 2).
sel = lambda y, z: ((tissue_data.iloc[:, 1] == y) &
                    (tissue_data.iloc[:, 2] == z))
# KDE of the fraction of all tissues that tested significant
cols = ['#1b9e77', '#d95f02', '#7570b3']  # used with varying colors
thresh = df_summary.Threshold.unique()
NoAnnotations = df_summary.NoAnnotations.unique()
# def resplot(column, cutoff=25, method='any'):
# """
# A method to quickly plot all combinations of cutoffs, thresholds.
# All cutoffs are same color
# All Thresholds are same line style
# """
# for i, threshold in enumerate(thresh):
# ax = plt.gca()
# ax.grid(False)
# if threshold == 1:
# continue
# tissue_data[sel(threshold, method)].plot(x='No. Of Annotations',
# y='No. Of Tissues in Dictionary',
# kind='scatter',
# color=cols[i],
# ax=ax, s=50, alpha=.7)
# ax.set_xlim(20, 110)
# ax.set_xscale('log')
# ax.set_xticks([25, 33, 50, 100])
# ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#
# ax.set_ylim(25, 1000)
# ax.set_yscale('log')
# ax.set_yticks([50, 100, 250, 500])
# ax.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#
#
# resplot('No. Of Tissues in Dictionary')
# Pairwise comparisons: same dictionary across two GABAergic datasets, then
# the same dataset across the 33- vs 50-annotation dictionaries.
a = '../output/HGT33_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
b = '../output/HGT33_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
df = compare(a, b, 'Spencer', 'Watson')
df.to_csv('../output/comparisons/neuronal_comparison_33_WBPaper00024970_with_WBPaper0037950_complete.csv',
          index=False, na_rep='-', float_format='%.2g')
a = '../output/HGT33_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
b = '../output/HGT50_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
df = compare(a, b, '33', '50')
df.to_csv('../output/comparisons/neuronal_comparison_GABAergic_33-50_WBPaper0037950_complete.csv',
          index=False, na_rep='-', float_format='%.2g')
a = '../output/HGT33_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
b = '../output/HGT50_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
df = compare(a, b, '-33', '-50')
# print to figures (top ten rows only for the paper figure)
df.head(10).to_csv('../doc/figures/dict-comparison-50-33.csv', index=False,
                   na_rep='-', float_format='%.2g')
df.to_csv('../output/comparisons/neuronal_comparison_Pan_Neuronal_33-50_WBPaper0031532_complete.csv',
          index=False, na_rep='-', float_format='%.2g')
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/image_interp.py | 6 | 1923 | #!/usr/bin/env python
"""
The same (small) array, interpolated with three different
interpolation methods.
The center of the pixel at A[i,j] is plotted at i+0.5, i+0.5. If you
are using interpolation='nearest', the region bounded by (i,j) and
(i+1,j+1) will have the same color. If you are using interpolation,
the pixel center will have the same color as it does with nearest, but
other pixels will be interpolated between the neighboring pixels.
Earlier versions of matplotlib (<0.63) tried to hide the edge effects
from you by setting the view limits so that they would not be visible.
A recent bugfix in antigrain, and a new implementation in the
matplotlib._image module which takes advantage of this fix, no longer
makes this necessary. To prevent edge effects, when doing
interpolation, the matplotlib._image module now pads the input array
with identical pixels around the edge. Eg, if you have a 5x5 array
with colors a-y as below
a b c d e
f g h i j
k l m n o
p q r s t
u v w x y
the _image module creates the padded array,
a a b c d e e
a a b c d e e
f f g h i j j
k k l m n o o
p p q r s t t
o u v w x y y
o u v w x y y
does the interpolation/resizing, and then extracts the central region.
This allows you to plot the full range of your array w/o edge effects,
and for example to layer multiple images of different sizes over one
another with different interpolation methods - see
examples/layer_images.py. It also implies a performance hit, as this
new temporary, padded array must be created. Sophisticated
interpolation also implies a performance hit, so if you need maximal
performance or have very large images, interpolation='nearest' is
suggested.
"""
from pylab import *
# One random 5x5 array rendered three times, once per interpolation scheme,
# so the figures can be compared side by side.
A = rand(5,5)
figure(1)
imshow(A, interpolation='nearest')
grid(True)
figure(2)
imshow(A, interpolation='bilinear')
grid(True)
figure(3)
imshow(A, interpolation='bicubic')
grid(True)
show()
| mit |
markslwong/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 9 | 67662 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
  """Assert each metric named in `keys` lies in [0, 1], padded by epsilon."""
  slack = 0.00001  # Tolerate floating point edge cases.
  lower, upper = 0.0 - slack, 1.0 + slack
  for metric_key in keys:
    estimator_test_utils.assert_in_range(lower, upper, metric_key, metrics)
class _CheckCallsHead(head_lib.Head):
  """Head that checks whether head_ops is called."""

  def __init__(self):
    # Incremented every time create_model_fn_ops runs.
    self._head_ops_called_times = 0

  @property
  def logits_dimension(self):
    # Single logit: this fake head models a scalar target.
    return 1

  def create_model_fn_ops(
      self, mode, features, labels=None, train_op_fn=None, logits=None,
      logits_input=None, scope=None):
    """See `_Head`."""
    self._head_ops_called_times += 1
    loss = losses.mean_squared_error(labels, logits)
    return model_fn.ModelFnOps(
        mode,
        predictions={'loss': loss},
        loss=loss,
        train_op=train_op_fn(loss),
        eval_metric_ops={'loss': loss})

  @property
  def head_ops_called_times(self):
    # Number of times create_model_fn_ops has been invoked.
    return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
  """Session hook that tallies how many training steps have executed."""

  def __init__(self):
    self._num_steps_seen = 0

  def after_run(self, run_context, run_values):
    # Both arguments are required by the SessionRunHook interface but unused.
    del run_context, run_values
    self._num_steps_seen += 1

  @property
  def steps(self):
    return self._num_steps_seen
class EmbeddingMultiplierTest(test.TestCase):
  """Tests embedding_lr_multipliers handling in the dnn_linear_combined model_fn."""

  def testRaisesNonEmbeddingColumn(self):
    # An lr multiplier attached to a non-embedding (one-hot) column is an
    # error: multipliers can only scale embedding gradients.
    one_hot_language = feature_column.one_hot_column(
        feature_column.sparse_column_with_hash_bucket('language', 10))

    params = {
        'dnn_feature_columns': [one_hot_language],
        'head': head_lib.multi_class_head(2),
        'dnn_hidden_units': [1],
        # Set lr mult to 0. to keep embeddings constant.
        'embedding_lr_multipliers': {
            one_hot_language: 0.0
        },
        'dnn_optimizer': 'Adagrad',
    }
    features = {
        'language':
            sparse_tensor.SparseTensor(
                values=['en', 'fr', 'zh'],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
    }
    labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
    with self.assertRaisesRegexp(ValueError,
                                 'can only be defined for embedding columns'):
      dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
                                                        model_fn.ModeKeys.TRAIN,
                                                        params)

  def testMultipliesGradient(self):
    # A multiplier of 0 on the language embedding must freeze it during
    # training, while the (unmultiplied) wire embedding remains trainable.
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    embedding_wire = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('wire', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))

    params = {
        'dnn_feature_columns': [embedding_language, embedding_wire],
        'head': head_lib.multi_class_head(2),
        'dnn_hidden_units': [1],
        # Set lr mult to 0. to keep language embeddings constant, whereas wire
        # embeddings will be trained.
        'embedding_lr_multipliers': {
            embedding_language: 0.0
        },
        'dnn_optimizer': 'Adagrad',
    }
    with ops.Graph().as_default():
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1]),
          'wire':
              sparse_tensor.SparseTensor(
                  values=['omar', 'stringer', 'marlo'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1]),
      }
      labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
      training_util.create_global_step()
      model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
          features, labels, model_fn.ModeKeys.TRAIN, params)
      with monitored_session.MonitoredSession() as sess:
        language_var = dnn_linear_combined._get_embedding_variable(
            embedding_language, 'dnn', 'dnn/input_from_feature_columns')
        language_initial_value = sess.run(language_var)
        # NOTE(review): indentation was lost in extraction; the assert is
        # placed inside the loop (checked after every training step), which
        # matches the upstream TensorFlow test.
        for _ in range(2):
          _, language_value = sess.run([model_ops.train_op, language_var])
          self.assertAllClose(language_value, language_initial_value)
        # We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
  """Tests for DNNLinearCombinedEstimator with a custom head."""

  def testEstimatorContract(self):
    estimator_test_utils.assert_estimator_contract(
        self, dnn_linear_combined.DNNLinearCombinedEstimator)

  def testNoFeatureColumns(self):
    # At least one of linear/dnn feature columns must be supplied.
    with self.assertRaisesRegexp(
        ValueError,
        'Either linear_feature_columns or dnn_feature_columns must be defined'):
      dnn_linear_combined.DNNLinearCombinedEstimator(
          head=_CheckCallsHead(),
          linear_feature_columns=None,
          dnn_feature_columns=None,
          dnn_hidden_units=[3, 3])

  def testCheckCallsHead(self):
    """Tests binary classification using matrix data as input."""
    head = _CheckCallsHead()
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [
        feature_column.real_valued_column('feature', dimension=4)]
    bucketized_feature = [feature_column.bucketized_column(
        cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]

    estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
        head,
        linear_feature_columns=bucketized_feature,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    # Each of fit/evaluate/predict should build head ops exactly once.
    estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
    self.assertEqual(1, head.head_ops_called_times)

    estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
    self.assertEqual(2, head.head_ops_called_times)

    estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
    self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
  """Tests for DNNLinearCombinedClassifier."""

  def testEstimatorContract(self):
    estimator_test_utils.assert_estimator_contract(
        self, dnn_linear_combined.DNNLinearCombinedClassifier)

  def testExperimentIntegration(self):
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    exp = experiment.Experiment(
        estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
            linear_feature_columns=cont_features,
            dnn_feature_columns=cont_features,
            dnn_hidden_units=[3, 3]),
        train_input_fn=test_data.iris_input_logistic_fn,
        eval_input_fn=test_data.iris_input_logistic_fn)
    exp.test()

  def testNoFeatureColumns(self):
    with self.assertRaisesRegexp(
        ValueError,
        'Either linear_feature_columns or dnn_feature_columns must be defined'):
      dnn_linear_combined.DNNLinearCombinedClassifier(
          linear_feature_columns=None,
          dnn_feature_columns=None,
          dnn_hidden_units=[3, 3])

  def testNoDnnHiddenUnits(self):
    def _input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.real_valued_column('age')

    with self.assertRaisesRegexp(
        ValueError,
        'dnn_hidden_units must be defined when dnn_feature_columns is '
        'specified'):
      classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
          dnn_feature_columns=[age, language])
      classifier.fit(input_fn=_input_fn, steps=2)

  def testSyncReplicasOptimizerUnsupported(self):
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
        opt=adagrad.AdagradOptimizer(learning_rate=0.1),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer=sync_optimizer)

    with self.assertRaisesRegexp(
        ValueError,
        'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
      classifier.fit(
          input_fn=test_data.iris_input_multiclass_fn, steps=100,
          monitors=[sync_hook])

  def testEmbeddingMultiplier(self):
    # Verifies the multiplier is plumbed through to the estimator params.
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        dnn_feature_columns=[embedding_language],
        dnn_hidden_units=[3, 3],
        embedding_lr_multipliers={embedding_language: 0.8})
    self.assertEqual({
        embedding_language: 0.8
    }, classifier.params['embedding_lr_multipliers'])

  def testInputPartitionSize(self):
    def _input_fn_float_label(num_epochs=None):
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
      return features, labels

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        feature_column.embedding_column(language_column, dimension=1),
    ]

    # Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partititions produced.
    config = run_config.RunConfig(tf_random_seed=1)
    config._num_ps_replicas = 10
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=2,
        dnn_feature_columns=feature_columns,
        dnn_hidden_units=[3, 3],
        dnn_optimizer='Adagrad',
        config=config,
        input_layer_min_slice_size=1)

    # Ensure the param is passed in.
    self.assertTrue(callable(classifier.params['input_layer_partitioner']))

    # Ensure the partition count is 10.
    classifier.fit(input_fn=_input_fn_float_label, steps=50)
    partition_count = 0
    for name in classifier.get_variable_names():
      if 'language_embedding' in name and 'Adagrad' in name:
        partition_count += 1
    self.assertEqual(10, partition_count)

  def testLogisticRegression_MatrixData(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_feature = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_feature,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testLogisticRegression_TensorData(self):
    """Tests binary classification using Tensor data as input."""

    def _input_fn():
      iris = test_data.prepare_iris_data_for_logistic_regression()
      features = {}
      for i in range(4):
        # The following shows how to provide the Tensor data for
        # RealValuedColumns.
        features.update({
            str(i):
                array_ops.reshape(
                    constant_op.constant(
                        iris.data[:, i], dtype=dtypes.float32), [-1, 1])
        })
      # The following shows how to provide the SparseTensor data for
      # a SparseColumn.
      features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
          values=['en', 'fr', 'zh'],
          indices=[[0, 0], [0, 1], [60, 0]],
          dense_shape=[len(iris.target), 2])
      labels = array_ops.reshape(
          constant_op.constant(
              iris.target, dtype=dtypes.int32), [-1, 1])
      return features, labels

    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [
        feature_column.real_valued_column(str(i)) for i in range(4)
    ]
    linear_features = [
        feature_column.bucketized_column(cont_features[i],
                                         test_data.get_quantile_based_buckets(
                                             iris.data[:, i], 10))
        for i in range(4)
    ]
    linear_features.append(
        feature_column.sparse_column_with_hash_bucket(
            'dummy_sparse_column', hash_bucket_size=100))

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=linear_features,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=100)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""

    def _input_fn():
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant([[1], [0], [0]])
      return features, labels

    sparse_features = [
        # The given hash_bucket_size results in variables larger than the
        # default min_slice_size attribute, so the variables are partitioned.
        feature_column.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=2e7)
    ]
    embedding_features = [
        feature_column.embedding_column(
            sparse_features[0], dimension=1)
    ]

    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig()
      # Because we did not start a distributed cluster, we need to pass an
      # empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps" which are not present.
      config._cluster_spec = server_lib.ClusterSpec({})

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=sparse_features,
        dnn_feature_columns=embedding_features,
        dnn_hidden_units=[3, 3],
        config=config)

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testMultiClass(self):
    """Tests multi-class classification using matrix data as input.

    Please see testLogisticRegression_TensorData() for how to use Tensor
    data as input instead.
    """
    iris = base.load_iris()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3,
        linear_feature_columns=bucketized_features,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testMultiClassLabelKeys(self):
    """Tests n_classes > 2 with label_keys vocabulary for labels."""
    # Byte literals needed for python3 test to pass.
    label_keys = [b'label0', b'label1', b'label2']

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[.8], [0.2], [.1]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant(
          [[label_keys[1]], [label_keys[0]], [label_keys[0]]],
          dtype=dtypes.string)
      return features, labels

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3,
        linear_feature_columns=[language_column],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        label_keys=label_keys)

    classifier.fit(input_fn=_input_fn, steps=50)

    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    _assert_metrics_in_range(('accuracy',), scores)
    self.assertIn('loss', scores)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predicted_classes = list(
        classifier.predict_classes(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertEqual(3, len(predicted_classes))
    # Every predicted class must come from the provided vocabulary.
    for pred in predicted_classes:
      self.assertIn(pred, label_keys)
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertAllEqual(predicted_classes, predictions)

  def testLoss(self):
    """Tests loss calculation."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The logistic prediction should be (y = 0.25).
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      labels = constant_op.constant([[1], [0], [0], [0]])
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=2,
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))
    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
    # Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
    self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)

  def testLossWithWeights(self):
    """Tests loss calculation with weights."""

    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
      # The logistic prediction should be (y = 0.25).
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      return features, labels

    def _input_fn_eval():
      # 4 rows, with different weights.
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[7.], [1.], [1.], [1.]])
      }
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        weight_column_name='w',
        n_classes=2,
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))
    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
    self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)

  def testTrainWithWeights(self):
    """Tests training with given weight column."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[100.], [3.], [2.], [2.]])
      }
      return features, labels

    def _input_fn_eval():
      # Create 4 rows (y = x).
      labels = constant_op.constant([[1], [1], [1], [1]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        weight_column_name='w',
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))
    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    _assert_metrics_in_range(('accuracy',), scores)

  def testCustomOptimizerByObject(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_features,
        linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testCustomOptimizerByString(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_features,
        linear_optimizer='Ftrl',
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer='Adagrad')

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testCustomOptimizerByFunction(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    def _optimizer_exp_decay():
      global_step = training_util.get_global_step()
      learning_rate = learning_rate_decay.exponential_decay(
          learning_rate=0.1,
          global_step=global_step,
          decay_steps=100,
          decay_rate=0.001)
      return adagrad.AdagradOptimizer(learning_rate=learning_rate)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_features,
        linear_optimizer=_optimizer_exp_decay,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer=_optimizer_exp_decay)

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testPredict(self):
    """Tests weight column in evaluation."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
      return features, labels

    def _input_fn_predict():
      y = input_lib.limit_epochs(
          array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
      features = {'x': y}
      return features

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn_train, steps=100)

    probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
    self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
    classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
    self.assertListEqual([0] * 4, classes)

  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""

    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {
          'x':
              input_lib.limit_epochs(
                  array_ops.ones(
                      shape=[4, 1], dtype=dtypes.float32),
                  num_epochs=num_epochs)
      }
      return features, labels

    def _my_metric_op(predictions, labels):
      # For the case of binary classification, the 2nd column of "predictions"
      # denotes the model predictions.
      labels = math_ops.to_float(labels)
      predictions = array_ops.strided_slice(
          predictions, [0, 1], [-1, 2], end_mask=1)
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=_input_fn,
        steps=100,
        metrics={
            'my_accuracy':
                MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='classes'),
            'my_precision':
                MetricSpec(
                    metric_fn=metric_ops.streaming_precision,
                    prediction_key='classes'),
            'my_metric':
                MetricSpec(
                    metric_fn=_my_metric_op, prediction_key='probabilities')
        })
    self.assertTrue(
        set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
            set(scores.keys())))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(classifier.predict_classes(
        input_fn=predict_input_fn)))
    self.assertEqual(
        _sklearn.accuracy_score([1, 0, 0, 0], predictions),
        scores['my_accuracy'])

    # Test the case where the 2nd element of the key is neither "classes" nor
    # "probabilities".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})

    # Test the case where the tuple of the key doesn't have 2 elements.
    with self.assertRaises(ValueError):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              ('bad_length_name', 'classes', 'bad_length'):
                  metric_ops.streaming_accuracy
          })

    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              'bad_name':
                  MetricSpec(
                      metric_fn=metric_ops.streaming_auc,
                      prediction_key='bad_type')
          })

  def testVariableQuery(self):
    """Tests get_variable_names and get_variable_value."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn_train, steps=500)
    var_names = classifier.get_variable_names()
    self.assertGreater(len(var_names), 3)
    # Every reported variable must be retrievable by value.
    for name in var_names:
      classifier.get_variable_value(name)

  def testExport(self):
    """Tests export model for servo."""

    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[
            feature_column.real_valued_column('age'),
            language,
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language, dimension=1),
        ],
        dnn_hidden_units=[3, 3])
    classifier.fit(input_fn=input_fn, steps=100)

    export_dir = tempfile.mkdtemp()
    input_feature_key = 'examples'

    def serving_input_fn():
      features, targets = input_fn()
      features[input_feature_key] = array_ops.placeholder(dtypes.string)
      return features, targets

    classifier.export(
        export_dir,
        serving_input_fn,
        input_feature_key,
        use_deprecated_input_fn=False)

  def testCenteredBias(self):
    """Tests bias is centered or not."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        enable_centered_bias=True)

    classifier.fit(input_fn=_input_fn_train, steps=1000)
    self.assertIn('binary_logistic_head/centered_bias_weight',
                  classifier.get_variable_names())
    # logodds(0.75) = 1.09861228867
    self.assertAlmostEqual(
        1.0986,
        float(classifier.get_variable_value(
            'binary_logistic_head/centered_bias_weight')[0]),
        places=2)

  def testDisableCenteredBias(self):
    """Tests bias is centered or not."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        enable_centered_bias=False)

    classifier.fit(input_fn=_input_fn_train, steps=500)
    self.assertNotIn('centered_bias_weight', classifier.get_variable_names())

  def testGlobalStepLinearOnly(self):
    """Tests global step update for linear-only model."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)
    age = feature_column.real_valued_column('age')

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])

    self.assertEqual(100, step_counter.steps)

  def testGlobalStepDNNOnly(self):
    """Tests global step update for dnn-only model."""

    def input_fn():
      return {
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        dnn_feature_columns=[
            feature_column.embedding_column(language, dimension=1)],
        dnn_hidden_units=[3, 3])
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])

    self.assertEqual(100, step_counter.steps)

  def testGlobalStepDNNLinearCombinedBug(self):
    """Tests global step update for dnn-linear combined model."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)
    age = feature_column.real_valued_column('age')

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language],
        dnn_feature_columns=[
            feature_column.embedding_column(language, dimension=1)],
        dnn_hidden_units=[3, 3],
        fix_global_step_increment_bug=False)
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
    global_step = classifier.get_variable_value('global_step')

    if global_step == 100:
      # Expected is 100, but because of the global step increment bug, is 50.
      # Occasionally, step increments one more time due to a race condition,
      # reaching 51 steps.
      self.assertIn(step_counter.steps, [50, 51])
    else:
      # Occasionally, training stops when global_step == 102, due to a race
      # condition. In addition, occasionally step increments one more time due
      # to a race condition reaching 52 steps.
      self.assertIn(step_counter.steps, [51, 52])

  def testGlobalStepDNNLinearCombinedBugFixed(self):
    """Tests global step update for dnn-linear combined model."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)
    age = feature_column.real_valued_column('age')

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language],
        dnn_feature_columns=[
            feature_column.embedding_column(language, dimension=1)],
        dnn_hidden_units=[3, 3],
        fix_global_step_increment_bug=True)
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
    self.assertEqual(100, step_counter.steps)

  def testLinearOnly(self):
    """Tests that linear-only instantiation works."""

    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.real_valued_column('age')

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)

    variable_names = classifier.get_variable_names()
    self.assertNotIn('dnn/logits/biases', variable_names)
    self.assertNotIn('dnn/logits/weights', variable_names)
    self.assertIn('linear/bias_weight', variable_names)
    self.assertIn('linear/age/weight', variable_names)
    self.assertIn('linear/language/weights', variable_names)
    self.assertEquals(
        1, len(classifier.get_variable_value('linear/age/weight')))
    self.assertEquals(
        100, len(classifier.get_variable_value('linear/language/weights')))

  def testLinearOnlyOneFeature(self):
    """Tests that linear-only instantiation works for one feature only."""

    def input_fn():
      return {
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 99)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)

    variable_names = classifier.get_variable_names()
    self.assertNotIn('dnn/logits/biases', variable_names)
    self.assertNotIn('dnn/logits/weights', variable_names)
    self.assertIn('linear/bias_weight', variable_names)
    self.assertIn('linear/language/weights', variable_names)
    self.assertEquals(
        1, len(classifier.get_variable_value('linear/bias_weight')))
    self.assertEquals(
        99, len(classifier.get_variable_value('linear/language/weights')))

  def testDNNOnly(self):
    """Tests that DNN-only instantiation works."""
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
    classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)

    variable_names = classifier.get_variable_names()
    self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
    self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
    self.assertIn('dnn/logits/weights', variable_names)
    self.assertIn('dnn/logits/biases', variable_names)
    self.assertNotIn('linear/bias_weight', variable_names)
    self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)

  def testDNNWeightsBiasesNames(self):
    """Tests the names of DNN weights and biases in the checkpoints."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn_train, steps=5)
    variable_names = classifier.get_variable_names()
    self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
    self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
    self.assertIn('dnn/logits/weights', variable_names)
    self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
samzhang111/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
tdhopper/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces
y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces
X_test = test[:, :np.ceil(0.5 * n_pixels)]
y_test = test[:, np.floor(0.5 * n_pixels):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
xwolf12/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities=
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
hlin117/statsmodels | statsmodels/graphics/gofplots.py | 29 | 26714 | from statsmodels.compat.python import lzip, string_types
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
from . import utils
__all__ = ['qqplot', 'qqplot_2samples', 'qqline', 'ProbPlot']
class ProbPlot(object):
    """
    Class for convenient construction of Q-Q, P-P, and probability plots.

    Can take arguments specifying the parameters for dist or fit them
    automatically. (See fit under kwargs.)

    Parameters
    ----------
    data : array-like
        1d data array
    dist : A scipy.stats or statsmodels distribution
        Compare x against dist. The default is
        scipy.stats.distributions.norm (a standard normal).
    distargs : tuple
        A tuple of arguments passed to dist to specify it fully
        so dist.ppf may be called.
    loc : float
        Location parameter for dist
    a : float
        Offset for the plotting position of an expected order
        statistic, for example. The plotting positions are given
        by (i - a)/(nobs - 2*a + 1) for i in range(0,nobs+1)
    scale : float
        Scale parameter for dist
    fit : boolean
        If fit is false, loc, scale, and distargs are passed to the
        distribution. If fit is True then the parameters for dist
        are fit automatically using dist.fit. The quantiles are formed
        from the standardized data, after subtracting the fitted loc
        and dividing by the fitted scale.

    See Also
    --------
    scipy.stats.probplot

    Notes
    -----
    1) Depends on matplotlib.
    2) If `fit` is True then the parameters are fit using the
       distribution's `fit()` method.
    3) The call signatures for the `qqplot`, `ppplot`, and `probplot`
       methods are similar, so examples 1 through 4 apply to all
       three methods.
    4) The three plotting methods are summarized below:
       ppplot : Probability-Probability plot
            Compares the sample and theoretical probabilities (percentiles).
       qqplot : Quantile-Quantile plot
            Compares the sample and theoretical quantiles
       probplot : Probability plot
            Same as a Q-Q plot, however probabilities are shown in the
            scale of the theoretical distribution (x-axis) and the y-axis
            contains unscaled quantiles of the sample data.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from matplotlib import pyplot as plt

    >>> # example 1
    >>> data = sm.datasets.longley.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> model = sm.OLS(data.endog, data.exog)
    >>> mod_fit = model.fit()
    >>> res = mod_fit.resid # residuals
    >>> probplot = sm.ProbPlot(res)
    >>> probplot.qqplot()
    >>> plt.show()

    qqplot of the residuals against quantiles of t-distribution with 4
    degrees of freedom:

    >>> # example 2
    >>> import scipy.stats as stats
    >>> probplot = sm.ProbPlot(res, stats.t, distargs=(4,))
    >>> fig = probplot.qqplot()
    >>> plt.show()

    qqplot against same as above, but with mean 3 and std 10:

    >>> # example 3
    >>> probplot = sm.ProbPlot(res, stats.t, distargs=(4,), loc=3, scale=10)
    >>> fig = probplot.qqplot()
    >>> plt.show()

    Automatically determine parameters for t distribution including the
    loc and scale:

    >>> # example 4
    >>> probplot = sm.ProbPlot(res, stats.t, fit=True)
    >>> fig = probplot.qqplot(line='45')
    >>> plt.show()

    A second `ProbPlot` object can be used to compare two seperate sample
    sets by using the `other` kwarg in the `qqplot` and `ppplot` methods.

    >>> # example 5
    >>> import numpy as np
    >>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
    >>> y = np.random.normal(loc=8.75, scale=3.25, size=37)
    >>> pp_x = sm.ProbPlot(x, fit=True)
    >>> pp_y = sm.ProbPlot(y, fit=True)
    >>> fig = pp_x.qqplot(line='45', other=pp_y)
    >>> plt.show()

    The following plot displays some options, follow the link to see the
    code.

    .. plot:: plots/graphics_gofplots_qqplot.py
    """

    def __init__(self, data, dist=stats.norm, fit=False,
                 distargs=(), a=0, loc=0, scale=1):

        self.data = data
        self.a = a
        self.nobs = data.shape[0]
        self.distargs = distargs
        self.fit = fit

        # Allow the distribution to be given by name, e.g. 'norm'.
        if isinstance(dist, string_types):
            dist = getattr(stats, dist)

        # NOTE: the parameters are estimated unconditionally (even when
        # fit=False) because `sample_percentiles` standardizes with them.
        self.fit_params = dist.fit(data)
        if fit:
            self.loc = self.fit_params[-2]
            self.scale = self.fit_params[-1]
            if len(self.fit_params) > 2:
                # Shape parameters precede loc/scale in scipy's fit output.
                self.dist = dist(*self.fit_params[:-2],
                                 **dict(loc = 0, scale = 1))
            else:
                self.dist = dist(loc=0, scale=1)
        elif distargs or loc == 0 or scale == 1:
            self.dist = dist(*distargs, **dict(loc=loc, scale=scale))
            self.loc = loc
            self.scale = scale
        else:
            self.dist = dist
            self.loc = loc
            self.scale = scale

        # properties
        self._cache = resettable_cache()

    @cache_readonly
    def theoretical_percentiles(self):
        """Theoretical plotting positions (percentiles) for `nobs` points."""
        return plotting_pos(self.nobs, self.a)

    @cache_readonly
    def theoretical_quantiles(self):
        """Theoretical quantiles obtained via the distribution's ppf."""
        try:
            return self.dist.ppf(self.theoretical_percentiles)
        except TypeError:
            # BUG FIX: the original message combined a printf-style '%s'
            # placeholder with str.format, so the distribution name never
            # appeared and the text contained a literal '%s'.
            msg = '{0} requires more parameters to compute ppf'.format(
                self.dist.name)
            raise TypeError(msg)
        # NOTE: a bare `except:` clause that built an unused message and
        # simply re-raised was removed here; any other error propagates
        # unchanged exactly as before.

    @cache_readonly
    def sorted_data(self):
        """A sorted copy of `data` (the original array is left untouched)."""
        sorted_data = np.array(self.data, copy=True)
        sorted_data.sort()
        return sorted_data

    @cache_readonly
    def sample_quantiles(self):
        """Sorted data, standardized by the fitted loc/scale when fit=True."""
        # NOTE(review): the `and` looks suspicious -- standardization is
        # skipped if either loc == 0 or scale == 1; historical behavior is
        # preserved here.
        if self.fit and self.loc != 0 and self.scale != 1:
            return (self.sorted_data-self.loc)/self.scale
        else:
            return self.sorted_data

    @cache_readonly
    def sample_percentiles(self):
        """CDF values of the standardized, sorted sample."""
        quantiles = \
            (self.sorted_data - self.fit_params[-2])/self.fit_params[-1]
        return self.dist.cdf(quantiles)

    def ppplot(self, xlabel=None, ylabel=None, line=None, other=None,
               ax=None, **plotkwargs):
        """
        P-P plot of the percentiles (probabilities) of x versus the
        probabilities (percentiles) of a distribution.

        Parameters
        ----------
        xlabel, ylabel : str or None, optional
            User-provided labels for the x-axis and y-axis. If None
            (default), labels are chosen depending on the kwarg `other`.
        line : str {'45', 's', 'r', 'q'} or None, optional
            Reference line option; see `qqline` for the meaning of each
            code. By default (None) no reference line is added.
        other : `ProbPlot` instance, array-like, or None, optional
            If provided, the sample percentiles of this instance are
            plotted against those of `other` (array-like input is wrapped
            in a default `ProbPlot`). Otherwise the theoretical
            percentiles are used.
        ax : Matplotlib AxesSubplot instance, optional
            If given, this subplot is used to plot in instead of a new
            figure being created.
        **plotkwargs : additional matplotlib arguments passed to `plot`.

        Returns
        -------
        fig : Matplotlib figure instance
            If `ax` is None, the created figure. Otherwise the figure to
            which `ax` is connected.
        """
        if other is not None:
            check_other = isinstance(other, ProbPlot)
            if not check_other:
                other = ProbPlot(other)

            fig, ax = _do_plot(other.sample_percentiles,
                               self.sample_percentiles,
                               self.dist, ax=ax, line=line,
                               **plotkwargs)

            if xlabel is None:
                xlabel = 'Probabilities of 2nd Sample'
            if ylabel is None:
                ylabel = 'Probabilities of 1st Sample'
        else:
            fig, ax = _do_plot(self.theoretical_percentiles,
                               self.sample_percentiles,
                               self.dist, ax=ax, line=line,
                               **plotkwargs)
            if xlabel is None:
                xlabel = "Theoretical Probabilities"
            if ylabel is None:
                ylabel = "Sample Probabilities"

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # Probabilities are bounded, so pin both axes to [0, 1].
        ax.set_xlim([0.0, 1.0])
        ax.set_ylim([0.0, 1.0])
        return fig

    def qqplot(self, xlabel=None, ylabel=None, line=None, other=None,
               ax=None, **plotkwargs):
        """
        Q-Q plot of the quantiles of x versus the quantiles/ppf of a
        distribution or the quantiles of another `ProbPlot` instance.

        Parameters
        ----------
        xlabel, ylabel : str or None, optional
            User-provided labels for the x-axis and y-axis. If None
            (default), labels are chosen depending on the kwarg `other`.
        line : str {'45', 's', 'r', 'q'} or None, optional
            Reference line option; see `qqline` for the meaning of each
            code. By default (None) no reference line is added.
        other : `ProbPlot` instance, array-like, or None, optional
            If provided, the sample quantiles of this instance are plotted
            against those of `other` (array-like input is wrapped in a
            default `ProbPlot`). Otherwise the theoretical quantiles are
            used.
        ax : Matplotlib AxesSubplot instance, optional
            If given, this subplot is used to plot in instead of a new
            figure being created.
        **plotkwargs : additional matplotlib arguments passed to `plot`.

        Returns
        -------
        fig : Matplotlib figure instance
            If `ax` is None, the created figure. Otherwise the figure to
            which `ax` is connected.
        """
        if other is not None:
            check_other = isinstance(other, ProbPlot)
            if not check_other:
                other = ProbPlot(other)

            fig, ax = _do_plot(other.sample_quantiles,
                               self.sample_quantiles,
                               self.dist, ax=ax, line=line,
                               **plotkwargs)

            if xlabel is None:
                xlabel = 'Quantiles of 2nd Sample'
            if ylabel is None:
                ylabel = 'Quantiles of 1st Sample'
        else:
            fig, ax = _do_plot(self.theoretical_quantiles,
                               self.sample_quantiles,
                               self.dist, ax=ax, line=line,
                               **plotkwargs)
            if xlabel is None:
                xlabel = "Theoretical Quantiles"
            if ylabel is None:
                ylabel = "Sample Quantiles"

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        return fig

    def probplot(self, xlabel=None, ylabel=None, line=None,
                 exceed=False, ax=None, **plotkwargs):
        """
        Probability plot of the unscaled quantiles of x versus the
        probabilities of a distribution (not to be confused with a P-P
        plot). The x-axis is scaled linearly with the quantiles, but the
        probabilities are used to label the axis.

        Parameters
        ----------
        xlabel, ylabel : str or None, optional
            User-provided labels for the x-axis and y-axis. If None
            (default), sensible defaults are used.
        line : str {'45', 's', 'r', 'q'} or None, optional
            Reference line option; see `qqline`. By default no line is
            drawn.
        exceed : boolean, optional
            If False (default), show the probability that a sample will
            not exceed a given value; if True, flip the theoretical
            quantiles so the plot shows the probability of exceedance.
        ax : Matplotlib AxesSubplot instance, optional
            If given, this subplot is used to plot in instead of a new
            figure being created.
        **plotkwargs : additional matplotlib arguments passed to `plot`.

        Returns
        -------
        fig : Matplotlib figure instance
            If `ax` is None, the created figure. Otherwise the figure to
            which `ax` is connected.
        """
        if exceed:
            # Reverse the quantiles to display exceedance probabilities.
            fig, ax = _do_plot(self.theoretical_quantiles[::-1],
                               self.sorted_data,
                               self.dist, ax=ax, line=line,
                               **plotkwargs)

            if xlabel is None:
                xlabel = 'Probability of Exceedance (%)'

        else:
            fig, ax = _do_plot(self.theoretical_quantiles,
                               self.sorted_data,
                               self.dist, ax=ax, line=line,
                               **plotkwargs)

            if xlabel is None:
                xlabel = 'Non-exceedance Probability (%)'

        if ylabel is None:
            ylabel = "Sample Quantiles"

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # Relabel the linear quantile axis with the matching probabilities.
        _fmt_probplot_axis(ax, self.dist, self.nobs)

        return fig
def qqplot(data, dist=stats.norm, distargs=(), a=0, loc=0, scale=1, fit=False,
           line=None, ax=None):
    """
    Q-Q plot of the quantiles of x versus the quantiles/ppf of a
    distribution.

    Convenience wrapper: builds a `ProbPlot` from the given arguments and
    delegates to its `qqplot` method. Can take arguments specifying the
    parameters for dist or fit them automatically (see `fit`).

    Parameters
    ----------
    data : array-like
        1d data array
    dist : A scipy.stats or statsmodels distribution
        Compare x against dist. The default is
        scipy.stats.distributions.norm (a standard normal).
    distargs : tuple
        A tuple of arguments passed to dist to specify it fully so
        dist.ppf may be called.
    loc : float
        Location parameter for dist
    a : float
        Offset for the plotting position of an expected order statistic;
        the plotting positions are (i - a)/(nobs - 2*a + 1) for i in
        range(0, nobs+1).
    scale : float
        Scale parameter for dist
    fit : boolean
        If False, loc, scale, and distargs are passed to the distribution.
        If True the parameters for dist are fit automatically using
        dist.fit; the quantiles are formed from the standardized data.
    line : str {'45', 's', 'r', 'q'} or None
        Reference line option; see `qqline` for the meaning of each code.
        By default (None) no reference line is added to the plot.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    scipy.stats.probplot
    ProbPlot

    Notes
    -----
    Depends on matplotlib. If `fit` is True then the parameters are fit
    using the distribution's fit() method.

    Examples
    --------
    >>> import statsmodels.api as sm
    >>> from matplotlib import pyplot as plt
    >>> data = sm.datasets.longley.load()
    >>> data.exog = sm.add_constant(data.exog)
    >>> mod_fit = sm.OLS(data.endog, data.exog).fit()
    >>> res = mod_fit.resid # residuals
    >>> fig = sm.qqplot(res)
    >>> plt.show()

    qqplot of the residuals against quantiles of t-distribution with 4
    degrees of freedom:

    >>> import scipy.stats as stats
    >>> fig = sm.qqplot(res, stats.t, distargs=(4,))
    >>> plt.show()

    qqplot against same as above, but with mean 3 and std 10:

    >>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10)
    >>> plt.show()

    Automatically determine parameters for t distribution including the
    loc and scale:

    >>> fig = sm.qqplot(res, stats.t, fit=True, line='45')
    >>> plt.show()

    .. plot:: plots/graphics_gofplots_qqplot.py
    """
    pp = ProbPlot(data, dist=dist, distargs=distargs,
                  fit=fit, a=a, loc=loc, scale=scale)
    return pp.qqplot(ax=ax, line=line)
def qqplot_2samples(data1, data2, xlabel=None, ylabel=None, line=None, ax=None):
    """
    Q-Q Plot of two samples' quantiles.

    Can take either two `ProbPlot` instances or two array-like objects. In
    the case of the latter, the input is converted to a `ProbPlot` instance
    using only the default values - so use `ProbPlot` instances if
    finer-grained control of the quantile computations is required.

    Parameters
    ----------
    data1, data2 : array-like (1d) or `ProbPlot` instances
    xlabel, ylabel : str or None
        User-provided labels for the x-axis and y-axis. If None (default),
        other values are used.
    line : str {'45', 's', 'r', 'q'} or None
        Options for the reference line to which the data is compared:

        - '45' - 45-degree line
        - 's' - standardized line, the expected order statistics are scaled
          by the standard deviation of the given sample and have the mean
          added to them
        - 'r' - A regression line is fit
        - 'q' - A line is fit through the quartiles.
        - None - by default no reference line is added to the plot.

    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure
        being created.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    scipy.stats.probplot

    Examples
    --------
    >>> x = np.random.normal(loc=8.5, scale=2.5, size=37)
    >>> y = np.random.normal(loc=8.0, scale=3.0, size=37)
    >>> pp_x = sm.ProbPlot(x)
    >>> pp_y = sm.ProbPlot(y)
    >>> fig = qqplot_2samples(pp_x, pp_y)

    Notes
    -----
    1) Depends on matplotlib.
    2) If `data1` and `data2` are not `ProbPlot` instances, instances will
    be created using the default parameters. Therefore, it is recommended
    to use `ProbPlot` instances if fine-grained control is needed in the
    computation of the quantiles.
    """
    # BUG FIX: conversion used to happen only when *both* inputs were
    # non-ProbPlot, so a mixed call (one array, one ProbPlot) crashed on
    # ndarray.qqplot. Convert each input independently instead.
    if not isinstance(data1, ProbPlot):
        data1 = ProbPlot(data1)
    if not isinstance(data2, ProbPlot):
        data2 = ProbPlot(data2)

    fig = data1.qqplot(xlabel=xlabel, ylabel=ylabel,
                       line=line, other=data2, ax=ax)

    return fig
def qqline(ax, line, x=None, y=None, dist=None, fmt='r-'):
    """
    Plot a reference line for a qqplot.

    Parameters
    ----------
    ax : matplotlib axes instance
        The axes on which to plot the line
    line : str {'45','r','s','q'}
        Options for the reference line to which the data is compared.:

        - '45' - 45-degree line
        - 's' - standardized line, the expected order statistics are scaled by
          the standard deviation of the given sample and have the mean
          added to them
        - 'r' - A regression line is fit
        - 'q' - A line is fit through the quartiles.
        - None - By default no reference line is added to the plot.

    x : array
        X data for plot. Not needed if line is '45'.
    y : array
        Y data for plot. Not needed if line is '45'.
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if line is 'q'.
    fmt : str
        matplotlib format string for the line (default red solid).

    Notes
    -----
    There is no return value. The line is plotted on the given `ax`.
    """
    if line == '45':
        # Square up the axes so the diagonal is a true 45-degree line.
        end_pts = lzip(ax.get_xlim(), ax.get_ylim())
        end_pts[0] = min(end_pts[0])
        end_pts[1] = max(end_pts[1])
        ax.plot(end_pts, end_pts, fmt)
        ax.set_xlim(end_pts)
        ax.set_ylim(end_pts)
        return # does this have any side effects?
    if x is None and y is None:
        raise ValueError("If line is not 45, x and y cannot be None.")
    elif line == 'r':
        # could use ax.lines[0].get_xdata(), get_ydata(),
        # but don't know axes are 'clean'
        # Ordinary least squares fit of y on x; plot the fitted values.
        y = OLS(y, add_constant(x)).fit().fittedvalues
        ax.plot(x,y,fmt)
    elif line == 's':
        # Line through (0, mean) with slope = sample standard deviation.
        m,b = y.std(), y.mean()
        ref_line = x*m + b
        ax.plot(x, ref_line, fmt)
    elif line == 'q':
        # Line through the first and third quartiles of sample vs theory.
        _check_for_ppf(dist)
        q25 = stats.scoreatpercentile(y, 25)
        q75 = stats.scoreatpercentile(y, 75)
        theoretical_quartiles = dist.ppf([0.25, 0.75])
        m = (q75 - q25) / np.diff(theoretical_quartiles)
        b = q25 - m*theoretical_quartiles[0]
        ax.plot(x, m*x + b, fmt)
#about 10x faster than plotting_position in sandbox and mstats
def plotting_pos(nobs, a):
    """
    Generate the sequence of plotting positions for `nobs` observations.

    Parameters
    ----------
    nobs : int
        Number of probability points to plot
    a : float
        Offset for the plotting position of an expected order statistic.

    Returns
    -------
    plotting_positions : array
        The positions (i - a)/(nobs - 2*a + 1) for i in 1..nobs.

    See also
    --------
    scipy.stats.mstats.plotting_positions
    """
    ranks = np.arange(1., nobs + 1)
    denominator = nobs - 2 * a + 1
    return (ranks - a) / denominator
def _fmt_probplot_axis(ax, dist, nobs):
    """
    Formats a theoretical quantile axis to display the corresponding
    probabilities on the quantiles' scale.

    Parameters
    ----------
    ax : Matplotlib AxesSubplot instance, optional
        The axis to be formatted
    nobs : scalar
        Number of observations in the sample
    dist : scipy.stats.distribution
        A scipy.stats distribution sufficiently specified to implement its
        ppf() method.

    Returns
    -------
    There is no return value. This operates on `ax` in place
    """
    _check_for_ppf(dist)
    # Pick a tick density appropriate for the sample size: more extreme
    # probabilities only make sense with more observations.
    if nobs < 50:
        axis_probs = np.array([1,2,5,10,20,30,40,50,60,
                               70,80,90,95,98,99,])/100.0
    elif nobs < 500:
        axis_probs = np.array([0.1,0.2,0.5,1,2,5,10,20,30,40,50,60,70,
                               80,90,95,98,99,99.5,99.8,99.9])/100.0
    else:
        axis_probs = np.array([0.01,0.02,0.05,0.1,0.2,0.5,1,2,5,10,
                               20,30,40,50,60,70,80,90,95,98,99,99.5,
                               99.8,99.9,99.95,99.98,99.99])/100.0
    # Place ticks at the quantiles but label them with the probabilities.
    axis_qntls = dist.ppf(axis_probs)
    ax.set_xticks(axis_qntls)
    ax.set_xticklabels(axis_probs*100, rotation=45,
                       rotation_mode='anchor',
                       horizontalalignment='right',
                       verticalalignment='center')
    ax.set_xlim([axis_qntls.min(), axis_qntls.max()])
def _do_plot(x, y, dist=None, line=False, ax=None, fmt='bo', **kwargs):
    """
    Boiler plate plotting function for the `ppplot`, `qqplot`, and
    `probplot` methods of the `ProbPlot` class

    Parameters
    ----------
    x, y : array-like
        Data to be plotted
    dist : scipy.stats.distribution
        A scipy.stats distribution, needed if `line` is 'q'.
    line : str {'45', 's', 'r', 'q'} or None
        Options for the reference line to which the data is compared.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    fmt : str, optional
        matplotlib-compatible formatting string for the data markers
        (default 'bo', blue circles)
    kwargs : keywords
        These are passed to matplotlib.plot

    Returns
    -------
    fig : Matplotlib Figure instance
    ax : Matplotlib AxesSubplot instance (see Parameters)
    """
    fig, ax = utils.create_mpl_ax(ax)
    ax.set_xmargin(0.02)
    ax.plot(x, y, fmt, **kwargs)
    if line:
        # Validate before delegating so the user gets a clear message.
        if line not in ['r','q','45','s']:
            msg = "%s option for line not understood" % line
            raise ValueError(msg)

        qqline(ax, line, x=x, y=y, dist=dist)

    return fig, ax
def _check_for_ppf(dist):
if not hasattr(dist, 'ppf'):
raise ValueError("distribution must have a ppf method")
| bsd-3-clause |
sk2/ank_le | AutoNetkit/plotting/PathDrawer.py | 1 | 13638 | """
**********
PathDrawer
**********
Draw multiple paths in a graph. Uses matplotlib (pylab).
The path corners are rounded with the help of cube Bezier curves.
If two or more paths traverse the same edge, they are automatically shifted to make them all visible.
References:
- matplotlib: http://matplotlib.sourceforge.net/
- Bezier curves: http://matplotlib.sourceforge.net/api/path_api.html
"""
__author__ = """Maciej Kurant (maciej.kurant@epfl.ch)"""
# Copyright (C) 2008 by
# Maciej Kurant <maciej.kurant@epfl.ch>
# Distributed under the terms of the GNU Lesser General Public License
# http://www.gnu.org/copyleft/lesser.html
__all__ = ['is_valid_edge_path',
'is_valid_node_path',
'to_node_path',
'to_edge_path',
'normalize_layout',
'draw_path',
'draw_many_paths']
# matplotlib is optional at import time (e.g. on headless machines); the
# drawing helpers below require it, the pure-geometry helpers do not.
try:
    import matplotlib
    import matplotlib.path
    # BUG FIX: `MPath` was previously bound inside the `except ImportError`
    # branch, so it was never defined when matplotlib imported successfully
    # (and it referenced the missing module when the import failed). Bind
    # it on the success path instead.
    MPath = matplotlib.path.Path
except ImportError:
    pass
except RuntimeError:
    pass  # unable to open display

import numpy
import random
import math
#####################
def is_valid_edge_path(path, G):
    '''Returns True if path consists of consecutive edges in G, and False otherwise.'''
    if len(path) == 0:
        return True
    # Walk consecutive edge pairs, checking shape, continuity and membership.
    for cur, nxt in zip(path, path[1:]):
        try:
            if len(cur) < 2 or len(nxt) < 2:
                return False
        except TypeError:  # element has no len(), e.g. an integer
            return False
        if cur[1] != nxt[0]:
            return False
        if not G.has_edge(*cur):
            return False
    # The loop above validates every edge except the final one.
    if not G.has_edge(*path[-1]):
        return False
    return True
#####################
def is_valid_node_path(path, G):
'''Returns True if path is valid in G, and False otherwise.'''
if len(path)<2: return False
for i in range(len(path)-1):
if not G.has_edge(path[i],path[i+1]): return False
return True
#####################
def to_node_path(edge_path):
'E.g., [(10, 3), (3, 6), (6, 11)] -> [10,3,6,11]'
np = [e[0] for e in edge_path]
np.append(edge_path[-1][1])
return np
#####################
def to_edge_path(path, G=None):
'''Converts a node_path to edge_path, e.g., [10,3,6,11] -> [(10, 3), (3, 6), (6, 11)]
If G is given, then the path validity is checked. Then 'path' is tolerated to be
also an edge_path; in this case it is returned directly.
'''
if G==None:
return [(path[i],path[i+1]) for i in range(len(path)-1)]
if is_valid_node_path(path,G):
return to_edge_path(path)
else:
if not is_valid_edge_path(path,G): raise ValueError('Not a valid path:\npath='+str(path))
return path
#####################
def vector_length(v):
'''Returns the length of vector v=numpy.array([x,y]).'''
return math.sqrt(numpy.dot(v,v))
#####################
def norm_vector(v):
'''Returns a vector numpy.array([x,y]) of length 1, pointing in the same
direction as v = numpy.array([x0,y0]) .'''
l=vector_length(v)
if l==0.: raise ValueError('Vector v='+str(v)+' has length 0 !')
return v/l
#####################
def perpendicular_vector(v):
'''Returns a vector numpy.array([x,y]) perpendicular to v.'''
return numpy.array([v[1],-v[0]])
#####################
def crossing_point(p1a, p1b, p2a, p2b):
'''Returns the crossing of line1 defined by two points p1a and p1b, and line2 defined by two points p2a, p2b.
All points should be of format numpy.array([x,y]).
If line1 and line2 are parallel then returns None.
'''
# See e.g.: http://stackoverflow.com/questions/153592/how-do-i-determine-the-intersection-point-of-two-lines-in-gdi
if tuple(p1a)==tuple(p1b) or tuple(p2a)==tuple(p2b): raise ValueError('Two points defining a line are identical!')
v1 = p1b-p1a
v2 = p2b-p2a
x12 = p2a-p1a
D = numpy.dot(v1,v1)*numpy.dot(v2,v2) - numpy.dot(v1,v2) * numpy.dot(v1,v2)
if D==0: return None # Lines are parallel!
a = (numpy.dot(v2,v2) * numpy.dot(v1,x12) - numpy.dot(v1,v2) * numpy.dot(v2,x12)) / D
return p1a + v1*a
#####################
def is_layout_normalized(pos):
'True if points in pos stay within (0,0) x (1,1)'
A = numpy.asarray(pos.values())
if min(A[:,0])<0 or min(A[:,1])<0 or max(A[:,0])>1 or max(A[:,1])>1:
return False
return True
#####################
def normalize_layout(pos):
'''All node positions are normalized to fit in the unit area (0,0)x(1,1).'''
if len(pos)==1:
v=pos.keys()[0]
pos[v]= numpy.array([0.5,0.5])
return
A=numpy.asarray(pos.values())
x0,y0,x1,y1 = min(A[:,0]),min(A[:,1]),max(A[:,0]),max(A[:,1])
for v in pos:
pos[v] = (pos[v]-(x0,y0))/(x1-x0,y1-y0)*0.8+(0.1,0.1)
return
#####################
def draw_path(G, pos, path, shifts=None, color='r', linestyle='solid', linewidth=1.0):
'''Draw a path 'path' in graph G.
Parameters
----------
pos : a node layout used to draw G. Must be normalized to (0,0)x(1,1),
e.g., by function normalize_layout(pos)
path : edge_path or node_path
shifts : a list of length len(edge_path) specifying how far the path
must be drawn from every edge it traverses.
color : e.g., one out of ('b','g','r','c','m','y').
linestyle : one out of ('solid','dashed','dashdot','dotted')
linewidth : float, in pixels
Examples
--------
>>> g=networkx.krackhardt_kite_graph()
>>> pos=networkx.drawing.spring_layout(g)
>>> normalize_layout(pos)
>>> networkx.draw(g,pos)
>>> path = networkx.shortest_path(g, 3, 9)
>>> draw_path(g, pos, path, color='g', linewidth=2.0)
>>> matplotlib.pyplot.show()
'''
if not is_layout_normalized(pos): raise ValueError('Layout is not normalized!')
edge_path = to_edge_path(path, G)
if len(edge_path)==0: return
if shifts==None: shifts = [0.02] * len(edge_path)
if len(shifts)!=len(edge_path): raise ValueError("The argument 'shifts' does not match 'edge_path'!")
# edge_pos - positions of edges
# edge_shifts - shifts of edges along a perpendicular vectors; the shifting distance is determined by 'shifts'
edge_pos = [numpy.array([pos[e[0]],pos[e[1]]]) for e in edge_path]
edge_shifts = [ shifts[i]*perpendicular_vector(norm_vector(p1b-p1a)) for i,(p1a,p1b) in enumerate(edge_pos)]
# prepare vertices and codes for object matplotlib.path.Path(vertices, codes) - the path to display
# vertices: an Nx2 float array of vertices (not the same as graph nodes!)
# codes: an N-length uint8 array of vertex types (such as MOVETO, LINETO, CURVE4) - a cube Bezier curve
# See e.g. http://matplotlib.sourceforge.net/api/path_api.html
# First, for every corner (node on the path), we define 4 points to smoothen it
corners=[]
#The first 'corner' - on a straight line, easier to process next
p1a,p1b = edge_pos[0] + edge_shifts[0]
V1=p1b-p1a
corners.append([p1a, p1a+0.1*V1, p1a+0.1*V1, p1a+0.2*V1])
#All real corners - with edes on both sides
for i in range(len(edge_pos)-1):
p_node = edge_pos[i][1] # crossing point of the original (i)th and (i+1)th edges
p1a,p1b = edge_pos[i] + edge_shifts[i] # two points defining the shifted (i)th edge
p2a,p2b = edge_pos[i+1] + edge_shifts[i+1] # two points defining the shifted (i+1)th edge
V1 = norm_vector(p1b - p1a) # unit vector along the (i)th edge
V2 = norm_vector(p2b - p2a) # unit vector along the (i+1)th edge
p_middle_angle = p_node + (V2-V1) # a point that splits evenly the angle between the original (i)th and (i+1)th edges
c12 = crossing_point(p1a, p1b, p2a, p2b) # crossing point of the shifted (i)th and (i+1)th edges
if c12==None: # the edges are parallel
c12 = (p1b+p2a)/2
p_middle_angle = c12
c1 = crossing_point(p1a,p1b,p_node,p_middle_angle) # crossing point of the shifted (i)th edge and the middle-angle-line
c2 = crossing_point(p2a,p2b,p_node,p_middle_angle) # crossing point of the shifted (i+1)th edge and the middle-angle-line
D= 0.5*(shifts[i]+shifts[i+1]) # average shift - a reasonable normalized distance measure
if vector_length(p_node-c12) < 2.5*D: # if the crossing point c12 is 'relatively close' to the node
corners.append([c12-D*V1, c12, c12, c12+D*V2]) # then c12 defines two consecutive reference points in the cube Bezier curve
else: # the crossing point c12 is NOT 'relatively close' to the node
P1=p1b + D*V1
if numpy.dot(c1-P1, V1)<0: P1=c1
P2=p2a - D*V2
if numpy.dot(c2-P2, V2)>0: P2=c2
corners.append([P1-D*V1, P1, P2, P2+D*V2])
#The last 'corner' - on one line, easier to process next
p1a,p1b = edge_pos[-1] + edge_shifts[-1]
V1=p1b-p1a
corners.append( [p1b-0.2*V1, p1b-0.1*V1, p1b-0.1*V1, p1b] )
# Now, based on corners, we prepare vertices and codes
vertices=[]
codes = []
# First operation must be a MOVETO, move pen to first vertex on the path
vertices += [corners[0][0]]
codes += [MPath.MOVETO]
for i,corner in enumerate(corners):
# If there is not enough space to draw a corner, then replace the last two vertices from the previous section, by the last two vertives of the current section
if i>0:
if vector_length(norm_vector(corner[0]-vertices[-1]) - norm_vector(corner[1]-corner[0]))>1:
vertices.pop();
vertices.pop();
vertices += corner[-2:]
continue
codes+=[MPath.LINETO, MPath.CURVE4, MPath.CURVE4, MPath.CURVE4]
vertices+=corner
# Finally, create a nice path and display it
path = MPath(vertices, codes)
patch = matplotlib.patches.PathPatch(path, edgecolor=color, linestyle=linestyle, linewidth=linewidth, fill=False, alpha=1.0)
ax=matplotlib.pylab.gca()
ax.add_patch(patch)
ax.update_datalim(((0,0),(1,1)))
ax.autoscale_view()
return
#####################
def draw_many_paths(G, pos, paths, max_shift=0.02, linewidth=2.0):
'''Draw every path in 'paths' in graph G.
Colors and linestyles are chosen automatically.
All paths are visible - no path section can be covered by another path.
Parameters
----------
pos : a node layout used to draw G. Must be normalized to (0,0)x(1,1),
e.g., by function normalize_layout(pos)
paths : a collection of node_paths or edge_paths
max_shift : maximal distance between an edge and a path traversing it.
linewidth : float, in pixels.
Examples
--------
>>> g=networkx.krackhardt_kite_graph()
>>> g.remove_node(9)
>>> path1 = networkx.shortest_path(g, 2, 8)
>>> path2 = networkx.shortest_path(g, 0, 8)
>>> path3 = [(1,0),(0,5),(5,7)] # edge_path
>>> path4 = [3,5,7,6] # node_path
>>> pos=networkx.drawing.spring_layout(g)
>>> normalize_layout(pos)
>>> networkx.draw(g,pos, node_size=140)
>>> draw_many_paths(g, pos, [path1, path2, path3, path4], max_shift=0.03)
>>> matplotlib.pyplot.show()
'''
if len(paths)==0: return
if not is_layout_normalized(pos): raise ValueError('Layout is not normalized!')
edge_paths=[to_edge_path(path,G) for path in paths]
edge_paths.sort(key=len, reverse=True) # Sort edge_paths from the longest to the shortest
# Find the largest number of edge_paths traversing the same edge and set single_shift accordingly
edge2count = {}
for path in edge_paths:
for e in path:
edge2count[e] = edge2count.get(e,0) + 1
single_shift = max_shift/max(edge2count.values())
# Draw the edge_paths by calling draw_path(...). Use edge2shift to prevent the path overlap on some edges.
colors=('b','g','r','c','m','y')
linestyles=('solid','dashed','dashdot','dotted')
edge2shift={}
for i,path in enumerate(edge_paths):
shifts=[ edge2shift.setdefault(e, single_shift) for e in path]
draw_path(G, pos, path, color=colors[i%len(colors)], linestyle=linestyles[i/len(colors) % len(linestyles)], linewidth=linewidth, shifts=shifts)
for e in path: edge2shift[e] += single_shift
return
##########################################
if __name__ == "__main__":
    # Example: draw four overlapping paths on the Krackhardt kite graph
    # and save the result as PathDrawer.png.
    import networkx
    g=networkx.krackhardt_kite_graph()
    g.remove_node(9)
    # Two shortest paths plus one explicit edge_path and one node_path.
    path1 = networkx.shortest_path(g, 2, 8)
    path2 = networkx.shortest_path(g, 0, 8)
    path3 = [(1,0),(0,5),(5,7)]
    path4 = [3,5,7,6]
    pos=networkx.drawing.spring_layout(g)
    normalize_layout(pos)
    networkx.draw(g,pos, node_size=140)
    draw_many_paths(g, pos, [path1, path2, path3, path4], max_shift=0.03)
    matplotlib.pyplot.savefig("PathDrawer.png")
    matplotlib.pyplot.show()
| bsd-3-clause |
ADicksonLab/wepy | src/wepy/analysis/network.py | 1 | 52176 | """Module that allows for imposing a kinetically connected network
structure of weighted ensemble simulation data.
"""
from collections import defaultdict
from copy import deepcopy
import gc
import numpy as np
import networkx as nx
from wepy.analysis.transitions import (
transition_counts,
counts_d_to_matrix,
)
# pandas is optional; only the DataFrame-producing helpers need it.
try:
    import pandas as pd
except ModuleNotFoundError:
    # BUG FIX: corrected the typo "installe" in the user-facing message.
    print("Pandas is not installed, that functionality won't work")
class MacroStateNetworkError(Exception):
    """Exception raised when MacroStateNetwork requirements are violated."""
class BaseMacroStateNetwork():
    """A base class for the MacroStateNetwork which doesn't contain a
    WepyHDF5 object. Useful for serialization of the object and can
    then be reattached later to a WepyHDF5. For this functionality see
    the 'MacroStateNetwork' class.

    BaseMacroStateNetwork can also be thought of as just a way of
    mapping macrostate properties to the underlying microstate data.

    The network itself is a networkx directed graph.

    Upon construction the nodes will be a value called the 'node_id'
    which is the label/assignment for the node. This either comes from
    an explicit labelling (the 'assignments' argument) or from the
    labels/assignments from the contig tree (from the 'assg_field_key'
    argument).

    Nodes have the following attributes after construction:

    - node_id :: Same as the actual node value

    - node_idx :: An extra index that is used for 'internal' ordering
                  of the nodes in a consistent manner. Used for
                  example in any method which constructs matrices from
                  edges and ensures they are all the same.

    - assignments :: An index trace over the contig_tree dataset used
                     to construct the network. This is how the
                     individual microstates are indexed for each node.

    - num_samples :: A total of the number of microstates that a node
                     has. Is the length of the 'assignments' attribute.

    Additionally, there are auxiliary node attributes that may be
    added by various methods. All of these are prefixed with a single
    underscore '_' and any user set values should avoid this.

    These auxiliary attributes also make use of namespacing, where
    namespaces are similar to file paths and are separated by '/'
    characters.

    Additionally the auxiliary groups are typically managed such that
    they remain consistent across all of the nodes and have metadata
    queryable from the BaseMacroStateNetwork object. In contrast user
    defined node attributes are not restricted to this structure.

    The auxiliary groups are:

    - '_groups' :: used to mark nodes as belonging to a higher level group.

    - '_observables' :: used for scalar values that are calculated
                        from the underlying microstate structures. As
                        opposed to more operational values describing
                        the network itself. By virtue of being scalar
                        these are also compatible with output to
                        tabular formats.

    Edge values are simply 2-tuples of node_ids where the first value
    is the source and the second value is the target. Edges have the
    following attributes following initialization:

    - 'weighted_counts' :: The weighted sum of all the transitions
                           for an edge. This is a floating point
                           number.

    - 'unweighted_counts' :: The unweighted sum of all the
                             transitions for an edge, this is a
                             normal count and is a whole integer.

    - 'all_transitions' :: This is an array of floats of the weight
                           for every individual transition for an
                           edge. This is useful for doing more
                           advanced statistics for a given edge.

    A network object can be used as a stateful container for
    calculated values over the nodes and edges and has methods to
    support this. However, there is no standard way to serialize this
    data beyond the generic python techniques like pickle.

    """

    ASSIGNMENTS = 'assignments'
    """Key for the microstates that are assigned to a macrostate."""

    def __init__(self,
                 contig_tree,
                 assg_field_key=None,
                 assignments=None,
                 transition_lag_time=2):
        """Create a network of macrostates from the simulation microstates
        using a field in the trajectory data or precomputed assignments.

        Either 'assg_field_key' or 'assignments' must be given, but not
        both.

        The 'transition_lag_time' is default set to 2, which is the
        natural connection between microstates. The lag time can be
        increased to vary the kinetic accuracy of transition
        probabilities generated through Markov State Modelling.

        The 'transition_lag_time' must be given as an integer greater
        than 1.

        Arguments
        ---------

        contig_tree : ContigTree object

        assg_field_key : str, conditionally optional on 'assignments'
            The field in the WepyHDF5 dataset you want to generate macrostates for.

        assignments : list of list of array_like of dim (n_traj_frames, observable_shape[0], ...),
        conditionally optional on 'assg_field_key'
            List of assignments for all frames in each run, where each
            element of the outer list is for a run, the elements of
            these lists are lists for each trajectory which are
            arraylikes of shape (n_traj, observable_shape[0], ...).

        Raises
        ------
        MacroStateNetworkError
            If neither or both of 'assg_field_key' and 'assignments'
            are given, or if 'transition_lag_time' is less than 2.

        """

        self._graph = nx.DiGraph()

        # exactly one of the two assignment sources must be provided;
        # the original pair of asserts both checked only "at least one"
        # and would be stripped under 'python -O', so validate
        # explicitly with an exclusive-or test
        if (assg_field_key is None) == (assignments is None):
            raise MacroStateNetworkError(
                "exactly one of assg_field_key or assignments must be given")

        self._base_contig_tree = contig_tree.base_contigtree
        self._assg_field_key = assg_field_key

        # initialize the groups dictionary
        self._node_groups = {}

        # initialize the list of the observables
        self._observables = []

        # initialize the list of available layouts
        self._layouts = []

        # initialize the lookup of the node_idxs from node_ids
        self._node_idxs = {}

        # initialize the reverse node lookup which is memoized on
        # first use (see node_idx_to_id_dict)
        self._node_idx_to_id_dict = None

        # validate lag time input; None is tolerated here and treated
        # as "no transitions requested" downstream
        if (
                (transition_lag_time is not None) and
                (transition_lag_time < 2)
        ):
            raise MacroStateNetworkError(
                "transition_lag_time must be an integer value >= 2"
            )

        self._transition_lag_time = transition_lag_time

        ## Temporary variables for initialization only

        # the temporary assignments dictionary
        self._node_assignments = None

        # and temporary raw assignments
        self._assignments = None

        ## Code for creating nodes and edges

        ## Nodes
        with contig_tree:

            # map the keys to their lists of assignments, depending on
            # whether or not we are using a field from the HDF5 traj or
            # assignments provided separately
            if assg_field_key is not None:
                if not isinstance(assg_field_key, str):
                    raise MacroStateNetworkError("assignment key must be a string")
                self._key_init(contig_tree)
            else:
                self._assignments_init(assignments)

            # once we have made the dictionary add the nodes to the network
            # and reassign the assignments to the nodes
            for node_idx, assg_item in enumerate(self._node_assignments.items()):
                assg_key, assigs = assg_item

                # count the number of samples (assigs) and use this as a field as well
                num_samples = len(assigs)

                # save the nodes with attributes, we save the node_id
                # as the assg_key, because of certain formats only
                # typing the attributes, and we want to avoid data
                # loss, through these formats (which should be avoided
                # as durable stores of them though)
                self._graph.add_node(assg_key,
                                     node_id=assg_key,
                                     node_idx=node_idx,
                                     assignments=assigs,
                                     num_samples=num_samples)
                self._node_idxs[assg_key] = node_idx

            ## Edges
            all_transitions_d, \
            weighted_counts_d, \
            unweighted_counts_d = self._init_transition_counts(
                contig_tree,
                transition_lag_time,
            )

        # after calculating the transition counts set these as edge
        # values make the edges with these attributes
        for edge, all_trans in all_transitions_d.items():

            weighted_counts = weighted_counts_d[edge]
            unweighted_counts = unweighted_counts_d[edge]

            # add the edge with all of the values
            self._graph.add_edge(
                *edge,
                weighted_counts=weighted_counts,
                unweighted_counts=unweighted_counts,
                all_transitions=all_trans,
            )

        ## Cleanup

        # then get rid of the assignments dictionary, this information
        # can be accessed from the network
        del self._node_assignments
        del self._assignments

    def _key_init(self, contig_tree):
        """Initialize the assignments structures given the field key to use.

        Reads the 'assg_field_key' observable from every trajectory in
        the dataset and delegates to '_assignments_init'.

        Parameters
        ----------
        contig_tree : ContigTree object
            Must be open (this is called inside the 'with contig_tree'
            block of the constructor).

        """

        wepy_h5 = contig_tree.wepy_h5

        # blank assignments
        assignments = [[[] for traj_idx in range(wepy_h5.num_run_trajs(run_idx))]
                       for run_idx in wepy_h5.run_idxs]

        test_field = wepy_h5.get_traj_field(
            wepy_h5.run_idxs[0],
            wepy_h5.run_traj_idxs(0)[0],
            self.assg_field_key,
        )

        # WARN: assg_field shapes can come wrapped with an extra
        # dimension. We handle both cases. Test the first traj and see
        # how it is
        unwrap = False
        if len(test_field.shape) == 2 and test_field.shape[1] == 1:
            # then we raise flag to unwrap them
            unwrap = True
        elif len(test_field.shape) == 1:
            # then it is unwrapped and we don't need to do anything,
            # just assert the flag to not unwrap
            unwrap = False
        else:
            raise ValueError(f"Wrong shape for an assignment type observable: {test_field.shape}")

        # the raw assignments
        for idx_tup, fields_d in wepy_h5.iter_trajs_fields(
                [self.assg_field_key], idxs=True):
            run_idx = idx_tup[0]
            traj_idx = idx_tup[1]
            assg_field = fields_d[self.assg_field_key]

            # if we need to we unwrap the assignments scalar values
            # if they need it
            if unwrap:
                assg_field = np.ravel(assg_field)

            assignments[run_idx][traj_idx].extend(assg_field)

        # then just call the assignments constructor to do it the same
        # way
        self._assignments_init(assignments)

    def _assignments_init(self, assignments):
        """Given the assignments structure sets up the other necessary
        structures.

        Parameters
        ----------

        assignments : list of list of array_like of dim (n_traj_frames, observable_shape[0], ...),
        conditionally optional on 'assg_field_key'
            List of assignments for all frames in each run, where each
            element of the outer list is for a run, the elements of
            these lists are lists for each trajectory which are
            arraylikes of shape (n_traj, observable_shape[0], ...).

        """

        # set the type for the assignment field
        self._assg_field_type = type(assignments[0])

        # set the raw assignments to the temporary attribute
        self._assignments = assignments

        # this is the dictionary mapping node_id -> the (run_idx, traj_idx, cycle_idx) frames
        self._node_assignments = defaultdict(list)

        for run_idx, run in enumerate(assignments):
            for traj_idx, traj in enumerate(run):
                for frame_idx, assignment in enumerate(traj):
                    self._node_assignments[assignment].append( (run_idx, traj_idx, frame_idx) )

    def _init_transition_counts(self,
                                contig_tree,
                                transition_lag_time,
                                ):
        """Given the lag time get the transitions between microstates for the
        network using the sliding windows algorithm.

        This will create a directed edge between nodes that had at
        least one transition, no matter the weight.

        See the main class docstring for a description of the fields.

        contig_tree should be unopened.

        Returns
        -------
        all_transitions_d : dict of (node_id, node_id) to numpy.ndarray
            Every individual transition weight for each edge.

        weighted_counts_d : dict of (node_id, node_id) to float
            Sum of walker weights over the transitions of each edge.

        unweighted_counts_d : dict of (node_id, node_id) to int
            Raw number of transitions for each edge.

        """

        # now count the transitions between the states and set those
        # as the edges between nodes

        # first get the sliding window transitions from the contig
        # tree, once we set edges for a tree we don't really want to
        # have multiple sets of transitions on the same network so we
        # don't provide the method to add different assignments

        # get the weights for the walkers so we can compute
        # the weighted transition counts
        with contig_tree:

            weights = [[] for run_idx in contig_tree.wepy_h5.run_idxs]
            for idx_tup, traj_fields_d in contig_tree.wepy_h5.iter_trajs_fields(
                    ['weights'],
                    idxs=True):

                run_idx, traj_idx = idx_tup
                weights[run_idx].append(np.ravel(traj_fields_d['weights']))

            # get the transitions as trace idxs
            trace_transitions = []
            for window in contig_tree.sliding_windows(transition_lag_time):

                # only the endpoints of the window constitute the transition
                trace_transition = [window[0], window[-1]]

                # convert the window trace on the contig to a trace
                # over the runs
                trace_transitions.append(trace_transition)

        # ALERT: I'm not sure this is going to work out since this is
        # potentially a lot of data and might make the object too
        # large, lets just be aware and maybe we'll have to not do
        # this if things are out of control.

        ## transition distributions

        # get an array of all of the transition weights so we can do
        # stats on them later.
        all_transitions_d = defaultdict(list)
        for trace_transition in trace_transitions:

            # get the node ids of the edge using the assignments
            start = trace_transition[0]
            end = trace_transition[-1]

            # get the assignments for the transition
            start_assignment = self._assignments[start[0]][start[1]][start[2]]
            end_assignment = self._assignments[end[0]][end[1]][end[2]]

            edge_id = (start_assignment, end_assignment)

            # get the weight of the walker that transitioned, this
            # uses the trace idxs for the individual walkers
            weight = weights[start[0]][start[1]][start[2]]

            # append this transition weight to the list for it, but
            # according to the node_ids, in edge_id
            all_transitions_d[edge_id].append(weight)

        # convert the lists in the transition dictionary to numpy arrays
        all_transitions_d = {
            edge : np.array(transitions_l)
            for edge, transitions_l in all_transitions_d.items()
        }

        gc.collect()

        ## sum of weighted counts

        # then get the weighted counts for those edges
        weighted_counts_d = transition_counts(
            self._assignments,
            trace_transitions,
            weights=weights,
        )

        ## Sum of unweighted counts

        # also get unweighted counts
        unweighted_counts_d = transition_counts(
            self._assignments,
            trace_transitions,
            weights=None,
        )

        return all_transitions_d, \
               weighted_counts_d, \
               unweighted_counts_d

    def node_id_to_idx(self, assg_key):
        """Convert a node_id (which is the assignment value) to a canonical index.

        Parameters
        ----------
        assg_key : node_id

        Returns
        -------
        node_idx : int

        """
        return self.node_id_to_idx_dict()[assg_key]

    def node_idx_to_id(self, node_idx):
        """Convert a node index to its node id.

        Parameters
        ----------
        node_idx : int

        Returns
        -------
        node_id : node_id

        """
        return self.node_idx_to_id_dict()[node_idx]

    def node_id_to_idx_dict(self):
        """Generate a full mapping of node_ids to node_idxs."""
        return self._node_idxs

    def node_idx_to_id_dict(self):
        """Generate a full mapping of node_idxs to node_ids.

        The reversed mapping is computed lazily and memoized on the
        instance.
        """

        if self._node_idx_to_id_dict is None:
            rev = {node_idx : node_id for node_id, node_idx in self._node_idxs.items()}
            self._node_idx_to_id_dict = rev
        else:
            rev = self._node_idx_to_id_dict

        return rev

    @property
    def graph(self):
        """The networkx.DiGraph of the macrostate network."""
        return self._graph

    @property
    def num_states(self):
        """The number of states in the network."""
        return len(self.graph)

    @property
    def node_ids(self):
        """A list of the node_ids."""
        return list(self.graph.nodes)

    @property
    def contig_tree(self):
        """The underlying ContigTree"""
        return self._base_contig_tree

    @property
    def assg_field_key(self):
        """The string key of the field used to make macro states from the WepyHDF5 dataset.

        Raises
        ------
        MacroStateNetworkError
            If this wasn't used to construct the MacroStateNetwork.

        """
        if self._assg_field_key is None:
            raise MacroStateNetworkError("Assignments were manually defined, no key.")
        else:
            return self._assg_field_key

    ### Node attributes & methods

    def get_node_attributes(self, node_id):
        """Returns the node attributes of the macrostate.

        Parameters
        ----------
        node_id : node_id

        Returns
        -------
        macrostate_attrs : dict

        """
        return self.graph.nodes[node_id]

    def get_node_attribute(self, node_id, attribute_key):
        """Return the value for a specific node and attribute.

        Parameters
        ----------
        node_id : node_id

        attribute_key : str

        Returns
        -------
        node_attribute

        """
        return self.get_node_attributes(node_id)[attribute_key]

    def get_nodes_attribute(self, attribute_key):
        """Get a dictionary mapping nodes to a specific attribute. """

        nodes_attr = {}
        for node_id in self.graph.nodes:
            nodes_attr[node_id] = self.graph.nodes[node_id][attribute_key]

        return nodes_attr

    def node_assignments(self, node_id):
        """Return the microstates assigned to this macrostate as a run trace.

        Parameters
        ----------
        node_id : node_id

        Returns
        -------
        node_assignments : list of tuples of ints (run_idx, traj_idx, cycle_idx)
            Run trace of the nodes assigned to this macrostate.

        """
        return self.get_node_attribute(node_id, self.ASSIGNMENTS)

    def set_nodes_attribute(self, key, values_dict):
        """Set node attributes for the key and values for each node.

        Parameters
        ----------
        key : str

        values_dict : dict of node_id: values

        """
        for node_id, value in values_dict.items():
            self.graph.nodes[node_id][key] = value

    @property
    def node_groups(self):
        """Mapping of group name to the node_ids belonging to the group."""
        return self._node_groups

    def set_node_group(self, group_name, node_ids):
        """Mark the given nodes as members of a named group.

        Membership is pushed onto every node as a boolean attribute
        under the '_groups/{group_name}' key.
        """

        # push these values to the nodes themselves, overwriting if
        # necessary
        self._set_group_nodes_attribute(group_name, node_ids)

        # then update the group mapping with this
        self._node_groups[group_name] = node_ids

    def _set_group_nodes_attribute(self, group_name, group_node_ids):
        """Set the boolean '_groups/{group_name}' attribute on every node."""

        # the key for the attribute of the group goes in a little
        # namespace prefixed with _group
        group_key = '_groups/{}'.format(group_name)

        # make the mapping
        values_map = {node_id : True if node_id in group_node_ids else False
                      for node_id in self.graph.nodes}

        # then set them
        self.set_nodes_attribute(group_key, values_map)

    @property
    def observables(self):
        """The list of available observables."""
        return self._observables

    def node_observables(self, node_id):
        """Dictionary of observables for each node_id."""

        node_obs = {}
        for obs_name in self.observables:
            obs_key = '_observables/{}'.format(obs_name)
            # BUGFIX: was 'get_nodes_attributes(node_id, obs_key)'
            # which does not exist; the per-node getter is
            # 'get_node_attribute'
            node_obs[obs_name] = self.get_node_attribute(node_id, obs_key)

        return node_obs

    def set_nodes_observable(self, observable_name, node_values):
        """Set a scalar observable value for each node.

        Parameters
        ----------
        observable_name : str

        node_values : dict of node_id: value

        """

        # the key for the attribute of the observable goes in a little
        # namespace prefixed with _observable
        observable_key = '_observables/{}'.format(observable_name)

        self.set_nodes_attribute(observable_key, node_values)

        # then add to the list of available observables, avoiding
        # duplicates when an observable is re-set (consistent with
        # 'set_nodes_layout')
        if observable_name not in self._observables:
            self._observables.append(observable_name)

    ### Edge methods

    def get_edge_attributes(self, edge_id):
        """Returns the edge attributes of the macrostate.

        Parameters
        ----------
        edge_id : edge_id

        Returns
        -------
        edge_attrs : dict

        """
        return self.graph.edges[edge_id]

    def get_edge_attribute(self, edge_id, attribute_key):
        """Return the value for a specific edge and attribute.

        Parameters
        ----------
        edge_id : edge_id

        attribute_key : str

        Returns
        -------
        edge_attribute

        """
        return self.get_edge_attributes(edge_id)[attribute_key]

    def get_edges_attribute(self, attribute_key):
        """Get a dictionary mapping edges to a specific attribute. """

        edges_attr = {}
        for edge_id in self.graph.edges:
            edges_attr[edge_id] = self.graph.edges[edge_id][attribute_key]

        return edges_attr

    ### Layout stuff

    @property
    def layouts(self):
        """The list of available layout names."""
        return self._layouts

    def node_layouts(self, node_id):
        """Dictionary of layouts for each node_id."""

        layouts_d = {}
        for layout_name in self.layouts:
            layout_key = '_layouts/{}'.format(layout_name)
            # BUGFIX: was 'node_layouts[obs_name] =
            # self.get_nodes_attributes(...)' which referenced an
            # undefined name and a nonexistent method
            layouts_d[layout_name] = self.get_node_attribute(node_id, layout_key)

        return layouts_d

    def set_nodes_layout(self, layout_name, node_values):
        """Set layout (visualization) values for each node.

        Parameters
        ----------
        layout_name : str

        node_values : dict of node_id: value

        """

        # the key for the attribute of the layout goes in a little
        # namespace prefixed with _layouts
        layout_key = '_layouts/{}'.format(layout_name)

        self.set_nodes_attribute(layout_key, node_values)

        # then add to the list of available layouts
        if layout_name not in self._layouts:
            self._layouts.append(layout_name)

    def write_gexf(self,
                   filepath,
                   exclude_node_fields=None,
                   exclude_edge_fields=None,
                   layout=None,
                   ):
        """Writes a graph file in the gexf format of the network.

        Parameters
        ----------
        filepath : str

        exclude_node_fields : list of str, optional
            Extra node attribute keys to drop before writing.

        exclude_edge_fields : list of str, optional
            Extra edge attribute keys to drop before writing.

        layout : str, optional
            Name of a previously set layout whose values are written
            as gexf 'viz' attributes.

        """

        layout_key = None
        if layout is not None:
            layout_key = '_layouts/{}'.format(layout)
            if layout not in self.layouts:
                raise ValueError("Layout not found, use None for no layout")

        ### filter the node and edge attributes

        # to do this we need to get rid of the assignments in the
        # nodes though since this is not really supported or good to
        # store in a gexf file which is more for visualization as an
        # XML format, so we copy and modify then write the copy
        gexf_graph = deepcopy(self._graph)

        ## Nodes

        if exclude_node_fields is None:
            exclude_node_fields = [self.ASSIGNMENTS]
        else:
            exclude_node_fields.append(self.ASSIGNMENTS)
            exclude_node_fields = list(set(exclude_node_fields))

        # exclude the layouts, we will set the viz manually for the layout
        exclude_node_fields.extend(['_layouts/{}'.format(layout_name)
                                    for layout_name in self.layouts])

        for node in gexf_graph:

            # remove requested fields; 'pop' tolerates fields that are
            # absent on a particular node (a plain 'del' would raise
            # KeyError for user-specified fields not set everywhere)
            for field in exclude_node_fields:
                gexf_graph.nodes[node].pop(field, None)

            # also remove the fields which are not valid gexf types
            fields = list(gexf_graph.nodes[node].keys())
            for field in fields:
                if (type(gexf_graph.nodes[node][field]) not in
                    nx.readwrite.gexf.GEXF.xml_type):
                    del gexf_graph.nodes[node][field]

            if layout_key is not None:
                # set the layout as viz attributes to this
                gexf_graph.nodes[node]['viz'] = self._graph.nodes[node][layout_key]

        ## Edges

        if exclude_edge_fields is None:
            exclude_edge_fields = ['all_transitions']
        else:
            exclude_edge_fields.append('all_transitions')
            exclude_edge_fields = list(set(exclude_edge_fields))

        # TODO: viz and layouts not supported for edges currently

        for edge in gexf_graph.edges:

            # remove requested fields, tolerating absent fields
            for field in exclude_edge_fields:
                gexf_graph.edges[edge].pop(field, None)

            # also remove the fields which are not valid gexf types
            fields = list(gexf_graph.edges[edge].keys())
            for field in fields:
                if (type(gexf_graph.edges[edge][field]) not in
                    nx.readwrite.gexf.GEXF.xml_type):
                    del gexf_graph.edges[edge][field]

        # then write this filtered gexf to file
        nx.write_gexf(gexf_graph, filepath)

    def nodes_to_records(self,
                         extra_attributes=('_observables/total_weight',),
                         ):
        """Return a list of dicts, one per node, of tabular node attributes.

        Each record contains 'node_id', 'num_samples', 'node_idx', all
        group flags, all observables, and any 'extra_attributes' keys.
        """

        if extra_attributes is None:
            extra_attributes = []

        # keys which always go into the records
        keys = [
            'num_samples',
            'node_idx',
        ]

        # add all the groups to the keys
        keys.extend(['_groups/{}'.format(key) for key in self.node_groups.keys()])

        # add the observables
        keys.extend(['_observables/{}'.format(obs) for obs in self.observables])

        recs = []
        for node_id in self.graph.nodes:

            rec = {'node_id' : node_id}

            # the keys which are always there
            for key in keys:
                rec[key] = self.get_node_attribute(node_id, key)

            # the user defined ones; BUGFIX: was 'rec[key] = ...'
            # which overwrote the last standard key and dropped the
            # extra attribute from the record
            for extra_key in extra_attributes:
                rec[extra_key] = self.get_node_attribute(node_id, extra_key)

            recs.append(rec)

        return recs

    def nodes_to_dataframe(self,
                           extra_attributes=('_observables/total_weight',),
                           ):
        """Make a dataframe of the nodes and their attributes.

        Not all attributes will be added as they are not relevant to a
        table style representation anyhow.

        The columns will be:

        - node_id
        - node_idx
        - num samples
        - groups (as booleans) which is anything in the '_groups' namespace
        - observables : anything in the '_observables' namespace and
          will assume to be scalars

        And anything in the 'extra_attributes' argument.

        """
        return pd.DataFrame(self.nodes_to_records(
            extra_attributes=extra_attributes
        ))

    def edges_to_records(self,
                         extra_attributes=None,
                         ):
        """Return a list of dicts, one per edge, of tabular edge attributes.

        Each record contains 'edge_id', 'source', 'target',
        'weighted_counts', 'unweighted_counts', and any
        'extra_attributes' keys.
        """

        if extra_attributes is None:
            extra_attributes = []

        keys = [
            'weighted_counts',
            'unweighted_counts',
        ]

        recs = []
        for edge_id in self.graph.edges:

            rec = {
                'edge_id' : edge_id,
                'source' : edge_id[0],
                'target' : edge_id[1],
            }

            for key in keys:
                rec[key] = self.graph.edges[edge_id][key]

            # the user defined ones; BUGFIX: was 'rec[key] =
            # self.get_node_attribute(node_id, extra_key)' which
            # referenced the undefined name 'node_id' and the node
            # getter instead of the edge getter
            for extra_key in extra_attributes:
                rec[extra_key] = self.get_edge_attribute(edge_id, extra_key)

            recs.append(rec)

        return recs

    def edges_to_dataframe(self,
                           extra_attributes=None,
                           ):
        """Make a dataframe of the edges and their attributes.

        Not all attributes will be added as they are not relevant to a
        table style representation anyhow.

        The columns will be:

        - edge_id
        - source
        - target
        - weighted_counts
        - unweighted_counts

        """
        return pd.DataFrame(self.edges_to_records(
            extra_attributes=extra_attributes
        ))

    def node_map(self, func, map_func=map):
        """Map a function over the nodes.

        The function should take as its first argument a node_id and
        the second argument a dictionary of the node attributes. This
        will not give access to the underlying trajectory data in the
        HDF5, to do this use the 'node_fields_map' function.

        Extra args not supported use 'functools.partial' to make
        functions with arguments for all data.

        Parameters
        ----------
        func : callable
            The function to map over the nodes.

        map_func : callable
            The mapping function, implementing the `map` interface

        Returns
        -------
        node_values : dict of node_id : values
            The mapping of node_ids to the values computed by the mapped func.

        """

        # wrap the function so that we can pass through the node_id
        def func_wrapper(args):
            node_id, node_attrs = args
            return node_id, func(node_attrs)

        # zip the node_ids with the node attributes as an iterator
        node_attr_it = ((node_id,
                         {**self.get_node_attributes(node_id), 'node_id' : node_id})
                        for node_id in self.graph.nodes
                        )

        return {node_id : value for node_id, value
                in map_func(func_wrapper, node_attr_it)}

    def edge_attribute_to_matrix(self,
                                 attribute_key,
                                 fill_value=np.nan,
                                 ):
        """Convert scalar edge attributes to an asymmetric matrix.

        This will always return matrices of size (num_nodes,
        num_nodes).

        Additionally, matrices for the same network will always have
        the same indexing, which is according to the 'node_idx'
        attribute of each node.

        For example if you have a matrix like:

        >>> msn = MacroStateNetwork(...)
        >>> mat = msn.edge_attribute_to_matrix('unweighted_counts')

        Then, for example, the node with node_id of '10' having a
        'node_idx' of 0 will always be the first element for each
        dimension. Using this example the self edge '10'->'10' can be
        accessed from the matrix like:

        >>> mat[0,0]

        For another node ('node_id' '25') having 'node_idx' 4, we can
        get the edge from '10'->'25' like:

        >>> mat[0,4]

        This is because 'node_id' does not necessarily have to be an
        integer, and even if they are integers they don't necessarily
        have to be a contiguous range from 0 to N.

        To get the 'node_id' for a 'node_idx' use the method
        'node_idx_to_id'.

        >>> msn.node_idx_to_id(0)
        === 10

        Parameters
        ----------

        attribute_key : str
            The key of the edge attribute the matrix should be made of.

        fill_value : Any
            The value to put in the array for non-existent edges. Must
            be a numpy dtype compatible with the dtype of the
            attribute value.

        Returns
        -------

        edge_matrix : numpy.ndarray
            Asymmetric matrix of dim (n_macrostates,
            n_macrostates). The 0-th axis corresponds to the 'source'
            node and the 1-st axis corresponds to the 'target' nodes,
            i.e. the dimensions mean: (source, target).

        """

        # get the datatype of the attribute from an arbitrary edge so
        # we can validate it will fit in an array; 'next(iter(...))'
        # avoids materializing the whole edge view as a list
        test_edge_id = next(iter(self.graph.edges))

        test_attr_value = self.get_edge_attribute(
            test_edge_id,
            attribute_key,
        )

        # "duck type" check: if the dtype construction fails the
        # attribute is not a numeric array-compatible type
        dt = np.dtype(type(test_attr_value))

        # allocate the matrix and initialize to the fill value for
        # each element
        mat = np.full(
            (self.num_states,
             self.num_states),
            fill_value,
            dtype=dt,
        )

        # get a dictionary of (node_id, node_id) -> value
        edges_attr_d = self.get_edges_attribute(attribute_key)

        # the mapping id->idx
        node_id_to_idx_dict = self.node_id_to_idx_dict()

        # convert node_ids to node_idxs
        edges_idx_attr_d = {}
        for edge, value in edges_attr_d.items():
            idx_edge = (node_id_to_idx_dict[edge[0]],
                        node_id_to_idx_dict[edge[1]])

            edges_idx_attr_d[idx_edge] = value

        # assign to the array
        for trans, value in edges_idx_attr_d.items():
            source = trans[0]
            target = trans[1]
            mat[source, target] = value

        return mat
class MacroStateNetwork():
"""Provides an abstraction over weighted ensemble data in the form of
a kinetically connected network.
The MacroStateNetwork refers to any grouping of the so called
"micro" states that were observed during simulation,
i.e. trajectory frames, and not necessarily in the usual sense
used in statistical mechanics. Although it is the perfect vehicle
for working with such macrostates.
Because walker trajectories in weighted ensemble there is a
natural way to generate the edges between the macrostate nodes in
the network. These edges are determined automatically and a lag
time can also be specified, which is useful in the creation of
Markov State Models.
This class provides transparent access to an underlying 'WepyHDF5'
dataset. If you wish to have a simple serializable network that
does not reference see the 'BaseMacroStateNetwork' class, which
you can construct standalone or access the instance attached as
the 'base_network' attribute of an object of this class.
For a description of all of the default node and edge attributes
which are set after construction see the docstring for the
'BaseMacroStateNetwork' class docstring.
Warnings
--------
This class is not serializable as it references a 'WepyHDF5'
object. Either construct a 'BaseMacroStateNetwork' or use the
attached instance in the 'base_network' attribute.
"""
    def __init__(self,
                 contig_tree,
                 base_network=None,
                 assg_field_key=None,
                 assignments=None,
                 transition_lag_time=2,
                 ):
        """Construct a WepyHDF5-attached macrostate network.

        For documentation of the following arguments see the constructor
        docstring of the 'BaseMacroStateNetwork' class:

        - contig_tree
        - assg_field_key
        - assignments
        - transition_lag_time

        The other arguments are documented here. This is primarily the
        optional 'base_network' argument: a 'BaseMacroStateNetwork'
        instance, which allows you to associate it with a 'WepyHDF5'
        dataset for access to the microstate data etc., without
        recomputing nodes and edges.

        Parameters
        ----------

        base_network : BaseMacroStateNetwork object, optional
            An already constructed network, which will avoid
            recomputing all in-memory network values again for this
            object. When given, the assignment/lag-time arguments are
            ignored.

        """
        # NOTE(review): flag presumably tracks whether the underlying
        # HDF5 resources are open; its toggling is not visible in this
        # constructor -- confirm against the rest of the class
        self.closed = True
        # keep references to the contig tree and its HDF5 dataset for
        # microstate access
        self._contig_tree = contig_tree
        self._wepy_h5 = self._contig_tree.wepy_h5
        # if we pass a base network use that one instead of building
        # one manually
        if base_network is not None:
            assert isinstance(base_network, BaseMacroStateNetwork)
            # alias the prebuilt network's attributes and methods onto
            # this object
            self._set_base_network_to_self(base_network)
        else:
            # otherwise construct the in-memory network from scratch,
            # which computes nodes, edges, and transition counts
            new_network = BaseMacroStateNetwork(contig_tree,
                                                assg_field_key=assg_field_key,
                                                assignments=assignments,
                                                transition_lag_time=transition_lag_time)
            self._set_base_network_to_self(new_network)
def _set_base_network_to_self(self, base_network):
self._base_network = base_network
# then make references to this for the attributes we need
# attributes
self._graph = self._base_network._graph
self._assg_field_key = self._base_network._assg_field_key
self._node_idxs = self._base_network._node_idxs
self._node_idx_to_id_dict = self._base_network._node_idx_to_id_dict
self._transition_lag_time = self._base_network._transition_lag_time
# DEBUG: remove once tested
# self._probmat = self._base_network._probmat
# self._countsmat = self._base_network._countsmat
# functions
self.node_id_to_idx = self._base_network.node_id_to_idx
self.node_idx_to_id = self._base_network.node_idx_to_id
self.node_id_to_idx_dict = self._base_network.node_id_to_idx_dict
self.node_idx_to_id_dict = self._base_network.node_idx_to_id_dict
self.get_node_attributes = self._base_network.get_node_attributes
self.get_node_attribute = self._base_network.get_node_attribute
self.get_nodes_attribute = self._base_network.get_nodes_attribute
self.node_assignments = self._base_network.node_assignments
self.set_nodes_attribute = self._base_network.set_nodes_attribute
self.get_edge_attributes = self._base_network.get_edge_attributes
self.get_edge_attribute = self._base_network.get_edge_attribute
self.get_edges_attribute = self._base_network.get_edges_attribute
self.node_groups = self._base_network.node_groups
self.set_node_group = self._base_network.set_node_group
self._set_group_nodes_attribute = self._base_network._set_group_nodes_attribute
self.observables = self._base_network.observables
self.node_observables = self._base_network.node_observables
self.set_nodes_observable = self._base_network.set_nodes_observable
self.nodes_to_records = self._base_network.nodes_to_records
self.nodes_to_dataframe = self._base_network.nodes_to_dataframe
self.edges_to_records = self._base_network.edges_to_records
self.edges_to_dataframe = self._base_network.edges_to_dataframe
self.node_map = self._base_network.node_map
self.edge_attribute_to_matrix = self._base_network.edge_attribute_to_matrix
self.write_gexf = self._base_network.write_gexf
def open(self, mode=None):
if self.closed:
self.wepy_h5.open(mode=mode)
self.closed = False
else:
raise IOError("This file is already open")
    def close(self):
        """Close the underlying WepyHDF5 file and mark this object closed."""
        self.wepy_h5.close()
        self.closed = True
    def __enter__(self):
        """Enter a context, opening the underlying WepyHDF5 file."""
        # delegate to the WepyHDF5 context manager to open the file
        self.wepy_h5.__enter__()
        self.closed = False
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        """Exit the context, closing the underlying WepyHDF5 file."""
        self.wepy_h5.__exit__(exc_type, exc_value, exc_tb)
        # NOTE(review): self.close() calls wepy_h5.close() again after
        # wepy_h5.__exit__ has already run -- confirm WepyHDF5 tolerates
        # a double close.
        self.close()
# from the Base class
    @property
    def graph(self):
        """The networkx.DiGraph of the macrostate network."""
        # alias of the base network's graph, set in _set_base_network_to_self
        return self._graph
    @property
    def num_states(self):
        """The number of states (nodes) in the network."""
        return len(self.graph)
    @property
    def node_ids(self):
        """A list of the node_ids."""
        # materialized as a list so callers can index and take len()
        return list(self.graph.nodes)
    @property
    def assg_field_key(self):
        """The string key of the field used to make macro states from the WepyHDF5 dataset.

        Raises
        ------

        MacroStateNetworkError
            If this wasn't used to construct the MacroStateNetwork.

        """
        # a None key means assignments were passed in directly instead of
        # being read from a field of the dataset
        if self._assg_field_key is None:
            raise MacroStateNetworkError("Assignments were manually defined, no key.")
        else:
            return self._assg_field_key
# @property
# def countsmat(self):
# """Return the transition counts matrix of the network.
# Raises
# ------
# MacroStateNetworkError
# If no lag time was given.
# """
# if self._countsmat is None:
# raise MacroStateNetworkError("transition counts matrix not calculated")
# else:
# return self._countsmat
# @property
# def probmat(self):
# """Return the transition probability matrix of the network.
# Raises
# ------
# MacroStateNetworkError
# If no lag time was given.
# """
# if self._probmat is None:
# raise MacroStateNetworkError("transition probability matrix not set")
# else:
# return self._probmat
# unique to the HDF5 holding one
    @property
    def base_network(self):
        """The underlying in-memory BaseMacroStateNetwork instance."""
        return self._base_network
    @property
    def wepy_h5(self):
        """The WepyHDF5 dataset backing this network (via the contig tree)."""
        return self._wepy_h5
    def state_to_mdtraj(self, node_id, alt_rep=None):
        """Generate an mdtraj.Trajectory object from a macrostate.

        By default uses the "main_rep" in the WepyHDF5
        object. Alternative representations of the topology can be
        specified.

        Parameters
        ----------
        node_id : node_id
            The macrostate node whose microstates are converted.

        alt_rep : str
            Name of an alternative topology representation.
             (Default value = None)

        Returns
        -------
        traj : mdtraj.Trajectory

        """
        return self.wepy_h5.trace_to_mdtraj(self.base_network.node_assignments(node_id),
                                            alt_rep=alt_rep)
    def state_to_traj_fields(self, node_id, alt_rep=None):
        """Return the trajectory fields (chosen representation and box
        vectors) for the microstates of a single macrostate node."""
        return self.states_to_traj_fields([node_id], alt_rep=alt_rep)
def states_to_traj_fields(self, node_ids, alt_rep=None):
node_assignments = []
for node_id in node_ids:
node_assignments.extend(self.base_network.node_assignments(node_id))
# get the right fields
rep_path = self.wepy_h5._choose_rep_path(alt_rep)
fields = [rep_path, 'box_vectors']
return self.wepy_h5.get_trace_fields(node_assignments,
fields)
    def get_node_fields(self, node_id, fields):
        """Return the trajectory fields for all the microstates in the
        specified macrostate.

        Parameters
        ----------
        node_id : node_id

        fields : list of str
            Field name to retrieve.

        Returns
        -------

        fields : dict of str: array_like
            A dictionary mapping the names of the fields to an array of the field.
            Like fields of a trace.

        """
        node_trace = self.base_network.node_assignments(node_id)

        # use the node_trace to get the requested fields from the HDF5
        fields_d = self.wepy_h5.get_trace_fields(node_trace, fields)
        return fields_d
def iter_nodes_fields(self, fields):
"""Iterate over all nodes and return the field values for all the
microstates for each.
Parameters
----------
fields : list of str
Returns
-------
nodes_fields : dict of node_id: (dict of field: array_like)
A dictionary with an entry for each node.
Each node has it's own dictionary of node fields for each microstate.
"""
nodes_d = {}
for node_id in self.graph.nodes:
fields_d = self.base_network.get_node_fields(node_id, fields)
nodes_d[node_id] = fields_d
return nodes_d
def microstate_weights(self):
"""Returns the weights of each microstate on the basis of macrostates.
Returns
-------
microstate_weights : dict of node_id: ndarray
"""
node_weights = {}
for node_id in self.graph.nodes:
# get the trace of the frames in the node
node_trace = self.base_network.node_assignments(node_id)
# use the node_trace to get the weights from the HDF5
trace_weights = self.wepy_h5.get_trace_fields(node_trace, ['weights'])['weights']
node_weights[node_id] = trace_weights
return node_weights
def macrostate_weights(self):
"""Compute the total weight of each macrostate.
Returns
-------
macrostate_weights : dict of node_id: float
"""
macrostate_weights = {}
microstate_weights = self.microstate_weights()
for node_id, weights in microstate_weights.items():
macrostate_weights[node_id] = float(sum(weights)[0])
return macrostate_weights
    def set_macrostate_weights(self):
        """Compute the macrostate weights and set them as node attributes
        'total_weight'."""
        # store the computed totals on the in-memory base network's nodes
        self.base_network.set_nodes_observable(
            'total_weight',
            self.macrostate_weights(),
        )
    def node_fields_map(self, func, fields, map_func=map):
        """Map a function over the nodes and microstate fields.

        The function should take as its arguments:

        1. node_id
        2. dictionary of all the node attributes
        3. fields dictionary mapping traj field names. (The output of
        `MacroStateNetwork.get_node_fields`)

        This *will* give access to the underlying trajectory data in
        the HDF5 which can be requested with the `fields`
        argument. The behaviour is very similar to the
        `WepyHDF5.compute_observable` function with the added input
        data to the mapped function being all of the macrostate node
        attributes.

        Extra args not supported use 'functools.partial' to make
        functions with arguments for all data.

        Parameters
        ----------

        func : callable
            The function to map over the nodes.

        fields : iterable of str
            The microstate (trajectory) fields to provide to the mapped function.

        map_func : callable
            The mapping function, implementing the `map` interface

        Returns
        -------

        node_values : dict of node_id : values
            The mapping of node_ids to the values computed by the mapped func.

        """

        # wrap the function so that we can pass through the node_id
        def func_wrapper(args):
            node_id, node_attrs, node_fields = args

            # evaluate the wrapped function
            result = func(
                node_id,
                node_attrs,
                node_fields,
            )

            return node_id, result

        # zip the node_ids with the node attributes as an iterator
        # (lazy, so node fields are only read from the HDF5 as consumed)
        node_attr_fields_it = (
            (node_id,
             {**self.get_node_attributes(node_id), 'node_id' : node_id},
             self.get_node_fields(node_id, fields),
            )
            for node_id in self.graph.nodes)

        # map the inputs to the wrapped function and return as a
        # dictionary for the nodes
        return {
            node_id : value
            for node_id, value
            in map_func(func_wrapper, node_attr_fields_it)
        }
| mit |
astroML/astroML | astroML/plotting/mcmc.py | 5 | 4279 | import numpy as np
def convert_to_stdev(logL):
    """
    Given a grid of log-likelihood values, convert them to cumulative
    standard deviation.  This is useful for drawing contours from a
    grid of likelihoods.
    """
    likelihood = np.exp(logL)
    original_shape = likelihood.shape
    flat = likelihood.ravel()

    # order the cells from most to least likely
    order = np.argsort(flat)[::-1]

    # cumulative probability mass, normalized to end at 1
    cdf = np.cumsum(flat[order])
    cdf /= cdf[-1]

    # scatter the cumulative values back to the original cell positions
    result = np.empty_like(cdf)
    result[order] = cdf

    return result.reshape(original_shape)
# NOTE(review): ``levels`` and ``bounds`` are mutable (list) default
# arguments.  They are never mutated here so behavior is correct, but
# tuples would be the safer idiom.
def plot_mcmc(traces, labels=None, limits=None, true_values=None,
              fig=None, contour=True, scatter=False,
              levels=[0.683, 0.955], bins=20,
              bounds=[0.08, 0.08, 0.95, 0.95], **kwargs):
    """Plot a grid of MCMC results

    Parameters
    ----------
    traces : array_like
        the MCMC chain traces.  shape is [Ndim, Nchain]
    labels : list of strings (optional)
        if specified, the label associated with each trace
    limits : list of tuples (optional)
        if specified, the axes limits for each trace
    true_values : list of floats (optional)
        if specified, the true value for each trace (will be indicated with
        an 'X' on the plot)
    fig : matplotlib.Figure (optional)
        the figure on which to draw the axes.  If not specified, a new one
        will be created.
    contour : bool (optional)
        if True, then draw contours in each subplot.  Default=True.
    scatter : bool (optional)
        if True, then scatter points in each subplot.  Default=False.
    levels : list of floats
        the list of percentile levels at which to plot contours.  Each
        entry should be between 0 and 1
    bins : int, tuple, array, or tuple of arrays
        the binning parameter passed to np.histogram2d.  It is assumed that
        the point density is constant on the scale of the bins
    bounds : list of floats
        the bounds of the set of axes used for plotting

    additional keyword arguments are passed to scatter() and contour()

    Returns
    -------
    axes_list : list of matplotlib.Axes instances
        the list of axes created by the routine
    """
    # Import here so that testing with Agg will work
    from matplotlib import pyplot as plt

    if fig is None:
        fig = plt.figure(figsize=(8, 8))

    if limits is None:
        limits = [(t.min(), t.max()) for t in traces]

    if labels is None:
        labels = ['' for t in traces]

    num_traces = len(traces)

    # rebind ``bins`` from an int to one bin-edge array per trace
    bins = [np.linspace(limits[i][0], limits[i][1], bins + 1)
            for i in range(num_traces)]

    xmin, xmax = bounds[0], bounds[2]
    ymin, ymax = bounds[1], bounds[3]

    # size of each panel in figure-fraction coordinates
    dx = (xmax - xmin) * 1. / (num_traces - 1)
    dy = (ymax - ymin) * 1. / (num_traces - 1)

    axes_list = []

    # lower-triangle grid: one panel per pair (i, j) with i < j
    for j in range(1, num_traces):
        for i in range(j):
            ax = fig.add_axes([xmin + i * dx,
                               ymin + (num_traces - 1 - j) * dy,
                               dx, dy])

            if scatter:
                plt.scatter(traces[i], traces[j], **kwargs)

            if contour:
                H, xbins, ybins = np.histogram2d(traces[i], traces[j],
                                                 bins=(bins[i], bins[j]))

                # avoid log(0) in the stdev conversion below
                H[H == 0] = 1E-16
                Nsigma = convert_to_stdev(np.log(H))

                ax.contour(0.5 * (xbins[1:] + xbins[:-1]),
                           0.5 * (ybins[1:] + ybins[:-1]),
                           Nsigma.T, levels=levels, **kwargs)

            # only label the leftmost column and bottom row
            if i == 0:
                ax.set_ylabel(labels[j])
            else:
                ax.yaxis.set_major_formatter(plt.NullFormatter())

            if j == num_traces - 1:
                ax.set_xlabel(labels[i])
            else:
                ax.xaxis.set_major_formatter(plt.NullFormatter())

            if true_values is not None:
                ax.plot(limits[i], [true_values[j], true_values[j]],
                        ':k', lw=1)
                ax.plot([true_values[i], true_values[i]], limits[j],
                        ':k', lw=1)

            ax.set_xlim(limits[i])
            ax.set_ylim(limits[j])

            axes_list.append(ax)

    return axes_list
| bsd-2-clause |
wzbozon/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier

# create two clusters of separable points: a large one (1000 samples) and
# a small, tighter one (100 samples), so the classes are unbalanced
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
          0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)

# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)

# hyperplane w[0]*x + w[1]*y + b = 0  =>  y = -w[0]/w[1]*x - b/w[1]
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]

# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)

ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]

# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()

plt.axis('tight')
plt.show()
| bsd-3-clause |
martinwicke/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 30 | 4777 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
  """Return a copy of mapping `a` with ndarray values turned into lists."""
  converted = {}
  for key, val in a.items():
    converted[key] = val.tolist() if isinstance(val, np.ndarray) else val
  return converted
class _FeedingFunctionsTestCase(tf.test.TestCase):
  """Tests for feeding functions.

  Each test builds a feed function over a 16- or 32-row source, calls it
  repeatedly, and checks that batches wrap around the data cyclically.
  """

  def testArrayFeedFnBatchOne(self):
    # batch size 1: each call yields the next single row, wrapping at 16
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 16
      expected = {"index_placeholder": [i],
                  "value_placeholder": [[2 * i, 2 * i + 1]]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchFive(self):
    # batch size 5 does not divide 16, so batches straddle the wrap point
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)

    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()

    expected = {"index_placeholder": [15, 0, 1, 2, 3],
                "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testArrayFeedFnBatchOneHundred(self):
    # batch larger than the data: a single call wraps several times
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)

    expected = {"index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)),
                "value_placeholder": np.arange(32).reshape([16, 2]).tolist() * 6
                                     + [[0, 1], [2, 3], [4, 5], [6, 7]]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOne(self):
    if not HAS_PANDAS:
      return
    # DataFrame with a non-default index (96..127) to check index feeding
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)

    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {"index_placeholder": [i + 96],
                  "a_placeholder": [32 + i],
                  "b_placeholder": [64 + i]}
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchFive(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)

    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()

    expected = {"index_placeholder": [127, 96, 97, 98, 99],
                "a_placeholder": [63, 32, 33, 34, 35],
                "b_placeholder": [95, 64, 65, 66, 67]}
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))

  def testPandasFeedFnBatchOneHundred(self):
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)

    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
# run the test suite when this module is executed directly
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
nrhine1/scikit-learn | sklearn/utils/estimator_checks.py | 33 | 48331 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
# Lazily-loaded, preprocessed Boston dataset cache (see _boston_subset).
BOSTON = None

# Estimators whose "transform" returns both X and Y and therefore need
# special casing in several of the checks below.
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']

# NOTE(review): presumably the estimators that accept 2-D multi-output y
# (used by multioutput_estimator_convert_y_2d elsewhere) -- confirm.
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
                'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
                'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
                'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
                'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
                'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
                'RANSACRegressor', 'RadiusNeighborsRegressor',
                'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
    """Yield the checks that apply to every (non-meta) estimator."""
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_estimators_fit_returns_self

    # Check that all estimator yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages

    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency

    if name not in ['Imputer']:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf

    if name not in ['GaussianProcess']:
        # FIXME!
        # in particular GaussianProcess!
        yield check_estimators_overwrite_params
    if hasattr(Estimator, 'sparsify'):
        yield check_sparsify_coefficients

    yield check_estimator_sparse_data

    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    """Yield the checks specific to classifiers."""
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
        # TODO some complication with -1 label
            and name not in ["DecisionTreeClassifier",
                             "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.

        yield check_supervised_y_2d
    # test if NotFittedError is raised
    yield check_estimators_unfitted
    if 'class_weight' in Classifier().get_params().keys():
        yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
    """Yield the checks specific to regressors."""
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    yield check_supervised_y_2d
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    # Test if NotFittedError is raised
    yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
    """Yield the checks specific to transformers."""
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer',
                    'FunctionTransformer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
        yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
    """Yield the checks specific to clusterers."""
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
        yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
    """Yield every check that applies to ``Estimator``.

    The generic checks always apply; the classifier / regressor /
    transformer / clusterer groups are added depending on which mixins
    the estimator class inherits from.
    """
    check_groups = [_yield_non_meta_checks(name, Estimator)]
    if issubclass(Estimator, ClassifierMixin):
        check_groups.append(_yield_classifier_checks(name, Estimator))
    if issubclass(Estimator, RegressorMixin):
        check_groups.append(_yield_regressor_checks(name, Estimator))
    if issubclass(Estimator, TransformerMixin):
        check_groups.append(_yield_transformer_checks(name, Estimator))
    if issubclass(Estimator, ClusterMixin):
        check_groups.append(_yield_clustering_checks(name, Estimator))

    # generators are lazy, so collecting them first then draining them in
    # order yields exactly the same sequence as the original chained loops
    for group in check_groups:
        for check in group:
            yield check
def check_estimator(Estimator):
    """Check if estimator adheres to sklearn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    Parameters
    ----------
    Estimator : class
        Class to check.

    """
    # BUGFIX: ``Estimator`` is the class itself, so its name is
    # ``Estimator.__name__``.  ``Estimator.__class__.__name__`` is the name
    # of the *metaclass* (usually "type"), which silently defeated all of
    # the name-based special casing inside the individual checks.
    name = Estimator.__name__
    check_parameters_default_constructible(name, Estimator)
    for check in _yield_all_checks(name, Estimator):
        check(name, Estimator)
def _boston_subset(n_samples=200):
    """Return a shuffled, standardized (X, y) subset of the Boston dataset.

    The result is cached in the module-level ``BOSTON`` global, so the
    dataset is only loaded and preprocessed once per process.  Note the
    cache ignores ``n_samples`` on subsequent calls.
    """
    global BOSTON
    if BOSTON is None:
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON
def set_fast_parameters(estimator):
    """Shrink iteration/size parameters on ``estimator`` so checks run fast.

    Mutates the estimator in place via ``set_params`` / attribute
    assignment, based on which parameters its ``get_params`` exposes.
    """
    # speed up some estimators
    params = estimator.get_params()
    if ("n_iter" in params
            and estimator.__class__.__name__ != "TSNE"):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        # NMF
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR
        if estimator.__class__.__name__ == 'LinearSVR':
            estimator.set_params(max_iter=20)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)

    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)

    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100

    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)

    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """An object that is convertible to an array only via ``__array__``.

    Used to verify that estimators accept array-likes that are not
    actual ndarrays.
    """

    def __init__(self, data):
        # the wrapped array-like, returned verbatim by __array__
        self.data = data

    def __array__(self, dtype=None):
        return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
    """Check the estimator either handles every scipy sparse format or
    fails with an informative TypeError mentioning 'sparse'."""
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X_csr = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
        X = X_csr.asformat(sparse_format)
        # catch deprecation warnings
        with warnings.catch_warnings():
            if name in ['Scaler', 'StandardScaler']:
                # centering is not defined for sparse input
                estimator = Estimator(with_mean=False)
            else:
                estimator = Estimator()
        set_fast_parameters(estimator)
        # fit and predict
        try:
            estimator.fit(X, y)
            if hasattr(estimator, "predict"):
                pred = estimator.predict(X)
                assert_equal(pred.shape, (X.shape[0],))
            if hasattr(estimator, 'predict_proba'):
                probs = estimator.predict_proba(X)
                assert_equal(probs.shape, (X.shape[0], 4))
        except TypeError as e:
            if 'sparse' not in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: error message state explicitly that "
                      "sparse input is not supported if this is not the case."
                      % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
def check_dtype_object(name, Estimator):
    """Check that estimators treat dtype-object arrays as numeric when
    possible and raise a clear TypeError on non-numeric entries."""
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10).astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    with warnings.catch_warnings():
        estimator = Estimator()
    set_fast_parameters(estimator)

    estimator.fit(X, y)
    if hasattr(estimator, "predict"):
        estimator.predict(X)

    if hasattr(estimator, "transform"):
        estimator.transform(X)

    # object-dtype y is allowed as long as the labels are recognizable
    try:
        estimator.fit(X, y.astype(object))
    except Exception as e:
        if "Unknown label type" not in str(e):
            raise

    # a genuinely non-numeric entry must produce an informative TypeError
    X[0, 0] = {'foo': 'bar'}
    msg = "argument must be a string or a number"
    assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
    """Run the transformer consistency checks on array and list inputs."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # shift to non-negative values (needed e.g. for NMF-like transformers)
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
    _check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
    """Run the transformer consistency checks on __array__-only inputs."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    this_X = NotAnArray(X)
    this_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
    """Check that calling transform before fit raises an error."""
    X, y = _boston_subset()

    with warnings.catch_warnings(record=True):
        transformer = Transformer()

    assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
    """Fit ``Transformer`` on (X, y) and check that ``transform`` and
    repeated ``fit_transform`` calls give consistent outputs."""
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    set_fast_parameters(transformer)

    # fit

    if name in CROSS_DECOMPOSITION:
        # cross-decomposition needs a 2-column Y
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y

    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    if isinstance(X_pred, tuple):
        # cross-decomposition's transform returns (X, Y)
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        # check for consistent n_samples
        assert_equal(X_pred.shape[0], n_samples)

    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
        assert_equal(len(X_pred2), n_samples)
        assert_equal(len(X_pred3), n_samples)

        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
    # Check that wrapping the estimator in a one-step make_pipeline() yields
    # the same "score" and "fit_transform" results as the bare estimator.
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()  # some estimators require non-negative input
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    estimator.fit(X, y)
    pipeline.fit(X, y)
    funcs = ["score", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            # the pipeline delegates to its single step, so outputs must match
            func_pipeline = getattr(pipeline, func_name)
            result = func(X, y)
            result_pipe = func_pipeline(X, y)
            assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
    # check that all estimators accept an optional y
    # in fit and score so they can be used in pipelines
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func(X, y)
            # getargspec on a bound method still lists 'self', so the target
            # argument sits at index 2 and must be named y (or Y)
            args = inspect.getargspec(func).args
            assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
    # Fitting on zero-sample or zero-feature data must raise a ValueError
    # (with an informative message in the zero-feature case).
    e = Estimator()
    set_fast_parameters(e)
    set_random_state(e, 1)
    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    assert_raises(ValueError, e.fit, X_zero_samples, [])
    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
    msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    """Check that fit/predict/transform reject NaN and inf inputs.

    The estimator is expected to raise a ValueError whose message mentions
    'NaN' or 'inf' whenever such values appear in the data.
    """
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(name, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            # try to fit
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                # a ValueError is only acceptable if it names the bad value
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, Estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, Estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                # fitting succeeded on invalid data: that is the failure
                raise AssertionError(error_string_fit, Estimator)
            # actually fit
            estimator.fit(X_train_finite, y)
            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUGFIX: re-raise instead of swallowing the unexpected
                    # exception (the fit branch above already re-raises);
                    # otherwise a broken estimator would pass this check.
                    raise exc
                else:
                    raise AssertionError(error_string_predict, Estimator)
            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUGFIX: re-raise for the same reason as in predict.
                    raise exc
                else:
                    raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
    """Test that we can pickle all estimators"""
    # methods whose output must survive a pickle round-trip
    methods_to_check = ["predict", "transform", "decision_function",
                        "predict_proba"]
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't do features less than 0
    X -= X.min()
    # some estimators only take multioutputs
    y = multioutput_estimator_convert_y_2d(name, y)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
    set_random_state(estimator)
    set_fast_parameters(estimator)
    estimator.fit(X, y)
    # record the output of every supported method before pickling
    expected = dict((m, getattr(estimator, m)(X))
                    for m in methods_to_check if hasattr(estimator, m))
    # pickle and unpickle!
    restored = pickle.loads(pickle.dumps(estimator))
    for m, before in expected.items():
        # the restored estimator must reproduce the recorded outputs
        assert_array_almost_equal(before, getattr(restored, m)(X))
def check_estimators_partial_fit_n_features(name, Alg):
    # check if number of features changes between calls to partial_fit.
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    # classifiers must be told the full label set on the first call
    if isinstance(alg, ClassifierMixin):
        alg.partial_fit(X, y, classes=np.unique(y))
    else:
        alg.partial_fit(X, y)
    # a second call with fewer features must be rejected
    assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
    """Fit the clusterer on standard blobs and check the basic contract:
    labels_ shape, a minimal adjusted Rand score, list input support, and
    agreement between fit().labels_ and fit_predict()."""
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_fast_parameters(alg)
        if hasattr(alg, "n_clusters"):
            alg.set_params(n_clusters=3)
        set_random_state(alg)
        if name == 'AffinityPropagation':
            alg.set_params(preference=-100)
            alg.set_params(max_iter=100)
        # fit
        alg.fit(X)
        # with lists
        alg.fit(X.tolist())
        assert_equal(alg.labels_.shape, (n_samples,))
        pred = alg.labels_
        assert_greater(adjusted_rand_score(pred, y), 0.4)
        # fit another time with ``fit_predict`` and compare results
        # BUGFIX: compare strings with ==, not the identity operator `is`
        # (identity of equal string literals is a CPython interning detail).
        if name == 'SpectralClustering':
            # there is no way to make Spectral clustering deterministic :(
            return
        set_random_state(alg)
        with warnings.catch_warnings(record=True):
            pred2 = alg.fit_predict(X)
        assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
    """Check that predict is invariant of compute_labels"""
    X, _ = make_blobs(n_samples=20, random_state=0)
    clusterer = Clusterer()
    # only relevant for clusterers with a compute_labels switch
    # (MiniBatchKMeans)
    if not hasattr(clusterer, "compute_labels"):
        return
    if hasattr(clusterer, "random_state"):
        clusterer.set_params(random_state=0)
    labels_with = clusterer.fit(X).predict(X)
    clusterer.set_params(compute_labels=False)
    labels_without = clusterer.fit(X).predict(X)
    # predictions must not depend on the compute_labels setting
    assert_array_equal(labels_with, labels_without)
def check_classifiers_one_label(name, Classifier):
    # A classifier trained on a single class must either fit and predict
    # that class everywhere, or fail with an explicit error that mentions
    # the class problem.
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            # an explicit "class"-related refusal is an acceptable outcome
            if 'class' not in repr(e):
                print(error_string_fit, Classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                return
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            # every prediction must equal the single training class
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc
def check_classifiers_train(name, Classifier):
    """Fit the classifier on a multi-class and a binary blob problem and
    check the basic contract: input validation, training accuracy, and the
    agreement of decision_function/predict_proba with predict."""
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name in ['BernoulliNB', 'MultinomialNB']:
            # naive Bayes variants require non-negative input
            X -= X.min()
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # raises error on malformed input for fit
        assert_raises(ValueError, classifier.fit, X, y[:-1])
        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert_true(hasattr(classifier, "classes_"))
        y_pred = classifier.predict(X)
        assert_equal(y_pred.shape, (n_samples,))
        # training set performance
        if name not in ['BernoulliNB', 'MultinomialNB']:
            assert_greater(accuracy_score(y, y_pred), 0.83)
        # raises error on malformed input for predict
        assert_raises(ValueError, classifier.predict, X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                # BUGFIX: compare ints with ==, not `is` (identity of equal
                # ints is a CPython small-int caching detail).
                if n_classes == 2:
                    assert_equal(decision.shape, (n_samples,))
                    dec_pred = (decision.ravel() > 0).astype(np.int)
                    assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # 1on1 of LibSVM works differently
                    assert_equal(decision.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)
                # raises error on malformed input for decision_function
                # (this check was previously duplicated verbatim)
                assert_raises(ValueError,
                              classifier.decision_function, X.T)
            except NotImplementedError:
                pass
        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert_equal(y_prob.shape, (n_samples, n_classes))
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_array_almost_equal(np.sum(y_prob, axis=1),
                                      np.ones(n_samples))
            # raises error on malformed input for predict_proba
            # (this check was previously duplicated verbatim)
            assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
    """Check if self is returned when calling fit"""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    est = Estimator()
    set_fast_parameters(est)
    set_random_state(est)
    fitted = est.fit(X, y)
    # fit must return the estimator itself (identity, not equality)
    assert_true(fitted is est)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
    """Check that predict raises an exception in an unfitted estimator.
    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
    """
    # Common test for Regressors as well as Classifiers
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        est = Estimator()
    msg = "fit"
    # every prediction-like method must refuse to run before fit()
    for method_name in ('predict', 'decision_function',
                        'predict_proba', 'predict_log_proba'):
        if hasattr(est, method_name):
            assert_raise_message((AttributeError, ValueError), msg,
                                 getattr(est, method_name), X)
def check_supervised_y_2d(name, Estimator):
    # Passing a column-vector y of shape (n, 1) must raise a
    # DataConversionWarning (unless the estimator supports multi-output)
    # and produce the same predictions as the equivalent 1-D y.
    if "MultiTask" in name:
        # These only work on 2d, so this test makes no sense
        return
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # fit
    estimator.fit(X, y)
    y_pred = estimator.predict(X)
    # re-seed so the 2-D fit starts from the same random state
    set_random_state(estimator)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        estimator.fit(X, y[:, np.newaxis])
    y_pred_2d = estimator.predict(X)
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    if name not in MULTI_OUTPUT:
        # check that we warned if we don't support multi-output
        assert_greater(len(w), 0, msg)
        assert_true("DataConversionWarning('A column-vector y"
                    " was passed when a 1d array was expected" in msg)
    # predictions from the 1-D and 2-D targets must agree
    assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
    # Classifiers must handle string class labels (both str and object
    # dtype) and expose the training labels through classes_.
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]
    for y_names in [y_names, y_names.astype('O')]:
        if name in ["LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            y_ = y
        else:
            y_ = y_names
        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name == 'BernoulliNB':
            # give BernoulliNB a sensible binarization threshold
            classifier.set_params(binarize=X.mean())
        set_fast_parameters(classifier)
        set_random_state(classifier)
        # fit
        classifier.fit(X, y_)
        y_pred = classifier.predict(X)
        # training set performance
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        if np.any(classifier.classes_ != classes):
            # non-fatal report: classes_ should list the training labels
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
    # Regressors must give (nearly) identical predictions for an integer
    # target and the same target cast to float.
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    y = multioutput_estimator_convert_y_2d(name, y)
    # fresh RNG so the CROSS_DECOMPOSITION branch below is reproducible
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        regressor_1 = Regressor()
        regressor_2 = Regressor()
    set_fast_parameters(regressor_1)
    set_fast_parameters(regressor_2)
    set_random_state(regressor_1)
    set_random_state(regressor_2)
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a two-column target
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    # fit
    regressor_1.fit(X, y_)
    pred1 = regressor_1.predict(X)
    regressor_2.fit(X, y_.astype(np.float))
    pred2 = regressor_2.predict(X)
    assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
    # Fit on the Boston subset and check input validation, prediction
    # shape, and a minimal training-set R^2 score.
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    y = multioutput_estimator_convert_y_2d(name, y)
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        regressor = Regressor()
    set_fast_parameters(regressor)
    if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
        # linear regressors need to set alpha, but not generalized CV ones
        regressor.alpha = 0.01
    if name == 'PassiveAggressiveRegressor':
        regressor.C = 0.01
    # raises error on malformed input for fit
    assert_raises(ValueError, regressor.fit, X, y[:-1])
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition methods need a two-column target
        y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y
    set_random_state(regressor)
    regressor.fit(X, y_)
    regressor.fit(X.tolist(), y_.tolist())
    y_pred = regressor.predict(X)
    assert_equal(y_pred.shape, y_.shape)
    # TODO: find out why PLS and CCA fail. RANSAC is random
    # and furthermore assumes the presence of outliers, hence
    # skipped
    if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
        print(regressor)
        assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # checks whether regressors have decision_function or predict_proba
    rng = np.random.RandomState(0)
    X = rng.normal(size=(10, 4))
    y = multioutput_estimator_convert_y_2d(name, X[:, 0])
    regressor = Regressor()
    set_fast_parameters(regressor)
    if hasattr(regressor, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        regressor.n_components = 1
    regressor.fit(X, y)
    for attr_name in ("decision_function", "predict_proba",
                      "predict_log_proba"):
        method = getattr(regressor, attr_name, None)
        if method is None:
            # regressor does not expose this classification-only method
            continue
        # if present, it must be deprecated and warn when called
        assert_warns_message(DeprecationWarning, attr_name, method, X)
def check_class_weight_classifiers(name, Classifier):
    # An extreme class_weight must dominate predictions: with class 0
    # weighted 1000x, (almost) everything should be predicted as class 0.
    if name == "NuSVC":
        # the sparse version has a parameter that doesn't do anything
        raise SkipTest
    if name.endswith("NB"):
        # NaiveBayes classifiers have a somewhat different interface.
        # FIXME SOON!
        raise SkipTest
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        n_centers = len(np.unique(y_train))
        if n_centers == 2:
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        if hasattr(classifier, "min_weight_fraction_leaf"):
            classifier.set_params(min_weight_fraction_leaf=0.01)
        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        # the heavily weighted class must win almost everywhere
        assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
                                            X_test, y_test, weights):
    # Switching on class_weight='balanced' must improve the weighted F1
    # score on this (imbalanced) problem.
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        classifier.set_params(n_iter=100)
    set_random_state(classifier)
    pred_default = classifier.fit(X_train, y_train).predict(X_test)
    classifier.set_params(class_weight='balanced')
    pred_balanced = classifier.fit(X_train, y_train).predict(X_test)
    assert_greater(f1_score(y_test, pred_balanced, average='weighted'),
                   f1_score(y_test, pred_default, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        # This is a very small dataset, default n_iter are likely to prevent
        # convergence
        classifier.set_params(n_iter=1000)
    set_random_state(classifier)
    # Let the model compute the class frequencies
    classifier.set_params(class_weight='balanced')
    coef_balanced = classifier.fit(X, y).coef_.copy()
    # Count each label occurrence to reweight manually
    n_samples = len(y)
    n_classes = float(len(np.unique(y)))
    # balanced weighting: n_samples / (n_classes * count(class))
    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
                    -1: n_samples / (np.sum(y == -1) * n_classes)}
    classifier.set_params(class_weight=class_weight)
    coef_manual = classifier.fit(X, y).coef_.copy()
    # the automatic and manual weightings must give the same coefficients
    assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
    # fit() must not change or mutate the constructor parameters.
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    # Make a physical copy of the orginal estimator parameters before fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)
    # Fit the model
    estimator.fit(X, y)
    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]
        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
        # NOTE(review): `hash` is presumably joblib.hash imported at module
        # level (the builtin would fail on mutable values) -- confirm against
        # the file's imports.
        assert_equal(hash(new_value), hash(original_value),
                     "Estimator %s should not change or mutate "
                     " the parameter %s from %s to %s during fit."
                     % (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
    # sparsify() must turn coef_ into a sparse matrix without changing
    # predictions, and the sparsified model must survive pickling.
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    est = Estimator()
    est.fit(X, y)
    pred_orig = est.predict(X)
    # test sparsify with dense inputs
    est.sparsify()
    assert_true(sparse.issparse(est.coef_))
    assert_array_equal(est.predict(X), pred_orig)
    # pickle and unpickle with sparse coef_
    est = pickle.loads(pickle.dumps(est))
    assert_true(sparse.issparse(est.coef_))
    assert_array_equal(est.predict(X), pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
    # Classification data wrapped in NotAnArray must behave like ndarrays.
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    y = multioutput_estimator_convert_y_2d(name, [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
    # Regression data wrapped in NotAnArray must behave like ndarrays.
    X, y = _boston_subset(n_samples=50)
    check_estimators_data_not_an_array(
        name, Estimator, X, multioutput_estimator_convert_y_2d(name, y))
def check_estimators_data_not_an_array(name, Estimator, X, y):
    # Fitting on array-like containers (NotAnArray) must give the same
    # predictions as fitting on the underlying ndarrays.
    if name in CROSS_DECOMPOSITION:
        raise SkipTest
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        estimator_1 = Estimator()
        estimator_2 = Estimator()
    set_fast_parameters(estimator_1)
    set_fast_parameters(estimator_2)
    set_random_state(estimator_1)
    set_random_state(estimator_2)
    y_ = NotAnArray(np.asarray(y))
    X_ = NotAnArray(np.asarray(X))
    # fit
    estimator_1.fit(X_, y_)
    pred1 = estimator_1.predict(X_)
    estimator_2.fit(X, y)
    pred2 = estimator_2.predict(X)
    assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
    # Estimators must be constructible with default arguments, cloneable,
    # printable, and their __init__ must do nothing but store parameters.
    classifier = LDA()
    # test default-constructibility
    # get rid of deprecation warnings
    with warnings.catch_warnings(record=True):
        if name in META_ESTIMATORS:
            # meta-estimators need a base estimator argument
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)
        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.
        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            args, varargs, kws, defaults = inspect.getargspec(init)
        except TypeError:
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they need a non-default argument
            args = args[2:]
        else:
            args = args[1:]
        if args:
            # non-empty list
            assert_equal(len(args), len(defaults))
        else:
            return
        for arg, default in zip(args, defaults):
            # defaults must be simple, immutable values
            assert_in(type(default), [str, int, float, bool, tuple, type(None),
                                      np.float64, types.FunctionType, Memory])
            if arg not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(default is None)
                continue
            if isinstance(params[arg], np.ndarray):
                assert_array_equal(params[arg], default)
            else:
                # stored attribute must equal the signature default
                assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    """Return y as a column vector for multi-task estimators, unchanged
    otherwise (those estimators raise ValueError on 1-D targets)."""
    multitask_names = ('MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                       'MultiTaskLasso', 'MultiTaskElasticNet')
    return y[:, np.newaxis] if name in multitask_names else y
def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    # Check if all iterative solvers, run for more than one iteratiom
    iris = load_iris()
    X, y_ = iris.data, iris.target
    if multi_output:
        y_ = y_[:, np.newaxis]
    set_random_state(estimator, 0)
    # AffinityPropagation is unsupervised and takes no target
    fit_args = (X,) if name == 'AffinityPropagation' else (X, y_)
    estimator.fit(*fit_args)
    assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
    # Iterative transformers must report n_iter_ > 1 after fitting.
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        # shift data to be strictly positive (some transformers require it)
        X -= X.min() - 0.1
    set_random_state(estimator, 0)
    estimator.fit(X, y_)
    # These return a n_iter per component.
    if name in CROSS_DECOMPOSITION:
        for iter_ in estimator.n_iter_:
            assert_greater(iter_, 1)
    else:
        assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
    """Check that get_params(deep=True) is a superset of
    get_params(deep=False)."""
    class T(BaseEstimator):
        """Mock classifier
        """
        def __init__(self):
            pass

        def fit(self, X, y):
            return self
    if name in ('FeatureUnion', 'Pipeline'):
        e = estimator([('clf', T())])
    # BUGFIX: a comma was missing between the two names, so the parentheses
    # held a single implicitly-concatenated string
    # 'GridSearchCVRandomizedSearchCV' and `name in ...` silently became a
    # substring test instead of a tuple membership test.
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
        return
    else:
        e = estimator()
    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)
    # every shallow (own) parameter must also appear in the deep view
    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))
| bsd-3-clause |
jasoncorso/kittipy | kitti/velodyne.py | 2 | 4979 | import os
import numpy as np
from kitti.data import get_drive_dir, Calib, get_inds, image_shape, get_calib_dir
def get_velodyne_dir(drive, **kwargs):
    """Return the directory holding the raw velodyne scans for a drive."""
    return os.path.join(get_drive_dir(drive, **kwargs),
                        'velodyne_points', 'data')
def load_velodyne_points(drive, frame, **kwargs):
    """Read one velodyne scan and return its (N, 3) xyz coordinates."""
    path = os.path.join(get_velodyne_dir(drive, **kwargs), "%010d.bin" % frame)
    scan = np.fromfile(path, dtype=np.float32).reshape(-1, 4)
    return scan[:, :3]  # drop the fourth (luminance) channel
def load_disparity_points(drive, frame, color=False, **kwargs):
    # Project the velodyne scan of one frame into the camera and return the
    # sparse (x, y, disparity) points that land inside the first image.
    calib = Calib(color=color, **kwargs)
    # read velodyne points
    points = load_velodyne_points(drive, frame, **kwargs)
    # remove all points behind image plane (approximation)
    points = points[points[:, 0] >= 1, :]
    # convert points to each camera
    xyd = calib.velo2disp(points)
    # take only points that fall in the first image
    xyd = calib.filter_disps(xyd)
    return xyd
def lin_interp(shape, xyd):
    """Densify sparse (x, y, disparity) samples over an image of the given
    shape by piecewise-linear interpolation (0 outside the convex hull)."""
    from scipy.interpolate import LinearNDInterpolator
    rows, cols = shape
    # xyd rows are (x, y, d); the interpolator wants (row, col) coordinates
    ij, d = xyd[:, 1::-1], xyd[:, 2]
    interpolator = LinearNDInterpolator(ij, d, fill_value=0)
    grid_i, grid_j = np.meshgrid(np.arange(rows), np.arange(cols),
                                 indexing='ij')
    samples = np.column_stack([grid_i.ravel(), grid_j.ravel()])
    return interpolator(samples).reshape(shape)
def lstsq_interp(shape, xyd, lamb=1, maxiter=None, valid=True):
    # Densify sparse (x, y, disparity) samples by solving a regularized
    # least-squares problem with conjugate gradients: match the samples
    # while penalizing first-order differences (lamb sets the smoothness).
    import scipy.sparse
    import scipy.sparse.linalg
    assert xyd.ndim == 2 and xyd.shape[1] == 3
    if valid:
        # clip out the valid region, and call recursively
        j, i, d = xyd.T
        i0, i1, j0, j1 = i.min(), i.max(), j.min(), j.max()
        subpoints = xyd - [[j0, i0, 0]]
        # pixels outside the sampled bounding box are marked with -1
        output = -np.ones(shape)
        suboutput = output[i0:i1+1, j0:j1+1]
        subshape = suboutput.shape
        suboutput[:] = lstsq_interp(subshape, subpoints,
                                    lamb=lamb, maxiter=maxiter, valid=False)
        return output
    Cmask = np.zeros(shape, dtype=bool)  # pixels holding a measurement
    m = np.zeros(shape)                  # the measured disparity values
    for j, i, d in xyd:
        Cmask[i, j] = 1
        m[i, j] = d
    def calcAA(x):
        # Matrix-free application of the normal-equations operator:
        # lamb * (L^T L + T^T T) x plus identity on measured pixels.
        x = x.reshape(shape)
        y = np.zeros_like(x)
        # --- smoothness constraints
        # L = [[1 -1 0 ...], [0 1 -1 ...], ...] (horizontal first-order)
        Lx = -np.diff(x, axis=1)
        y[:,0] += Lx[:,0]
        y[:,-1] -= Lx[:,-1]
        y[:,1:-1] += np.diff(Lx, axis=1)
        # T = [[1 0 0 ...], [-1 1 0 ...], [0 -1 0 ...], ...] (vert. 1st-order)
        Tx = -np.diff(x, axis=0)
        y[0,:] += Tx[0,:]
        y[-1,:] -= Tx[-1,:]
        y[1:-1,:] += np.diff(Tx, axis=0)
        y *= lamb
        # --- measurement constraints
        y[Cmask] += x[Cmask]
        return y.flatten()
    n_pixels = np.prod(shape)
    G = scipy.sparse.linalg.LinearOperator(
        (n_pixels, n_pixels), matvec=calcAA, dtype=np.float)
    # x0 = np.zeros(shape)
    # seed CG with the linear interpolation for faster convergence
    x0 = lin_interp(shape, xyd)
    x, info = scipy.sparse.linalg.cg(G, m.flatten(), x0=x0.flatten(),
                                     maxiter=maxiter)
    return x.reshape(shape)
def bp_interp(image_shape, xyd):
    """Densify sparse (x, y, disparity) samples with loopy belief
    propagation (kitti.bp.interp), seeding each sampled pixel with its
    rounded disparity (unsampled pixels stay 0)."""
    from kitti.bp import interp
    seed = np.zeros(image_shape, dtype='uint8')
    for x, y, d in np.round(xyd):
        seed[y, x] = d
    # Hand-tuned BP parameters; earlier experimental parameter sets and
    # plotting scaffolding were removed as dead (commented-out) code.
    disp = interp(seed, values=seed.max(), seed_weight=10, disc_max=5)
    return disp
def bp_stereo_interp(img0, img1, xyd):
    # Belief-propagation stereo (kitti.bp.stereo) between two same-shape
    # rectified images, seeded with the sparse velodyne disparities.
    from kitti.bp import stereo
    assert img0.shape == img1.shape
    seed = np.zeros(img0.shape, dtype='uint8')
    for x, y, d in np.round(xyd):
        seed[y, x] = d
    # hand-tuned BP parameters; `values` bounds the disparity search range
    params = dict(values=seed.max(), levels=6, min_level=1,
                  disc_max=30, seed_weight=1, data_weight=0.01, data_max=100)
    # disp = stereo(img0, img1, seed, values=128,
    disp = stereo(img0, img1, seed, **params)
    return disp
def create_disparity_video(drive, color=False, **kwargs):
    # Render an interpolated disparity PNG for every velodyne frame of the
    # drive into a freshly created disparity directory.
    import scipy.misc
    from kitti.raw import get_disp_dir
    disp_dir = get_disp_dir(drive, **kwargs)
    if os.path.exists(disp_dir):
        # refuse to overwrite previously rendered output
        raise RuntimeError("Target directory already exists. "
                           "Please delete '%s' and re-run." % disp_dir)
    calib_dir = get_calib_dir(**kwargs)
    velodyne_dir = get_velodyne_dir(drive, **kwargs)
    inds = get_inds(velodyne_dir, ext='.bin')
    os.makedirs(disp_dir)
    for i in inds:
        # NOTE(review): `get_disparity_points` is not defined in this module
        # (only `load_disparity_points(drive, frame, ...)` exists) and
        # `lstsq_interp` takes (shape, xyd), not (shape, points, disps) --
        # this function looks stale; confirm before use.
        points, disps = get_disparity_points(
            calib_dir, velodyne_dir, i, color=color)
        disp = lstsq_interp(image_shape, points, disps)
        # clamp negatives (unfilled border region) before uint8 conversion
        disp[disp < 0] = 0
        disp = disp.astype('uint8')
        path = os.path.join(disp_dir, '%010d.png' % i)
        scipy.misc.imsave(path, disp)
        print "Created disp image %d" % i
| mit |
febert/DeepRL | easy21/sarsa_lambda.py | 1 | 3748 | import cPickle
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from mpl_toolkits.mplot3d import axes3d
import os
os.environ["FONTCONFIG_PATH"]="/etc/fonts"
#
# import time
#
# def procedure():
# time.sleep(2.5)
#
# # measure process time
# t0 = time.clock()
# procedure()
# print time.clock() - t0, "seconds process time"
from easy21 import *
# Load the reference Q table produced by the 1e6-episode Monte Carlo run;
# it is the ground truth for the SARSA(lambda) error curves below.
pkl_file = open('Qtable_monte_carlo_1e6.pkl', 'rb')
Q_table_mc = cPickle.load(pkl_file)
pkl_file.close()
def compare_qtables(Qtable, Q_table_mc):
    """Return the root-mean-square error between two Q tables.

    Computed directly with numpy; the previous version routed the flattened
    tables through sklearn's mean_squared_error for the same result, and
    carried a line of dead commented-out code.
    """
    diff = np.asarray(Qtable, dtype=float) - np.asarray(Q_table_mc,
                                                        dtype=float)
    return sqrt(np.mean(diff ** 2))
def runepisode():
    # Play one Easy21 episode with backward-view SARSA(lambda), updating the
    # module globals in place: Qtable (action values), Nsa (visit counts),
    # Esa (eligibility traces) and `episode` (visited state-action-reward
    # triples). `step`, `policy` and `lambda_` come from the enclosing module.
    #initialize the state and acion randomly at beginning of episode
    state = np.random.randint(low = 1, high=10, size=None),np.random.randint(low = 1, high=10, size=None) #player, dealer
    A = policy(state)
    terminated = False
    while( not terminated):
        reward, successor, terminated = step(state[0],state[1],A)
        #print("successor" , successor)
        if not terminated:
            A_prime = policy(successor)
            Qsprime_aprime = Qtable[successor[0]-1,successor[1]-1,A_prime]
        else:
            # terminal states are worth zero by definition
            Qsprime_aprime = 0
        # TD error of the current transition (gamma = 1 in Easy21)
        delta = reward + Qsprime_aprime - Qtable[state[0]-1,state[1]-1,A]
        episode.append((state,A,reward))
        #counting state visits
        Nsa[state[0]-1,state[1]-1,A] += 1
        Esa[state[0]-1,state[1]-1,A] += 1
        # broadcast delta to every visited pair, weighted by its decaying
        # eligibility trace. NOTE(review): the loop variable shadows
        # `reward`; harmless since step() reassigns it next iteration.
        for s, a, reward in episode:
            # step size 1/N(s,a); Nsa is a float array, so this is true division
            alpha = 1/Nsa[s[0]-1,s[1]-1,a]
            Qtable[s[0]-1,s[1]-1,a] += alpha*delta*Esa[s[0]-1,s[1]-1,a]
            Esa[s[0]-1,s[1]-1,a]*= lambda_
        if not terminated:
            A = A_prime
            state = successor
def policy(state):
    """Epsilon-greedy action selection over the global Qtable.

    Epsilon decays with state visits: eps = N_0 / (N_0 + N(s)), so early
    visits explore almost always and well-visited states exploit.
    Returns 0 or 1; the action meaning is defined by easy21.step --
    confirm which value is hit vs stick.
    """
    # print(Nsa)
    # print Nsa.shape
    # Total visits to this state across both actions. Nsa is a float array,
    # so the division below stays float even under Python 2.
    Ns = Nsa[state[0]-1,state[1]-1,0] + Nsa[state[0]-1,state[1]-1,1]
    N_0 = 100
    epsilon = N_0/(N_0 + Ns)
    # Draw 1 (explore) with probability epsilon, else 0 (exploit).
    explore = np.random.choice([1,0],p=[epsilon, 1-epsilon])
    if not explore:
        return np.argmax(Qtable[state[0]-1,state[1]-1,:])
    else:
        return np.random.choice([1,0])
# --- SARSA(lambda) training sweep ----------------------------------------
# For each lambda in {0.0, 0.1, ..., 1.0}, train a fresh Q-table for
# `numiter` episodes and track the RMSE against the Monte Carlo baseline.
numiter = 100 #00000
error_lists = []
mse_1000 = []
for lambda_ in np.linspace(0,1,11):
    mse = []
    # Fresh estimates and visit counts per lambda; axes are
    # (player sum 1-21, dealer card 1-10, action 0/1).
    Qtable = np.zeros((21,10,2))
    Nsa = np.zeros((21,10,2))
    print lambda_
    # policy = np.argmax(Qtable[state])
    for i in range(numiter):
        episode = [] #just one episode
        # Eligibility traces reset at the start of every episode.
        Esa = np.zeros((21,10,2))
        #print i
        runepisode()
        #compare q tables
        # Sample the learning curve every 1000 episodes.
        if i%1000 == 0:
            mse.append(compare_qtables(Qtable,Q_table_mc))
    # Final error for this lambda after all episodes.
    mse_1000.append(compare_qtables(Qtable,Q_table_mc))
    error_lists.append(mse)
# NOTE(review): Qtable here is the one trained with the LAST lambda (1.0)
# only; the surface/policy plots below do not cover the other settings.
opt_Valuefunction = np.max(Qtable,2)
print(opt_Valuefunction.shape)
## save to file
# NOTE(review): enabling `save` would OVERWRITE the Monte Carlo baseline
# file loaded above -- confirm the filename before flipping this flag.
save = False
if save:
    output = open('Qtable_monte_carlo_1e6.pkl', 'wb')
    cPickle.dump(opt_Valuefunction, output)
    output.close()
# Wireframe of the learned value function over (player, dealer).
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.meshgrid(range(1,11), range(1,22))
# print(X.shape,Y.shape)
ax.plot_wireframe(X,Y, opt_Valuefunction)
ax.set_xlabel("dealer")
ax.set_ylabel("player")
ax.set_zlabel("value")
# Greedy policy image (action index per state; meaning defined by easy21).
fig = plt.figure()
opt_policy = np.argmax(Qtable,2)
plt.imshow(opt_policy,cmap=plt.get_cmap('gray'),interpolation='none')
plt.xlabel("dealer")
plt.ylabel("player")
# Final RMSE per lambda.
# NOTE(review): the x axis is the list index 0..10, not the lambda value
# itself -- plot against np.linspace(0,1,11) for a truthful axis.
fig = plt.figure()
plt.plot(mse_1000)
plt.xlabel("lambda")
plt.ylabel("mean squared errror from 1e6 monte carlo")
# Learning curves for every lambda.
# NOTE(review): plt.legend is called inside the loop with only the current
# handle, so only the last lambda survives in the legend -- collect the
# handles and call legend once after the loop.
fig = plt.figure()
for i in range(11):
    line1, = plt.plot(range(len(error_lists[i])), error_lists[i], label="lambda ="+str(np.linspace(0,1,11)[i]))
    plt.legend(handles=[line1], loc=1)
plt.xlabel("1000 episodes")
plt.ylabel("Mean squared error to Monte Carlo 1e6 ")
plt.show()
| gpl-3.0 |
soazig/project-epsilon-1 | code/utils/scripts/convolution_normal_script.py | 3 | 3081 |
"""
Purpose:
-----------------------------------------------------------------------------------
We generate the convolved hemodynamic neural prediction into separate txt files
for all four conditions (task, gain, loss, distance), and also generate plots
of the 4 BOLD signals over time for each of them.
Steps:
-----------------------------------------------------------------------------------
1. Extract 4 conditions of each subject's run
2. Load the data to get the 4th dimension shape
3. Convolve with hrf
4. Plot sampled HRFs with the high resolution neural time course
5. Save the convolved data into txt files
"""
from __future__ import absolute_import, division, print_function
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from stimuli import *
from scipy.stats import gamma
from organize_behavior_data import *
# Create the necessary directories if they do not exist
dirs = ['../../../txt_output', '../../../txt_output/conv_normal',\
        '../../../fig','../../../fig/conv_normal']
for d in dirs:
    if not os.path.exists(d):
        os.makedirs(d)
# Locate the different paths
project_path = '../../../'
data_path = project_path+'data/ds005/'
#change here to get your subject !
subject_list = [str(i) for i in range(1,17)]
#subject_list = ['1','5']
#change here to get your run number !
run_list = [str(i) for i in range(1,4)]
cond_list = [str(i) for i in range(1,5)]
# Loop through conditions by subject and by run
# Each entry is (output-name, onset-file path); the name encodes subject,
# run and condition, e.g. 'ds005_sub001_t1r1_conv001'.
condition_paths = [('ds005_sub' + s.zfill(3) + '_t1r' + r +'_conv'+ c.zfill(3), \
    data_path + 'sub' + s.zfill(3) + '/model/model001/onsets/task001_run' \
    + r.zfill(3) + '/cond'+ c.zfill(3) + '.txt') for c in cond_list \
    for r in run_list \
    for s in subject_list]
condition = ['task','gain','loss','dist']
#Use the first image to get the data dimensions
image_path = data_path + 'sub001/BOLD/task001_run001/bold.nii.gz'
img = nib.load(image_path)
# NOTE(review): get_data() is deprecated in nibabel -- prefer get_fdata().
data_int = img.get_data()
data = data_int.astype(float)
#set the TR
TR = 2.0
#get canonical hrf
# NOTE(review): the HRF support is 0..data.shape[2] seconds, i.e. the size
# of the 3rd SPATIAL axis is reused as the HRF duration -- confirm this is
# intentional (a fixed duration such as ~30 s is conventional).
tr_times = np.arange(0, data.shape[2], TR)
hrf_at_trs = hrf(tr_times)
n_vols = data.shape[-1]
vol_shape = data.shape[:-1]
all_tr_times = np.arange(data.shape[-1]) * TR
for cond_path in condition_paths:
    name = cond_path[0]
    path = cond_path[1]
    cond = np.loadtxt(path, skiprows = 1)
    # On/off neural time course at TR resolution (from project stimuli.py).
    neural_prediction = events2neural(cond,TR,n_vols)
    # Convolve with the HRF, then trim the tail so length matches n_vols.
    convolved = np.convolve(neural_prediction, hrf_at_trs)
    convolved = convolved[:-(len(hrf_at_trs)-1)]
    #plot
    plt.plot(all_tr_times, neural_prediction, label="neural_prediction")
    plt.plot(all_tr_times, convolved, label="convolved")
    # name[24] is the last digit of the condition number in the name above.
    plt.title(name+'_%s'%(condition[int(name[24])-1]))
    plt.xlabel('Time (seconds)')
    plt.ylabel('Convolved values at TR onsets (condition: %s)'%(condition[int(name[24])-1]))
    plt.legend(loc='lower right')
    plt.savefig(dirs[3]+'/'+ name +'_canonical.png')
    plt.close()
    #save the txt file
    np.savetxt(dirs[1] +'/'+ name +'_canonical.txt', convolved)
| bsd-3-clause |
kingaza/kaggle | Bowl2017/preprocess.py | 1 | 10729 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 14:29:45 2017
@author: hejiew
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import dicom
import os
import h5py
import scipy.ndimage
import scipy.signal
import matplotlib.pyplot as plt
import skimage.util
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# constants
# NOTE(review): TRAINING_IMAGE_SIZE is not referenced anywhere in this
# file -- presumably consumed by a downstream training script; verify.
TRAINING_IMAGE_SIZE = 384
# Kaggle Data Science Bowl 2017 directory layout and output file names.
INPUT_FOLDER = '../input'
IMAGE_FOLDER = 'sample_images'
OUTPUT_FOLDER = '../output'
LABEL_FILE = "stage1_labels.csv"
DATASET_FILE = "datasets.hdf5"
# Load the scans in given folder path
def load_scan(path):
    """Load all DICOM slices for one patient directory.

    Slices are sorted by InstanceNumber, and each is annotated with a
    SliceThickness derived from the z-gap between the first two slices.

    Parameters
    ----------
    path : str
        Directory containing one DICOM file per slice.

    Returns
    -------
    list of DICOM datasets, ordered by InstanceNumber.
    """
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key = lambda x: int(x.InstanceNumber))
    try:
        # Preferred: z-distance between the first two slices.
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except Exception:
        # Fix: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit. Some series lack ImagePositionPatient, so fall
        # back to SliceLocation.
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
def get_pixels_hu(scans):
    """Convert a list of DICOM slices into a 3D int16 volume in Hounsfield
    units, applying each scan's rescale slope and intercept."""
    volume = np.stack([s.pixel_array for s in scans])
    # Convert to int16 (from sometimes int16),
    # should be possible as values should always be low enough (<32k)
    volume = volume.astype(np.int16)
    # Pixels outside the reconstruction circle are tagged -2000; treat them
    # as 0 so they map to air once the intercept (usually -1024) is added.
    volume[volume == -2000] = 0
    # Rescale raw values into Hounsfield units.
    intercept = scans[0].RescaleIntercept
    slope = scans[0].RescaleSlope
    if slope != 1:
        volume = slope * volume.astype(np.float64)
        volume = volume.astype(np.int16)
    volume += np.int16(intercept)
    return np.array(volume, dtype=np.int16)
def resample(image, scan, new_spacing=[1,1,1]):
    """Resample a CT volume to a uniform voxel spacing.

    Parameters
    ----------
    image : 3D ndarray (slices, rows, cols).
    scan : list of DICOM slices; scan[0] supplies SliceThickness (z) and
        PixelSpacing (in-plane) in millimetres.
    new_spacing : target (z, y, x) spacing in mm. (The mutable default is
        kept for interface compatibility; it is only reassigned, never
        mutated, so it is safe.)

    Returns
    -------
    (resampled_image, actual_new_spacing) -- the spacing is recomputed from
    the rounded output shape, so it can differ slightly from the request.
    """
    # Current spacing: slice thickness (z) then in-plane pixel spacing.
    spacing = np.array([float(scan[0].SliceThickness)] +
                       [float(v) for v in scan[0].PixelSpacing])
    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    # Recompute the factor from the rounded shape so the reported spacing
    # matches what the interpolation actually produces.
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor
    # Fix: scipy.ndimage.interpolation.zoom is a long-deprecated private
    # namespace (removed in recent SciPy); use the public scipy.ndimage.zoom.
    image = scipy.ndimage.zoom(image, real_resize_factor)
    return image, new_spacing
def plot_3d(image, ax, threshold=-300):
    """Render an isosurface of `image` at `threshold` HU on the given 3D axes.

    NOTE(review): recent scikit-image versions of marching_cubes return
    (verts, faces, normals, values); the 2-value unpack below matches the
    older API -- confirm the pinned skimage version.
    """
    # Position the scan upright,
    # so the head of the patient would be at the top facing the camera
    p = image.transpose(2,1,0)
    p = p[:,:,::-1]
    verts, faces = measure.marching_cubes(p, threshold)
    #ax = fig.add_subplot(111, projection='3d')
    # Fancy indexing: `verts[faces]` to generate a collection of triangles
    mesh = Poly3DCollection(verts[faces], alpha=0.1)
    face_color = [0.5, 0.5, 1]
    mesh.set_facecolor(face_color)
    ax.add_collection3d(mesh)
    ax.set_xlim(0, p.shape[0])
    ax.set_ylim(0, p.shape[1])
    ax.set_zlim(0, p.shape[2])
    # plt.show()
def plot_segmented_images(slice_image, orginal_volumn, segmented_lung, filled_lung):
    """Show a 2x2 overview: one axial slice plus 3D renderings of the raw
    volume (bone threshold 400 HU), the segmented lungs, and the filled
    lung structures (both at threshold 0)."""
    fig = plt.figure(figsize=(20,20))
    slice_axes = fig.add_subplot(221)
    slice_axes.imshow(slice_image, cmap=plt.cm.bone)
    plot_3d(orginal_volumn, fig.add_subplot(222, projection='3d'), 400)
    plot_3d(segmented_lung, fig.add_subplot(223, projection='3d'), 0)
    plot_3d(filled_lung, fig.add_subplot(224, projection='3d'), 0)
    plt.show()
    return
def largest_label_volume(im, bg=-1):
    """Return the most frequent label in `im`, ignoring the background
    label `bg`, or None if only background is present.

    Fix: the original called np.argmax on an empty array (raising
    ValueError) when every voxel was background, even though the caller
    segment_lung_mask explicitly checks this function's result against
    None -- now None is actually returned in that case.
    """
    vals, counts = np.unique(im, return_counts=True)
    counts = counts[vals != bg]
    vals = vals[vals != bg]
    if counts.size == 0:
        # Nothing but background: no meaningful "largest" label.
        return None
    return vals[np.argmax(counts)]
def segment_lung_mask(image, fill_lung_structures=True):
    """Return a binary lung mask (1 = lung, 0 = everything else) for a HU
    volume, by thresholding at -350 HU and removing the surrounding air.

    NOTE(review): the `is not None` checks below assume largest_label_volume
    can return None; as written that helper raises on an all-background
    label set instead -- confirm the helper's contract.
    """
    # not actually binary, but 1 and 2.
    # 0 is treated as background, which we do not want
    binary_image = np.array(image > -350, dtype=np.int8)+1
    labels = measure.label(binary_image)
    # Pick the pixel in the very corner to determine which label is air.
    # Improvement: Pick multiple background labels from around the patient
    # More resistant to "trays" on which the patient lays cutting the air
    # around the person in half
    background_label = labels[0,0,0]
    #Fill the air around the person
    binary_image[background_label == labels] = 2
    # Method of filling the lung structures (that is superior to something like
    # morphological closing)
    if fill_lung_structures:
        # For every slice we determine the largest solid structure
        for i, axial_slice in enumerate(binary_image):
            axial_slice = axial_slice - 1
            labeling = measure.label(axial_slice)
            l_max = largest_label_volume(labeling, bg=0)
            if l_max is not None: #This slice contains some lung
                binary_image[i][labeling != l_max] = 1
    binary_image -= 1 #Make the image actual binary
    binary_image = 1-binary_image # Invert it, lungs are now 1
    # Remove other air pockets insided body
    labels = measure.label(binary_image, background=0)
    l_max = largest_label_volume(labels, bg=0)
    if l_max is not None: # There are air pockets
        binary_image[labels != l_max] = 0
    return binary_image
def normalize(image):
    """Linearly map HU values in [-1000, 400] onto [0, 1], clipping
    anything outside that range to the nearest bound."""
    HU_MIN = -1000.0
    HU_MAX = 400.0
    scaled = (image - HU_MIN) / (HU_MAX - HU_MIN)
    return np.clip(scaled, 0., 1.)
def zero_center(image):
    """Center normalized intensities around zero by subtracting the
    dataset-wide mean pixel value (0.25)."""
    mean_pixel_value = 0.25
    return image - mean_pixel_value
def generate_gaussian_window(length=51, p=1.5, sigma=7):
    """Return a generalized Gaussian window (peak 1.0 at the center).

    Parameters default to the values the original hard-coded, so existing
    zero-argument calls behave identically; they are now tunable.

    Fix: window functions moved to scipy.signal.windows and the old
    scipy.signal aliases were removed in SciPy 1.13 -- use the new
    location, falling back to the alias on very old SciPy.
    """
    try:
        return scipy.signal.windows.general_gaussian(length, p=p, sig=sigma)
    except AttributeError:
        # SciPy < 1.1: windows submodule not available under this name.
        return scipy.signal.general_gaussian(length, p=p, sig=sigma)
def get_base_image(pixels):
    """Collapse a 3D volume to one 2D image as a weighted average of axial
    slices, with semicircular weights peaking at the middle of the stack
    and falling to zero at both ends."""
    depth, rows, cols = np.shape(pixels)
    weight_total = 0
    weighted_sum = np.zeros((rows, cols))
    for idx in np.arange(depth):
        # Semicircle profile: 1.0 at the center slice, 0.0 at the edges.
        weight = np.sqrt(1-((idx-depth/2)/(depth/2))**2)
        weight_total += weight
        weighted_sum += pixels[idx] * weight
    return weighted_sum/weight_total
def get_partition_images(pixels):
    """Slide a 51-slice Gaussian window down the stack in steps of 5 slices,
    producing one Gaussian-weighted 2D average image per window position.

    Returns a (possibly empty) list of 2D arrays with the volume's in-plane
    shape; volumes shallower than the window yield an empty list.

    Fix: scipy.signal.gaussian was an alias removed in SciPy 1.13 -- use
    scipy.signal.windows.gaussian, with a fallback for very old SciPy.
    """
    images = []
    window_width = 51
    partition_width = 5
    shape = np.shape(pixels)
    try:
        window = scipy.signal.windows.gaussian(window_width, std=7)
    except AttributeError:
        # SciPy < 1.1: windows submodule not available under this name.
        window = scipy.signal.gaussian(window_width, std=7)
    for i in np.arange(int((shape[0]-window_width)/partition_width)):
        sum_factor = 0
        sum_image = np.zeros((shape[1], shape[2]))
        for w in np.arange(window_width):
            sum_factor += window[w]
            sum_image += pixels[i*partition_width+w] * window[w]
        images.append(sum_image/sum_factor)
    return images
def get_training_image(image, base_image):
    """Stack the windowed slice image on top of the whole-volume base
    image along axis 0, forming a single training input."""
    combined = np.concatenate((image, base_image), axis=0)
    return combined
def plot_dataset(scan):
    """Display every image stored under one patient group of the HDF5
    dataset as a grid of grayscale thumbnails, five per row.

    NOTE(review): when image_number <= 5, plt.subplots returns a 1-D axes
    array and the 2-index `axes[i//5,i%5]` would fail -- pass squeeze=False
    if single-row groups can occur; confirm.
    """
    with h5py.File(os.path.join(OUTPUT_FOLDER, DATASET_FILE), "r") as h5f:
        group = h5f[scan]
        image_number = len(group)
        # Ceiling division: enough rows of five to hold every image.
        fig_rows = (image_number-1)//5+1
        fig, axes = plt.subplots(fig_rows, 5, figsize=(15, fig_rows*3))
        for i in np.arange(image_number):
            image = group[list(group)[i]][:]
            axes[i//5,i%5].axis('off')
            axes[i//5,i%5].imshow(image, cmap=plt.cm.bone)
        plt.show()
        # Redundant: the `with` block already closes the file on exit.
        h5f.close()
if __name__ == '__main__':
    # Build an HDF5 dataset of 2D training images (Gaussian-windowed slice
    # averages plus a whole-volume "base" image per patient) and emit
    # matching CSV label files.
    with h5py.File(os.path.join(OUTPUT_FOLDER, DATASET_FILE), "w") as dataset_file:
        df_label = pd.read_csv(os.path.join(INPUT_FOLDER, LABEL_FILE))
        df_label.head()
        # Columns recording each patient's resampled volume shape.
        # NOTE(review): np.int was removed in NumPy 1.24 -- use int/np.int64.
        df_label['shape_1'] = np.zeros(len(df_label), dtype=np.int)
        df_label['shape_2'] = np.zeros(len(df_label), dtype=np.int)
        df_label['shape_3'] = np.zeros(len(df_label), dtype=np.int)
        patients = os.listdir(os.path.join(INPUT_FOLDER, IMAGE_FOLDER))
        patients.sort()
        df_list = []
        for n in np.arange(len(patients)):
            # Skip patients without a ground-truth label.
            # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- use .loc.
            if len(df_label.ix[df_label['id']==patients[n]]) == 0:
                continue
            dataset_group = dataset_file.create_group(patients[n])
            patient = load_scan(os.path.join(INPUT_FOLDER, IMAGE_FOLDER, patients[n]))
            patient_pixels = get_pixels_hu(patient)
            # Resample to 2mm slices x 1mm x 1mm in-plane pixels.
            resampled_pixels, spacing = resample(patient_pixels, patient, [2,1,1])
            # shape_1 is doubled -- presumably converting the 2mm slice count
            # back to a z extent in millimetres; confirm.
            df_label.loc[df_label['id']==patients[n],'shape_1'] = resampled_pixels.shape[0] * 2
            df_label.loc[df_label['id']==patients[n],'shape_2'] = resampled_pixels.shape[1]
            df_label.loc[df_label['id']==patients[n],'shape_3'] = resampled_pixels.shape[2]
            base_image = get_base_image(resampled_pixels)
            # print("Shape before resampling\t", patient_pixels.shape)
            # print("Shape after resampling\t", resampled_pixels.shape)
            dataset_group.create_dataset("base", data=base_image)
            # fig, [ax_hist, ax_mid_image, ax_base_image] = plt.subplots(1, 3, figsize=(10, 4))
            # ax_hist.hist(resampled_pixels.flatten(), bins=80, color='c')
            # ax_mid_image.imshow(resampled_pixels[np.shape(resampled_pixels)[0]//2], cmap=plt.cm.bone)
            # ax_base_image.imshow(base_image, cmap=plt.cm.bone)
            # plt.savefig(os.path.join(OUTPUT_FOLDER, patients[n]+".png"))
            # plt.show()
            images = get_partition_images(resampled_pixels)
            #np.save(os.path.join(OUTPUT_FOLDER, patients[n]), images)
            #print("Patient", patients[n], "has", len(images), "images")
            # One CSV row per generated image; ids are "<patient>_<index>".
            df = pd.DataFrame({'id':list("?"*len(images)),'cancer':np.zeros(len(images),dtype=int)})
            for i in np.arange(len(images)):
                dataset_group.create_dataset(str(i), data=images[i])
                df.loc[i, ['id']] = patients[n] + "_" + str(i)
                # Every image inherits the patient-level cancer label.
                cancer = df_label.ix[df_label['id']==patients[n]]['cancer'].astype(int).values[0]
                df.loc[i, ['cancer']] = cancer
            print(patients[n], "done with shape", resampled_pixels.shape)
            df_list.append(df[['id','cancer']])
        # NOTE(review): close() inside the `with` block is redundant -- the
        # context manager closes the file on exit.
        dataset_file.close()
    df_datasets = pd.concat(df_list, ignore_index=True)
    df_datasets.to_csv(os.path.join(OUTPUT_FOLDER, "datasets.csv"), index=False)
    df_label.to_csv(os.path.join(OUTPUT_FOLDER, "labels.csv"), index=False)
# segmented_lung = segment_lung_mask(resampled_pixels, False)
# filled_lung = segment_lung_mask(resampled_pixels, True)
#
# plot_segmented_images(patient_pixels[80],
# resampled_pixels,
# segmented_lung,
# filled_lung-segmented_lung)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.