repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
luofan18/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
    """
    Display histogram of a TF distribution

    :param title: Title for the matplotlib figure.
    :param distribution_tensor: TF tensor/op producing the sampled values.
    :param hist_range: (min, max) range of the histogram bin edges.
    """
    with tf.Session() as sess:
        values = sess.run(distribution_tensor)

    plt.title(title)
    # np.linspace requires an integer sample count; use floor division so
    # this also works under Python 3, where len(values) / 2 is a float.
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
    plt.show()
def _get_loss_acc(dataset, weights):
    """
    Get losses and validation accuracy of example neural network

    :param dataset: Dataset object with `train` and `validation` splits
        exposing `next_batch`, `num_examples`, `images` and `labels`
        (e.g. the TF MNIST helper) -- TODO confirm against callers.
    :param weights: List of three TF weight variables for the 256-unit,
        128-unit and output layers, in that order.
    :return: (loss_batch, valid_acc) -- the per-batch training losses and
        the final validation accuracy.
    """
    batch_size = 128
    epochs = 2
    learning_rate = 0.001

    # Placeholders for inputs, targets and the fed-in learning rate.
    features = tf.placeholder(tf.float32)
    labels = tf.placeholder(tf.float32)
    learn_rate = tf.placeholder(tf.float32)

    # Biases are always zero-initialized; only the weights vary between
    # the initialization schemes being compared.
    biases = [
        tf.Variable(tf.zeros([256])),
        tf.Variable(tf.zeros([128])),
        tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
    ]

    # Layers
    layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
    logits = tf.matmul(layer_2, weights[2]) + biases[2]

    # Training loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    # Accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Measurements use for graphing loss
    loss_batch = []
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        batch_count = int((dataset.train.num_examples / batch_size))

        # The training cycle
        for epoch_i in range(epochs):
            for batch_i in range(batch_count):
                batch_features, batch_labels = dataset.train.next_batch(batch_size)

                # Run optimizer and get loss
                session.run(
                    optimizer,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                l = session.run(
                    loss,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                loss_batch.append(l)

        # learn_rate is unused by the accuracy op but must still be fed.
        valid_acc = session.run(
            accuracy,
            feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})

    # Hack to Reset batches: pokes private attributes of the dataset so a
    # subsequent call starts from the first batch again.
    dataset.train._index_in_epoch = 0
    dataset.train._epochs_completed = 0

    return loss_batch, valid_acc
def compare_init_weights(
        dataset,
        title,
        weight_init_list,
        plot_n_batches=100):
    """
    Plot loss and print stats of weights using an example neural network

    :param dataset: Dataset with train/validation splits, passed through
        to _get_loss_acc.
    :param title: Title for the loss plot.
    :param weight_init_list: List of (weights, label) tuples, one per
        weight-initialization scheme to compare.
    :param plot_n_batches: Number of leading batches to show on the plot.
    """
    colors = ['r', 'b', 'g', 'c', 'y', 'k']
    label_accs = []
    label_loss = []

    # One plot color per scheme; fail early if they cannot be distinguished.
    # (Fixed typo in the assertion message: "inital" -> "initial".)
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'

    for i, (weights, label) in enumerate(weight_init_list):
        loss, val_acc = _get_loss_acc(dataset, weights)
        plt.plot(loss[:plot_n_batches], colors[i], label=label)
        label_accs.append((label, val_acc))
        label_loss.append((label, loss[-1]))

    plt.title(title)
    plt.xlabel('Batches')
    plt.ylabel('Loss')
    # Place the legend outside the axes so it never covers the curves.
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()

    print('After 858 Batches (2 Epochs):')
    print('Validation Accuracy')
    for label, val_acc in label_accs:
        print('  {:7.3f}% -- {}'.format(val_acc*100, label))
    print('Loss')
    for label, loss in label_loss:
        print('  {:7.3f} -- {}'.format(loss, label))
| mit |
has2k1/plotnine | setup.py | 1 | 3346 | """
plotnine is an implementation of a *grammar of graphics* in Python,
it is based on ggplot2. The grammar allows users to compose plots
by explicitly mapping data to the visual objects that make up the
plot.
Plotting with a grammar is powerful: it makes custom (and otherwise
complex) plots easy to think about and then create, while the
simple plots remain simple.
To find out about all building blocks that you can use to create a
plot, check out the documentation_. Since plotnine has an API
similar to ggplot2, where we lack in coverage the
ggplot2 documentation may be of some help.
.. _documentation: https://plotnine.readthedocs.io/en/stable/
"""
import os
from setuptools import find_packages, setup
import versioneer
# Package metadata; consumed by setup() at the bottom of this file.
__author__ = 'Hassan Kibirige'
__email__ = 'has2k1@gmail.com'
__description__ = "A grammar of graphics for python"
__license__ = 'GPL-2'
__url__ = 'https://github.com/has2k1/plotnine'
# PyPI trove classifiers (https://pypi.org/classifiers/)
__classifiers__ = [
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: Unix',
    'Operating System :: MacOS',
    'Programming Language :: Python :: 3 :: Only',
    'Framework :: Matplotlib'
]
def check_dependencies():
    """
    Check for system level dependencies
    """
    # Nothing to verify at the moment; kept as a hook for future checks.
    pass
def get_required_packages():
    """
    Return required packages

    Plus any version tests and warnings
    """
    return [
        'mizani >= 0.7.3',
        'matplotlib >= 3.1.1',
        'numpy >= 1.19.0',
        'scipy >= 1.5.0',
        'patsy >= 0.5.1',
        'statsmodels >= 0.12.1',
        'pandas >= 1.1.0',
        # 'geopandas >= 0.3.0',
        'descartes >= 1.1.0',
    ]
def get_extra_packages():
    """
    Return extra packages

    Plus any version tests and warnings
    """
    # Single 'all' extra pulling in the optional statistics backends.
    return {'all': ['scikit-learn', 'scikit-misc']}
def get_package_data():
    """
    Return package data

    For example:

        {'': ['*.txt', '*.rst'],
         'hello': ['*.msg']}

    means:
        - If any package contains *.txt or *.rst files,
          include them
        - And include any *.msg files found in
          the 'hello' package, too:
    """
    # One glob per baseline-image subdirectory, plus the bundled CSV data.
    image_dirs = os.listdir('plotnine/tests/baseline_images')
    baseline_images = ['tests/baseline_images/%s/*' % d for d in image_dirs]
    csv_data = ['data/*.csv']
    return {'plotnine': baseline_images + csv_data}
if __name__ == '__main__':
    check_dependencies()

    # All metadata is defined at the top of this module; versioneer derives
    # the version and cmdclass from the VCS tags.
    setup(name='plotnine',
          maintainer=__author__,
          maintainer_email=__email__,
          description=__description__,
          long_description=__doc__,
          license=__license__,
          version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass(),
          url=__url__,
          python_requires='>=3.6',
          install_requires=get_required_packages(),
          extras_require=get_extra_packages(),
          packages=find_packages(),
          package_data=get_package_data(),
          classifiers=__classifiers__,
          zip_safe=False)
| gpl-2.0 |
fmacias64/MoodJournalAmerica | viz_data/news_visualizations.py | 2 | 19421 | """
Update data underlying visualizations on Mood Journal America
Some copy & paste from original qacprojects script
Runs on crontab, daily_download -> topicmoddeling.py -> << this >>
@authored malam,habdulkafi 31 June 2014
I/O,query functions originally by rpetchler,malam from qacprojects query script
Changelog:
@updated: malam, 7 July 2014 - added wordcloud query
@updated: habdulkafi, 21 July 2014 - added gtrends,heatmap,movie queries
"""
import codecs
import csv
import json
import functools
import multiprocessing
import os
import re
import string
import urlparse

import bs4
import MySQLdb
import nltk
import numpy as np
import pandas as pd
import requests
import us
from numpy import round
from pandas.io import sql
from rottentomatoes import RT
# Destination directory for every data file the website reads.
static_dir = '/home/twitter-data/website/qacprojects/static/data/twitter_project/tmp_data_test'
# dict based directly on official FIPS code
fipsdict = {'Northeast':{'New England':['09','23','25','33','44','50'],'Middle Atlantic':['34','36','42']},'Midwest':{'East North Central':['18','17','26','39','55'],'West North Central':['19','20','27','29','31','38','46']},'South':{'South Atlantic':['10','11','12','13','24','37','45','51','54'],'East South Central':['01','21','28','47'],'West South Central':['05','22','40','48']},'West':{'Mountain':['04','08','16','35','30','49','32','56'],'Pacific':['02','06','15','41','53']}}
# Flat list of the nine Census sub-region names.
subregions = []
for region in fipsdict.keys():
    subregions = subregions + fipsdict[region].keys()
# because we're uploading to two separate databases
with open('p_param1.json') as f:
    p = json.load(f)
with open('z_param2.json') as q:
    z = json.load(q)
# news database connection
conn = MySQLdb.connect(host=p['mysql']['host'],
                       user=p['mysql']['user'],
                       passwd=p['mysql']['passwd'],
                       db=p['mysql']['db'],
                       charset='utf8')
# twitter database
conn2 = MySQLdb.connect(host=z['mysql']['host'],
                        user=z['mysql']['user'],
                        passwd=z['mysql']['passwd'],
                        db=z['mysql']['db'],
                        charset='utf8')
# a bit irrelevant here, inherited from qacprojects script
categories = {1: 'Republican', 2: 'Democrat'}
chambers={0: 'House', 1: 'Senate'}
chambercolors={0: '#F8DC3',1: '#7FBF7B'}
colors = {1: '#D62728', 2: '#1F77B4'}
######################################################################
# I/O Functions
def decorator(func):
    """Decorate I/O functions with filesystem operations.

    Rewrites the decorated writer's `filename` argument to live inside
    `static_dir` before delegating to the wrapped function.
    """
    # functools.wraps preserves the wrapped writer's name/docstring; the
    # original left every writer named 'wrapper'.
    @functools.wraps(func)
    def wrapper(filename, data):
        filename = os.path.join(static_dir, filename)
        func(filename, data)
        # os.chmod(filename, 0664)
    return wrapper
@decorator
def write_json(filename, data):
    """Write JSON data to a file.

    `filename` is rewritten relative to `static_dir` by the decorator.
    """
    with codecs.open(filename, 'w', 'utf8') as f:
        # Compact separators keep the served files small.
        json.dump(data, f, separators=(',', ':'))
@decorator
def write_csv(filename, df):
    """Write a Pandas DataFrame to CSV format."""
    # index=False: the positional index is meaningless to the website.
    df.to_csv(filename, index=False, encoding='utf8')
@decorator
def write_html(filename, df):
    """Write a Pandas DataFrame to a Boostrap-classed HTML table."""
    html = df.to_html(index=False, classes=['table', 'table-condensed'])
    # Strip attributes that clash with the site's Bootstrap styling.
    html = html.replace('border="1" ', '')
    html = html.replace('dataframe ', '')
    html = html.replace(' style="text-align: right;"', '')
    with codecs.open(filename, 'w', 'utf8') as f:
        f.write(html)
######################################################################
# Query Functions
def nest(df, key=None, **kwargs):
    """Nest a series into JSON format for NVD3.

    The JavaScript data structure has at least two item: `key`, a
    string or integer used as the series label, and `values`, an array
    of two-element arrays which contain the series indices and values.
    Use kwargs to pass additionally items to the series, such as the
    series color.

    Args:
        df: A Pandas Series.
        key: The name of the matching key in the JSON data structure.

    Returns:
        A nested dictionary which, when appended to a list, is
        a suitable data set for visualization with NVD3.
    """
    # Series.iteritems() was removed in pandas 2.0; .items() is the
    # long-standing equivalent and behaves identically here.
    values = [(k, int(v)) for k, v in df.items()]
    series = {'key': key, 'values': values}
    for k, v in kwargs.items():
        series[k] = v
    return series
def group_nest(df, key=None, **kwargs):
    """Nest a data frame into JSON format for NVD3.

    Expects columns 'label', 'value' and 'category'; produces one nested
    series per category (see `nest`), restricted to the 20 labels with the
    highest total value.
    """
    top = df.groupby('label').agg({'value': np.sum})
    # DataFrame.sort() and .ix[] were removed from pandas. sort_values()
    # is the direct replacement, and reindex() reproduces the old
    # .ix[top] label-alignment (missing labels become NaN, then 0).
    top = top.sort_values('value', ascending=False).head(20).index
    nested = []
    for name, group in df.groupby('category'):
        group = group[['label', 'value']].set_index('label')
        group = group.reindex(top).fillna(0)
        group = nest(group.value, key=categories[name], color=colors[name])
        nested.append(group)
    return nested
def query_urls(q, params=None, n=20):
    """Counts the number of URLs in a query.

    This wraps the URL parsing routine. Ensure that the SQL query
    returns a column of URLs.

    Args:
        q: A string SQL query.
        params: An optional list or tuple of query parameters.
        n: The number of top records to retrieve.

    Returns:
        An indexed, one-column Pandas DataFrame. The index contains
        URLs and the column contains frequencies.
    """
    # NOTE(review): pandas.io.sql.read_frame and urlparse are legacy
    # (Python 2 / old-pandas) APIs; this module targets that environment.
    df = sql.read_frame(q, conn, params=params)
    # Reduce each URL to its network location (domain) before counting.
    df = df['url'].apply(lambda x: urlparse.urlparse(x).netloc)
    df = df.value_counts()
    df = df.head(n)
    df = pd.DataFrame({'label': df.index, 'value': df}).set_index('label')
    return df
# CREATES THE HTML TABLE FOR THE GOOGLE TRENDS
def write_google_html(filename, df):
    """Write a Pandas DataFrame to a Boostrap-classed HTML table.

    Unlike the decorated writers above, this writes `filename` as given
    (relative to the working directory), not under `static_dir`.
    """
    # escape=False keeps the pre-built <a href> link column intact.
    html = df.to_html(index=False, columns = ['America\'s Priorities','explained...','SEE FOR YOURSELF'], classes=['table','table-striped'],escape = False)
    html = html.replace('border="1" ', '')
    html = html.replace('dataframe ', '')
    # html = html.replace(' style="text-align: left;"', '')
    html = html.replace('style="text-align: right;"','style="text-align: right; margin-right:15px;"')
    with codecs.open(filename, 'w', 'UTF-8') as f:
        f.write(html)
######################################################################
# Sentiment / Geo
cur = conn.cursor()

# Per-state sentiment rate (pos - neg) for today's articles, keyed by FIPS id.
q='''SELECT g.fips AS id, g.region, g.sub_region, g.state, CAST( p.pos AS SIGNED INT ) - CAST( p.neg AS SIGNED INT ) AS rate
FROM daily_download AS p
INNER JOIN geo AS g ON g.state = p.state
WHERE p.timestamp = CURDATE()'''
df = sql.read_frame(q, conn)
write_csv('news_sent_overall.csv', df)

# Query for overall topic modelling
q='''SELECT topics FROM daily_overall
WHERE date = CURDATE()'''
df = sql.read_frame(q,conn)
write_csv('topics_overall.txt',df)

# Query for overall topic sentiment rate
q='''SELECT sentiment FROM daily_overall
WHERE date = CURDATE()'''
df = sql.read_frame(q,conn)
write_csv('news-sent-single.csv',df)

# Text for WordCloud
q='''SELECT text,timestamp AS date FROM daily_download
WHERE timestamp = CURDATE()'''
df = sql.read_frame(q,conn)
write_csv('news_text_all.csv',df)

# this query joins daily_download with daily_geo tables
q = u'SELECT d.pos, d.neg, g.fips, g.region, g.sub_region, g.state FROM geo as g INNER JOIN daily_download as d on d.state=g.state WHERE timestamp = CURDATE();'
df = sql.read_frame(q ,conn)
df['rate'] = df['pos'] - df['neg']


def _mean_rate(subset):
    """Mean sentiment rate (pos - neg, averaged over rows) of a df subset."""
    return (subset['pos'].sum() - subset['neg'].sum() + 0.0) / len(subset.index)


# Region- and sub-region-level mean rates. Deduplicated: the original
# repeated the same expression once per region.
for region_name in ('Northeast', 'Midwest', 'South', 'West'):
    df.loc[df['region'] == region_name, 'rrate'] = _mean_rate(df[df['region'] == region_name])
for subregion in subregions:
    df.loc[df['sub_region'] == subregion, 'srate'] = _mean_rate(df[df['sub_region'] == subregion])

df = df.rename(columns = {'fips':'id'})
df = df[['id','region','sub_region','state','rate','rrate','srate']]
df['rrate'] = round(df['rrate'],7)
df['srate'] = round(df['srate'],7)
df.to_csv('/home/twitter-data/website/qacprojects/static/data/twitter_project/tmp_data_test/news_sent_total.csv',index = False)
######### Issue Queries ##############
# Requires a separate connection to twitter database #
# The hourly and daily queries are identical for every issue except for the
# issue_key and the output-file prefix, so generate them from templates
# instead of eight copy/pasted blocks.
# issue_key -> file prefix: 1=human rights, 2=health care,
# 3=environment, 4=education.
_ISSUES = [(1, 'hr'), (2, 'hc'), (3, 'env'), (4, 'edu')]

_HOUR_Q = '''SELECT UNIX_TIMESTAMP( DATE_ADD( DATE_FORMAT( CONVERT_TZ( s.created_at, '+00:00', '-04:00' ) , "%Y-%m-%d %H:00:00" ) , INTERVAL IF( MINUTE( s.created_at ) <30, 0, 1 ) HOUR ) ) *1000 AS label,
CAST( p.positive AS SIGNED INT ) - CAST( p.negative AS SIGNED INT ) AS value
FROM status AS s
INNER JOIN sentiment AS p ON p.status_id = s.status_id
WHERE `issue_key` = {key}
GROUP BY label;'''

_DAY_Q = '''SELECT UNIX_TIMESTAMP( DATE_FORMAT( CONVERT_TZ( s.created_at, '+00:00', '-04:00' ) , "%Y-%m-%d" ) ) *1000 AS label, CAST( p.positive AS SIGNED ) - CAST( p.negative AS SIGNED ) AS value
FROM status AS s
INNER JOIN sentiment AS p ON p.status_id = s.status_id
WHERE `issue_key` = {key}
GROUP BY label;'''

# Health care (issue 2) additionally gets an overall per-timestamp series.
q='''SELECT created_at AS label,
CAST( p.positive AS SIGNED INT ) - CAST( p.negative AS SIGNED INT ) AS value
FROM status AS s
INNER JOIN sentiment AS p ON p.status_id = s.status_id
WHERE `issue_key` = 2
GROUP BY label;'''
df = sql.read_frame(q, conn2)
write_csv('hc-sent-overall.csv', df)

# Per-hour and per-day series for every tracked issue.
for issue_key, prefix in _ISSUES:
    df = sql.read_frame(_HOUR_Q.format(key=issue_key), conn2)
    write_csv('%s-sent-hour.csv' % prefix, df)
    df = sql.read_frame(_DAY_Q.format(key=issue_key), conn2)
    write_csv('%s-sent-day.csv' % prefix, df)
#################################################################################
############ GOOGLE TRENDING TOPICS ############################################
# Strips any HTML tags embedded in the feed text.
regexhtml = re.compile(r'<.*?>')
r = requests.get('http://www.google.com/trends/hottrends/atom/feed?pn=p1')
soup = bs4.BeautifulSoup(r.content) # the prettiest of all soups
mainlist =[]
# Entry 0 is the feed's own title; entries 1-10 are the top ten trends.
for title in soup.find_all('title')[1:11]:
    tempdict = {}
    tempdict['America\'s Priorities'] = regexhtml.sub('',title.text).encode('utf8')
    mainlist.append(tempdict)
# Each trend carries two news items; [0::2] keeps the first of each pair,
# aligned by position with the ten trend dicts built above.
i = 0
for newstitle in soup.find_all('ht:news_item_title')[:20][0::2]:
    mainlist[i]['Newstitle'] = regexhtml.sub('',newstitle.text).encode('utf8')
    i += 1
i = 0
for link in soup.find_all('ht:news_item_url')[:20][0::2]:
    mainlist[i]['url'] = link.text.encode('utf8')
    i += 1
# Build the clickable-link column from the url + title gathered above.
for topic in mainlist:
    topic['SEE FOR YOURSELF'] = '<a href="' + topic['url'] + '">' + topic['Newstitle'] + '</a>'
i = 0
for snippet in soup.find_all('ht:news_item_snippet')[:20][0::2]:
    mainlist[i]['explained...'] = regexhtml.sub('',snippet.text).encode('utf8')
    i += 1
dframe = pd.DataFrame(mainlist)
# Wide column width so snippets are not truncated in the rendered table.
pd.options.display.max_colwidth = 300 #20000
# Only the top four trends make it onto the site.
write_google_html('gtrends.html',dframe[:4])
####################################################################
######## Create Data for Adjacency Matrix #######################
states = us.states.mapping('fips','abbr').keys()
# NOTE(review): assumes the None key sits at position 1 of the keys list;
# dict ordering is arbitrary in Python 2, so this is fragile -- verify.
del states[1] # None value
mapp = us.states.mapping("fips","abbr")
# Sub-region name -> group id used for node coloring in the D3 matrix.
# (Shadows the `subregions` list defined near the top of the module.)
subregions = {'Mountain':1,'Pacific':2,'New England':3,'Middle Atlantic':4,'East North Central':5,'West North Central':6,'East South Central':7,'West South Central':8,'South Atlantic':9}
q = u'SELECT g.fips, g.state, d.text FROM geo as g INNER JOIN daily_download as d on d.state=g.state WHERE timestamp = CURDATE();'
df = sql.read_frame(q ,conn)
# Tokenization helpers: strip boundary punctuation, require a letter.
punct = re.compile(r'^[^A-Za-z0-9]+|[^a-zA-Z0-9]+$')
is_word=re.compile(r'[a-z]', re.IGNORECASE)
sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
word_tokenizer=nltk.tokenize.punkt.PunktWordTokenizer()
def get_words(sentence):
    """Tokenize a sentence, strip boundary punctuation, and keep only
    tokens containing at least one letter."""
    return [punct.sub('',word) for word in word_tokenizer.tokenize(sentence) if is_word.search(word)]
def ngrams(text, n):
    """Yield every word n-gram (space-joined string) from each sentence
    of `text`, lowercased. N-grams never cross sentence boundaries."""
    for sentence in sentence_tokenizer.tokenize(text.lower()):
        words = get_words(sentence)
        for i in range(len(words)-(n-1)):
            yield(' '.join(words[i:i+n]))
def jaccard(set1, set2):
    """Return the percentage similarity of two sets.

    NOTE: despite the name, this is the Sorensen-Dice coefficient
    (2*|A & B| / (|A| + |B|)) scaled to [0, 100], not the Jaccard index.
    The name is kept for backward compatibility with callers.

    Returns 0.0 when both sets are empty (the original raised
    ZeroDivisionError in that case).
    """
    total = len(set1) + len(set2)
    if total == 0:
        return 0.0
    inter = set1 & set2
    # union = set1 | set2
    return (float(2.0*len(inter))/float(total))*100
# Concatenate all of today's article text per state: FIPS code -> text blob.
megadict = {}
for state in states:
    megadict[state] = ' '.join(df[df['fips'] == int(state)]['text'].tolist())
# Drop states with no text today so similarity scores are well defined.
# (Safe in Python 2 because keys() returns a list snapshot.)
for state in megadict.keys():
    if len(megadict[state]) == 0:
        del megadict[state]
def heatmap(statename):
    """Build the full similarity row for `statename` against every state
    with text today, as (other_state, statename, score) tuples.

    Similarity is the `jaccard` (Dice) score over 4-gram sets; a state is
    defined as 100% similar to itself. Reads the module-level `megadict`.
    """
    fulllist = []
    for state1 in megadict.keys():
        if state1 == statename:
            fulllist.append((state1,statename,100.0))
        else:
            fulllist.append((state1,statename,jaccard(set(ngrams(megadict[state1],4)),set(ngrams(megadict[statename],4)))))
    return fulllist
# Compute pairwise state similarities in parallel, one state per task.
stateiterator = iter(megadict.keys())
pool = multiprocessing.Pool()
j_list = pool.map(heatmap,stateiterator,chunksize = 10)
# Flatten the per-state lists into one list of (state1, state2, score).
j_list = [item for sublist in j_list for item in sublist]
df1 = pd.DataFrame(j_list)
df1.columns = ['state1','state2','jaccard']
df1.to_csv('data_heatmap.csv',index=False)
df2 = pd.read_csv("statestdict.csv")
statesdict = df2.to_dict('records')
# Build the node/link structure expected by the D3 adjacency matrix.
bigdict = {}
bigdict["nodes"] = []
bigdict["links"] = []
for state in statesdict:
    bigdict["nodes"].append({"name":state["state"],"group":subregions[state["subregion"]]})
df1 = df1.to_dict('records')
# Zero-pad FIPS codes back to two characters (pandas coerced them to ints).
for row in df1:
    row['state1'] = str(int(row['state1']))
    if len(row['state1']) == 1:
        row['state1'] = '0' + row['state1']
    row['state2'] = str(int(row['state2']))
    if len(row['state2']) == 1:
        row['state2'] = '0' + row['state2']
# O(nodes^2 * rows) matching of every node pair to its similarity row.
for i in bigdict["nodes"]:
    for j in bigdict["nodes"]:
        for row in df1:
            if mapp[row['state1']] == i['name'] and mapp[row['state2']] == j['name']:
                # NOTE(review): this assignment shadows the module-level
                # jaccard() function from here on.
                jaccard = row["jaccard"]
                bigdict["links"].append({"source":bigdict["nodes"].index(i),"target":bigdict["nodes"].index(j),"value":jaccard})
with codecs.open('ajmatrix.json','w','utf8') as f:
    json.dump(bigdict, f, separators=(',',':'))
####################################################################
######## Data for America's Daily Movie #####################
reg1 = re.compile("\$\(function.+")
reg2 = re.compile('\s')
r = requests.get("http://instantwatcher.com/") # instantwatcher has exclusive access to Netflix data despite API depreciate
soup = bs4.BeautifulSoup(r.content)
pop_titles = soup.findAll('div', {'class': 'span-8 homepage-most-popular'}) #always located here
def parse_string(el):
text = ''.join(el.findAll(text=True))
return text.strip()
for title in pop_titles:
titles = map(parse_string,title.findAll('a'))
synop = []
for i in titles:
try:
wholedict = RT(auth_key).search(i)
allinks = wholedict[0]['links']['alternate']
characters = []
for j in wholedict[0]['abridged_cast']:
characters.append(j['name'].encode('utf8'))
r = requests.get(allinks)
soup = bs4.BeautifulSoup(r.content)
tempdict = {}
tempdict['title'] = i
tempdict['synopsis'] = re.sub(reg1,'',re.sub(reg2,' ',soup.findAll('p', {'id':"movieSynopsis"})[0].text)).encode('utf8')
for actor in characters:
tempdict['actor_' + str(characters.index(actor))] = actor
tempdict['rating'] = wholedict[0]['mpaa_rating']
synop.append(tempdict)
except Exception, e:
print i,e
df = pandas.DataFrame(synop)
all_actors = df['actor_0'].tolist() + df['actor_1'].tolist() + df['actor_2'].tolist() + df['actor_3'].tolist() + df['actor_4'].tolist()
top_actors = {}
for actor in all_actors:
if type(actor) == type(0.0):
all_actors.remove(actor)
elif actor in top_actors.keys():
top_actors[actor] += 1
else:
top_actors[actor] = 1
ratings = df['rating']
top_rating = {}
for rating in ratings:
if type(rating) == type(0.0):
ratings.remove(rating)
elif rating in top_rating.keys():
top_rating[rating] += 1
else:
top_rating[rating] = 1
sorted_actors = sorted(top_actors, key=top_actors.get,reverse=True)
sorted_ratings = sorted(top_rating, key=top_rating.get,reverse=True)
with open('daily_movies.csv','wb') as f:
wr = csv.writer(f)
wr.writerow(['actors','rating'])
for ac in sorted_actors:
wr.writerow([ac,sorted_ratings[0]])
f.close()
df.to_csv('movie_synopses.csv',index=False)
conn.close()
conn2.close()
| bsd-3-clause |
pgrinaway/yank | Yank/utils.py | 1 | 61480 | import os
import re
import sys
import copy
import glob
import json
import shutil
import signal
import pandas
import inspect
import logging
import itertools
import subprocess
import collections
from contextlib import contextmanager
from pkg_resources import resource_filename
import mdtraj
import parmed
import numpy as np
from simtk import unit
from schema import Optional, Use
from openmoltools.utils import wraps_py2, unwrap_py2 # Shortcuts for other modules
#========================================================================================
# Logging functions
#========================================================================================
def is_terminal_verbose():
    """Check whether the logging on the terminal is configured to be verbose.

    This is useful in case one wants to occasionally print something that is not really
    relevant to yank's log (e.g. external library verbose, citations, etc.).

    Returns
    is_verbose : bool
        True if the terminal is configured to be verbose, False otherwise.

    """
    for handler in logging.root.handlers:
        # logging.FileHandler is a subclass of logging.StreamHandler, so
        # an exact type() check is required to single out plain terminal
        # handlers (isinstance/issubclass would match file handlers too).
        if type(handler) is logging.StreamHandler and handler.level <= logging.DEBUG:
            return True
    # No plain StreamHandler at DEBUG level (also covers "no handlers").
    return False
def config_root_logger(verbose, log_file_path=None, mpicomm=None):
    """Setup the root logger's configuration.

    The log messages are printed in the terminal and saved in the file specified
    by log_file_path (if not None) and printed. Note that logging use sys.stdout
    to print logging.INFO messages, and stderr for the others. The root logger's
    configuration is inherited by the loggers created by logging.getLogger(name).

    Different formats are used to display messages on the terminal and on the log
    file. For example, in the log file every entry has a timestamp which does not
    appear in the terminal. Moreover, the log file always shows the module that
    generate the message, while in the terminal this happens only for messages
    of level WARNING and higher.

    Parameters
    ----------
    verbose : bool
        Control the verbosity of the messages printed in the terminal. The logger
        displays messages of level logging.INFO and higher when verbose=False.
        Otherwise those of level logging.DEBUG and higher are printed.
    log_file_path : str, optional, default = None
        If not None, this is the path where all the logger's messages of level
        logging.DEBUG or higher are saved.
    mpicomm : mpi4py.MPI.COMM communicator, optional, default=None
        If specified, this communicator will be used to determine node rank.

    """
    class TerminalFormatter(logging.Formatter):
        """
        Simplified format for INFO and DEBUG level log messages.

        This allows to keep the logging.info() and debug() format separated from
        the other levels where more information may be needed. For example, for
        warning and error messages it is convenient to know also the module that
        generates them.
        """
        # This is the cleanest way I found to make the code compatible with both
        # Python 2 and Python 3
        simple_fmt = logging.Formatter('%(asctime)-15s: %(message)s')
        default_fmt = logging.Formatter('%(asctime)-15s: %(levelname)s - %(name)s - %(message)s')

        def format(self, record):
            # Bare format for INFO/DEBUG, detailed format (level + module)
            # for WARNING and above.
            if record.levelno <= logging.INFO:
                return self.simple_fmt.format(record)
            else:
                return self.default_fmt.format(record)

    # Check if root logger is already configured; if so, strip all of its
    # handlers so repeated calls do not duplicate output.
    n_handlers = len(logging.root.handlers)
    if n_handlers > 0:
        root_logger = logging.root
        for i in range(n_handlers):
            root_logger.removeHandler(root_logger.handlers[0])

    # If this is a worker node, don't save any log file
    if mpicomm:
        rank = mpicomm.rank
    else:
        rank = 0

    # Create different log files for each MPI process by suffixing the rank.
    if rank != 0 and log_file_path is not None:
        basepath, ext = os.path.splitext(log_file_path)
        log_file_path = '{}_{}{}'.format(basepath, rank, ext)

    # Add handler for stdout and stderr messages
    terminal_handler = logging.StreamHandler()
    terminal_handler.setFormatter(TerminalFormatter())
    if rank != 0:
        # Worker nodes only surface warnings and errors on the terminal.
        terminal_handler.setLevel(logging.WARNING)
    elif verbose:
        terminal_handler.setLevel(logging.DEBUG)
    else:
        terminal_handler.setLevel(logging.INFO)
    logging.root.addHandler(terminal_handler)

    # Add file handler to root logger; the file always records DEBUG+.
    if log_file_path is not None:
        file_format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
        file_handler = logging.FileHandler(log_file_path)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(file_format))
        logging.root.addHandler(file_handler)

    # Do not handle logging.DEBUG at all if unnecessary
    if log_file_path is not None:
        logging.root.setLevel(logging.DEBUG)
    else:
        logging.root.setLevel(terminal_handler.level)
# =======================================================================================
# MPI utility functions
# =======================================================================================
def initialize_mpi():
    """Initialize and configure MPI to handle correctly terminate.

    Returns
    -------
    mpicomm : mpi4py communicator
        The communicator for this node, or None when the process was not
        launched by an MPI runner (detected via environment variables).

    """
    # Check for environment variables set by mpirun. Variables are from
    # http://docs.roguewave.com/threadspotter/2012.1/linux/manual_html/apas03.html
    variables = ['PMI_RANK', 'OMPI_COMM_WORLD_RANK', 'OMPI_MCA_ns_nds_vpid',
                 'PMI_ID', 'SLURM_PROCID', 'LAMRANK', 'MPI_RANKID',
                 'MP_CHILD', 'MP_RANK', 'MPIRUN_RANK']
    use_mpi = False
    for var in variables:
        if var in os.environ:
            use_mpi = True
            break
    if not use_mpi:
        # Not running under MPI; caller should fall back to serial mode.
        return None

    # Initialize MPI (deferred import so mpi4py is only needed under MPI).
    from mpi4py import MPI
    MPI.COMM_WORLD.barrier()
    mpicomm = MPI.COMM_WORLD

    # Override sys.excepthook to abort MPI on exception
    def mpi_excepthook(type, value, traceback):
        sys.__excepthook__(type, value, traceback)
        sys.stdout.flush()
        sys.stderr.flush()
        if mpicomm.size > 1:
            mpicomm.Abort(1)
    # Use our exception handler
    sys.excepthook = mpi_excepthook

    # Catch sigterm signals. NOTE(review): the `signal` parameter shadows
    # the `signal` module inside the handler; the loop below still sees
    # the module because it runs in the enclosing scope.
    def handle_signal(signal, frame):
        if mpicomm.size > 1:
            mpicomm.Abort(1)
    for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGABRT]:
        signal.signal(sig, handle_signal)

    return mpicomm
@contextmanager
def delay_termination():
    """Context manager to delay handling of termination signals.

    SIGINT/SIGTERM/SIGABRT received while the managed block runs are
    recorded instead of handled; on exit the previous handlers are
    restored and any recorded signal is delivered to its old handler.

    Fixes vs. the original: `listitems` was an undefined name (NameError
    on exit), and the handlers are now restored in a try/finally so an
    exception inside the block cannot leave the delay handlers installed.
    """
    signals_to_catch = [signal.SIGINT, signal.SIGTERM, signal.SIGABRT]
    old_handlers = {signum: signal.getsignal(signum) for signum in signals_to_catch}
    signals_received = {signum: None for signum in signals_to_catch}

    def delay_handler(signum, frame):
        # Remember the signal; it will be re-raised after the block exits.
        signals_received[signum] = (signum, frame)

    # Set handlers for delay
    for signum in signals_to_catch:
        signal.signal(signum, delay_handler)
    try:
        yield  # Resume program
    finally:
        # Restore old handlers
        for signum, handler in old_handlers.items():
            signal.signal(signum, handler)
        # Fire delayed signals
        for signum, s in signals_received.items():
            if s is not None:
                old_handlers[signum](*s)
def delayed_termination(func):
    """Decorator to delay handling of termination signals during function execution.

    The wrapped callable runs inside the delay_termination() context, so
    SIGINT/SIGTERM/SIGABRT are deferred until it returns.
    """
    @wraps_py2(func)
    def wrapper(*args, **kwargs):
        with delay_termination():
            return func(*args, **kwargs)
    return wrapper
# =======================================================================================
# Combinatorial tree
# =======================================================================================
class CombinatorialLeaf(list):
    """List type that can be expanded combinatorially in CombinatorialTree."""

    def __repr__(self):
        # Delegate to the plain list repr and wrap it in the marker text.
        return "Combinatorial({})".format(list.__repr__(self))
class CombinatorialTree(collections.MutableMapping):
    """A tree that can be expanded in a combinatorial fashion.
    Each tree node with its subnodes is represented as a nested dictionary. Nodes can be
    accessed through their specific "path" (i.e. the list of the nested dictionary keys
    that lead to the node value).
    Values of a leaf nodes that are list-like objects can be expanded combinatorially in
    the sense that it is possible to iterate over all possible combinations of trees that
    are generated by taking leaf node list and create a sequence of trees, each one
    defining only one of the single values in those lists per leaf node (see Examples).
    Examples
    --------
    Set an arbitrary nested path
    >>> tree = CombinatorialTree({'a': {'b': 2}})
    >>> path = ('a', 'b')
    >>> tree[path]
    2
    >>> tree[path] = 3
    >>> tree[path]
    3
    Paths can be accessed also with the usual dict syntax
    >>> tree['a']['b']
    3
    Deletion of a node leave an empty dict!
    >>> del tree[path]
    >>> print(tree)
    {'a': {}}
    Expand all possible combinations of a tree. The iterator return a dict, not another
    CombinatorialTree object.
    >>> import pprint  # pprint sort the dictionary by key before printing
    >>> tree = CombinatorialTree({'a': 1, 'b': CombinatorialLeaf([1, 2]),
    ...                           'c': {'d': CombinatorialLeaf([3, 4])}})
    >>> for t in tree:
    ...     pprint.pprint(t)
    {'a': 1, 'b': 1, 'c': {'d': 3}}
    {'a': 1, 'b': 2, 'c': {'d': 3}}
    {'a': 1, 'b': 1, 'c': {'d': 4}}
    {'a': 1, 'b': 2, 'c': {'d': 4}}
    Expand all possible combinations and assign unique names
    >>> for name, t in tree.named_combinations(separator='_', max_name_length=5):
    ...     print(name)
    3_1
    3_2
    4_1
    4_2
    """
    # NOTE(review): collections.MutableMapping (and collections.Mapping used below)
    # moved to collections.abc in Python 3.3 and the old aliases were removed in
    # Python 3.10 — switch to collections.abc when Python 2 support is dropped.
    def __init__(self, dictionary):
        """Build a combinatorial tree from the given dictionary."""
        # Deep-copy so later in-place edits never mutate the caller's dictionary.
        self._d = copy.deepcopy(dictionary)
    def __getitem__(self, path):
        # A key can be either a plain root-level key or a "path" (tuple of
        # nested keys); try the cheap direct lookup first.
        try:
            return self._d[path]
        except KeyError:
            return self._resolve_path(self._d, path)
    def __setitem__(self, path, value):
        # Resolve the parent node, then set the final key on it.
        d_node = self.__getitem__(path[:-1])
        d_node[path[-1]] = value
    def __delitem__(self, path):
        # Deletes only the final key; intermediate (possibly empty) dicts remain.
        d_node = self.__getitem__(path[:-1])
        del d_node[path[-1]]
    def __len__(self):
        # Length of the root level only, not the number of leaves.
        return len(self._d)
    def __str__(self):
        return str(self._d)
    def __eq__(self, other):
        # Compares the underlying dict, so equality with plain dicts works too.
        return self._d == other
    def __iter__(self):
        """Iterate over all possible combinations of trees.
        The iterator returns dict objects, not other CombinatorialTrees.
        """
        leaf_paths, leaf_vals = self._find_combinatorial_leaves()
        return self._combinations_generator(leaf_paths, leaf_vals)
    def named_combinations(self, separator, max_name_length):
        """Iterate over all possible combinations of trees and assign them unique names.
        The names are generated by gluing together the first letters of the values of
        the combinatorial leaves only, separated by the given separator. If the values
        contain special characters, they are ignored. Only letters, numbers and the
        separator are found in the generated names. Values representing paths to
        existing files contribute to the name only with they file name without extensions.
        The iterator yields tuples of (name, dict), not other CombinatorialTrees. If
        there is only a single combination, an empty string is returned for the name.
        Parameters
        ----------
        separator : str
            The string used to separate the words in the name.
        max_name_length : int
            The maximum length of the generated names, excluding disambiguation number.
        """
        leaf_paths, leaf_vals = self._find_combinatorial_leaves()
        generated_names = {}  # name: count, how many times we have generated the same name
        # Compile regular expression used to discard special characters
        # NOTE(review): 'filter' shadows the builtin, and '\d' in a non-raw string
        # is an invalid escape in recent Python — consider r'[^A-Za-z\d]+' and a rename.
        filter = re.compile('[^A-Za-z\d]+')
        # Iterate over combinations
        for combination in self._combinations_generator(leaf_paths, leaf_vals):
            # Retrieve single values of combinatorial leaves
            filtered_vals = [str(self._resolve_path(combination, path)) for path in leaf_paths]
            # Strip down file paths to only the file name without extensions
            for i, val in enumerate(filtered_vals):
                if os.path.exists(val):
                    filtered_vals[i] = os.path.basename(val).split(os.extsep)[0]
            # Filter special characters in values that we don't use for names
            filtered_vals = [filter.sub('', val) for val in filtered_vals]
            # Generate name
            if len(filtered_vals) == 0:
                name = ''
            elif len(filtered_vals) == 1:
                name = filtered_vals[0][:max_name_length]
            else:
                name = separator.join(filtered_vals)
                original_vals = filtered_vals[:]
                while len(name) > max_name_length:
                    # Sort the strings by descending length, if two values have the
                    # same length put first the one whose original value is the shortest
                    sorted_vals = sorted(enumerate(filtered_vals), reverse=True,
                                         key=lambda x: (len(x[1]), -len(original_vals[x[0]])))
                    # Find how many strings have the maximum length
                    max_val_length = len(sorted_vals[0][1])
                    n_max_vals = len([x for x in sorted_vals if len(x[1]) == max_val_length])
                    # We trim the longest str by the necessary number of characters
                    # to reach max_name_length or the second longest value
                    length_diff = len(name) - max_name_length
                    if n_max_vals < len(filtered_vals):
                        second_max_val_length = len(sorted_vals[n_max_vals][1])
                        length_diff = min(length_diff, max_val_length - second_max_val_length)
                    # Trim all the longest strings by few characters
                    for i in range(n_max_vals - 1, -1, -1):
                        # Division truncation ensures that we trim more the
                        # ones whose original value is the shortest
                        char_per_str = int(length_diff / (i + 1))
                        if char_per_str != 0:
                            idx = sorted_vals[i][0]
                            filtered_vals[idx] = filtered_vals[idx][:-char_per_str]
                        length_diff -= char_per_str
                    name = separator.join(filtered_vals)
            # Disambiguate duplicate names with an appended counter.
            if name in generated_names:
                generated_names[name] += 1
                name += separator + str(generated_names[name])
            else:
                generated_names[name] = 1
            yield name, combination
    def expand_id_nodes(self, id_nodes_path, update_nodes_paths):
        """Return a new CombinatorialTree with id-bearing nodes expanded
        and updated in the rest of the script.
        Parameters
        ----------
        id_nodes_path : tuple of str
            The path to the parent node containing ids.
        update_nodes_paths : list of tuple of str
            A list of all the paths referring to the ids expanded. The string '*'
            means every node.
        Returns
        -------
        expanded_tree : CombinatorialTree
            The tree with id nodes expanded.
        Examples
        --------
        >>> d = {'molecules':
        ...          {'mol1': {'mol_value': CombinatorialLeaf([1, 2])}},
        ...      'systems':
        ...          {'sys1': {'molecules': 'mol1'},
        ...           'sys2': {'prmtopfile': 'mysystem.prmtop'}}}
        >>> update_nodes_paths = [('systems', '*', 'molecules')]
        >>> t = CombinatorialTree(d).expand_id_nodes('molecules', update_nodes_paths)
        >>> t['molecules'] == {'mol1_1': {'mol_value': 1}, 'mol1_2': {'mol_value': 2}}
        True
        >>> t['systems'] == {'sys1': {'molecules': CombinatorialLeaf(['mol1_2', 'mol1_1'])},
        ...                  'sys2': {'prmtopfile': 'mysystem.prmtop'}}
        True
        """
        expanded_tree = copy.deepcopy(self)
        combinatorial_id_nodes = {}  # map combinatorial_id -> list of combination_ids
        for id_node_key, id_node_val in self.__getitem__(id_nodes_path).items():
            # Find all combinations and expand them
            id_node_val = CombinatorialTree(id_node_val)
            combinations = {id_node_key + '_' + name: comb for name, comb
                            in id_node_val.named_combinations(separator='_', max_name_length=30)}
            if len(combinations) > 1:
                # Substitute combinatorial node with all combinations
                del expanded_tree[id_nodes_path][id_node_key]
                expanded_tree[id_nodes_path].update(combinations)
                # We need the combinatorial_id_nodes substituted to an id_node_key
                # to have a deterministic value or MPI parallel processes will
                # iterate over combinations in different orders
                combinatorial_id_nodes[id_node_key] = sorted(combinations.keys())
        # Update ids in the rest of the tree
        for update_path in update_nodes_paths:
            for update_node_key, update_node_val in self._resolve_paths(self._d, update_path):
                # Check if the value is a collection or a scalar
                if isinstance(update_node_val, list):
                    for v in update_node_val:
                        if v in combinatorial_id_nodes:
                            # Splice the expanded ids in place of the original one.
                            i = expanded_tree[update_node_key].index(v)
                            expanded_tree[update_node_key][i:i+1] = combinatorial_id_nodes[v]
                elif update_node_val in combinatorial_id_nodes:
                    comb_leaf = CombinatorialLeaf(combinatorial_id_nodes[update_node_val])
                    expanded_tree[update_node_key] = comb_leaf
        return expanded_tree
    @staticmethod
    def _resolve_path(d, path):
        """Retrieve the value of a nested key in a dictionary.
        Parameters
        ----------
        d : dict
            The nested dictionary.
        path : iterable of keys
            The "path" to the node of the dictionary.
        Return
        ------
        The value contained in the node pointed by the path.
        """
        accum_value = d
        for node_key in path:
            accum_value = accum_value[node_key]
        return accum_value
    @staticmethod
    def _resolve_paths(d, path):
        """Retrieve all the values of a nested key in a dictionary.
        Paths containing the string '*' are interpreted as any node and
        are yielded one by one.
        Parameters
        ----------
        d : dict
            The nested dictionary.
        path : iterable of str
            The "path" to the node of the dictionary. The character '*'
            means any node.
        Examples
        --------
        >>> d = {'nested': {'correct1': {'a': 1}, 'correct2': {'a': 2}, 'wrong': {'b': 3}}}
        >>> p = [x for x in CombinatorialTree._resolve_paths(d, ('nested', '*', 'a'))]
        >>> print(sorted(p))
        [(('nested', 'correct1', 'a'), 1), (('nested', 'correct2', 'a'), 2)]
        """
        try:
            if len(path) == 0:
                yield (), d
            elif len(path) == 1:
                yield (path[0],), d[path[0]]
            else:
                if path[0] == '*':
                    keys = d.keys()
                else:
                    keys = [path[0]]
                for key in keys:
                    for p, v in CombinatorialTree._resolve_paths(d[key], path[1:]):
                        # (None, None) marks dead branches; filter them out here.
                        if v is not None:
                            yield (key,) + p, v
        except KeyError:
            yield None, None
    def _find_leaves(self):
        """Traverse a dict tree and find the leaf nodes.
        Returns
        -------
        A tuple containing two lists. The first one is a list of paths to the leaf
        nodes in a tuple format (e.g. the path to node['a']['b'] is ('a', 'b')) while
        the second one is a list of all the values of those leaf nodes.
        Examples
        --------
        >>> simple_tree = CombinatorialTree({'simple': {'scalar': 1,
        ...                                             'vector': [2, 3, 4],
        ...                                             'nested': {
        ...                                                 'leaf': ['a', 'b', 'c']}}})
        >>> leaf_paths, leaf_vals = simple_tree._find_leaves()
        >>> leaf_paths
        [('simple', 'scalar'), ('simple', 'vector'), ('simple', 'nested', 'leaf')]
        >>> leaf_vals
        [1, [2, 3, 4], ['a', 'b', 'c']]
        """
        def recursive_find_leaves(node):
            leaf_paths = []
            leaf_vals = []
            for child_key, child_val in listitems(node):
                if isinstance(child_val, collections.Mapping):
                    subleaf_paths, subleaf_vals = recursive_find_leaves(child_val)
                    # prepend child key to path
                    leaf_paths.extend([(child_key,) + subleaf for subleaf in subleaf_paths])
                    leaf_vals.extend(subleaf_vals)
                else:
                    leaf_paths.append((child_key,))
                    leaf_vals.append(child_val)
            return leaf_paths, leaf_vals
        return recursive_find_leaves(self._d)
    def _find_combinatorial_leaves(self):
        """Traverse a dict tree and find CombinatorialLeaf nodes.
        Returns
        -------
        combinatorial_leaf_paths, combinatorial_leaf_vals : tuple of tuples
            combinatorial_leaf_paths is a tuple of paths to combinatorial leaf
            nodes in tuple format (e.g. the path to node['a']['b'] is ('a', 'b'))
            while combinatorial_leaf_vals is the tuple of the values of those nodes.
            The list of paths is guaranteed to be sorted by alphabetical order.
        """
        leaf_paths, leaf_vals = self._find_leaves()
        # Filter leaves that are not combinatorial
        combinatorial_ids = [i for i, val in enumerate(leaf_vals) if isinstance(val, CombinatorialLeaf)]
        combinatorial_leaf_paths = [leaf_paths[i] for i in combinatorial_ids]
        combinatorial_leaf_vals = [leaf_vals[i] for i in combinatorial_ids]
        # Sort leaves by alphabetical order of the path
        if len(combinatorial_leaf_paths) > 0:
            combinatorial_leaf_paths, combinatorial_leaf_vals = zip(*sorted(zip(combinatorial_leaf_paths,
                                                                                combinatorial_leaf_vals)))
        return combinatorial_leaf_paths, combinatorial_leaf_vals
    def _combinations_generator(self, leaf_paths, leaf_vals):
        """Generate all possible combinations of experiments.
        The iterator returns dict objects, not other CombinatorialTrees.
        Parameters
        ----------
        leaf_paths : list of tuples of strings
            The list of paths as returned by _find_leaves().
        leaf_vals : list
            The list of the correspondent values as returned by _find_leaves().
        """
        template_tree = CombinatorialTree(self._d)
        # All leaf values must be CombinatorialLeafs at this point
        assert all(isinstance(leaf_val, CombinatorialLeaf) for leaf_val in leaf_vals)
        # generating all combinations
        for combination in itertools.product(*leaf_vals):
            # update values of template tree
            for leaf_path, leaf_val in zip(leaf_paths, combination):
                template_tree[leaf_path] = leaf_val
            # Deep-copy so the yielded dict is independent of the template.
            yield copy.deepcopy(template_tree._d)
#========================================================================================
# Miscellaneous functions
#========================================================================================
def get_data_filename(relative_path):
    """Get the full path to one of the reference files shipped for testing
    In the source distribution, these files are in ``examples/*/``,
    but on installation, they're moved to somewhere in the user's python
    site-packages directory.
    Parameters
    ----------
    relative_path : str
        Path of the file to load relative to the yank egg folder, which
        is typically located at something like
        ~/anaconda/lib/python2.7/site-packages/yank-*.egg/examples/
    """
    file_path = resource_filename('yank', relative_path)
    if os.path.exists(file_path):
        return file_path
    raise ValueError("Sorry! %s does not exist. If you just added it, you'll have to re-install" % file_path)
def find_phases_in_store_directory(store_directory):
    """Build a list of phases in the store directory.
    Parameters
    ----------
    store_directory : str
        The directory to examine for stored phase NetCDF data files.
    Returns
    -------
    phases : dict of str
        A dictionary phase_name -> file_path that maps phase names to its NetCDF
        file path.
    """
    phases = {}
    # Every *.nc file in the directory is considered a phase; the phase name
    # is the file name without its extension.
    for full_path in glob.glob(os.path.join(store_directory, '*.nc')):
        phase_name = os.path.splitext(os.path.basename(full_path))[0]
        phases[phase_name] = full_path
    if not phases:
        raise RuntimeError("Could not find any valid YANK store (*.nc) files in "
                           "store directory: {}".format(store_directory))
    return phases
def is_iterable_container(value):
    """Check whether the given value is a list-like object or not.
    Returns
    -------
    False if value is a string or not iterable, True otherwise.
    """
    # collections.Iterable was an alias removed in Python 3.10; the real home
    # is collections.abc (Python 3.3+). Fall back for legacy Python 2 installs.
    try:
        from collections.abc import Iterable
    except ImportError:
        from collections import Iterable
    # strings are iterable too so we have to treat them as a special case
    return not isinstance(value, str) and isinstance(value, Iterable)
# ==============================================================================
# Conversion utilities
# ==============================================================================
def serialize_topology(topology):
    """Serialize topology to string.
    Parameters
    ----------
    topology : mdtraj.Topology, simtk.openmm.app.Topology
        The topology object to serialize.
    Returns
    -------
    serialized_topology : str
        String obtained by jsonizing the return value of to_dataframe() of the
        mdtraj Topology object.
    """
    # OpenMM topologies are converted to mdtraj before serialization.
    is_mdtraj = isinstance(topology, mdtraj.Topology)
    mdtraj_top = topology if is_mdtraj else mdtraj.Topology.from_openmm(topology)
    atoms, bonds = mdtraj_top.to_dataframe()
    # Compact separators avoid dumping whitespace to save space.
    return json.dumps({'atoms': atoms.to_json(orient='records'),
                       'bonds': bonds.tolist()},
                      separators=(',', ':'))
def deserialize_topology(serialized_topology):
    """Reconstruct a topology from a string produced by serialize_topology().
    Parameters
    ----------
    serialized_topology : str
        Serialized topology as returned by serialize_topology().
    Returns
    -------
    topology : mdtraj.Topology
        The deserialized topology object.
    """
    topology_dict = json.loads(serialized_topology)
    atoms_frame = pandas.read_json(topology_dict['atoms'], orient='records')
    bond_array = np.array(topology_dict['bonds'])
    return mdtraj.Topology.from_dataframe(atoms_frame, bond_array)
def typename(atype):
    """Convert a type object into a fully qualified typename.
    Parameters
    ----------
    atype : type
        The type to convert
    Returns
    -------
    typename : str
        The string typename.
    For example,
    >>> typename(type(1))
    'int'
    >>> import numpy
    >>> x = numpy.array([1,2,3], numpy.float32)
    >>> typename(type(x))
    'numpy.ndarray'
    """
    if not isinstance(atype, type):
        raise Exception('Argument is not a type')
    modulename = atype.__module__
    typename = atype.__name__
    # Builtin types should not be qualified: the builtins module is named
    # '__builtin__' on Python 2 but 'builtins' on Python 3.
    if modulename not in ('__builtin__', 'builtins'):
        typename = modulename + '.' + typename
    return typename
def merge_dict(dict1, dict2):
    """Return the union of two dictionaries.
    On duplicate keys the value from dict2 wins, mirroring dict.update().
    In Python 3.5 there is a syntax to do this {**dict1, **dict2} but
    in Python 2 you need to go through update().
    """
    union = dict(dict1)
    union.update(dict2)
    return union
def underscore_to_camelcase(underscore_str):
    """Convert the given string from underscore_case to camelCase.
    Underscores at the beginning or at the end of the string are ignored. All
    underscores in the middle of the string are removed.
    Parameters
    ----------
    underscore_str : str
        String in underscore_case to convert to camelCase style.
    Returns
    -------
    camelcase_str : str
        String in camelCase style.
    Examples
    --------
    >>> underscore_to_camelcase('__my___variable_')
    '__myVariable_'
    """
    core = underscore_str.strip('_')
    # Empty strings and strings made only of underscores are left untouched.
    if not core:
        return underscore_str
    # Count the leading/trailing underscores so they can be re-attached.
    n_leading = len(underscore_str) - len(underscore_str.lstrip('_'))
    n_trailing = len(underscore_str) - len(underscore_str.rstrip('_'))
    words = core.split('_')
    # First word keeps its case; the rest are capitalized (runs of '_' in the
    # middle produce empty words, which capitalize to '' and thus collapse).
    camel_words = [words[0]] + [word.capitalize() for word in words[1:]]
    return '_' * n_leading + ''.join(camel_words) + '_' * n_trailing
def camelcase_to_underscore(camelcase_str):
    """Convert the given string from camelCase to underscore_case.
    Parameters
    ----------
    camelcase_str : str
        String in camelCase to convert to underscore style.
    Returns
    -------
    underscore_str : str
        String in underscore style.
    Examples
    --------
    >>> camelcase_to_underscore('myVariable')
    'my_variable'
    >>> camelcase_to_underscore('__my_Variable_')
    '__my__variable_'
    """
    # Prefix every capital letter with an underscore, then lowercase everything.
    with_separators = re.sub(r'([A-Z])', lambda match: '_' + match.group(1), camelcase_str)
    return with_separators.lower()
def quantity_from_string(quantity_str):
    """
    Generate a simtk.unit.Quantity object from a string of arbitrary nested strings
    Parameters
    ----------
    quantity_str : string
        A string containing a value with a unit of measure
    Returns
    -------
    quantity : simtk.unit.Quantity
        The specified string, returned as a Quantity
    Raises
    ------
    AttributeError
        If quantity_str does not contain any parsable data
    TypeError
        If quantity_str does not contain units
    Examples
    --------
    >>> quantity_from_string("1*atmosphere")
    Quantity(value=1.0, unit=atmosphere)
    >>> quantity_from_string("'1 * joule / second'")
    Quantity(value=1, unit=joule/second)
    """
    # Strip out (possible) surrounding quotes
    quote_pattern = '[^\'"]+'
    try:
        quantity_str = re.search(quote_pattern, quantity_str).group()
    except AttributeError as e:
        raise AttributeError("Please pass a quantity in format of '#*unit'. e.g. '1*atmosphere'")
    # Parse String
    operators = ['(', ')', '*', '/']
    def find_operator(passed_str):
        # Process the current string until the next operator
        for i, char in enumerate(passed_str):
            if char in operators:
                break
        return i
    def nested_string(passed_str):
        def exponent_unit(passed_str):
            # Attempt to cast argument as an exponent
            future_operator_loc = find_operator(passed_str)
            future_operator = passed_str[future_operator_loc]
            if future_operator == '(':  # This catches things like x**(3*2), rare, but it could happen
                exponent, exponent_type, exp_count_indices = nested_string(passed_str[future_operator_loc+1:])
            elif future_operator_loc == 0:
                # No more operators
                exponent = passed_str
                future_operator_loc = len(passed_str)
                exp_count_indices = future_operator_loc + 2  # +2 to skip the **
            else:
                exponent = passed_str[:future_operator_loc]
                exp_count_indices = future_operator_loc + 2  # +2 to skip the **
            exponent = float(exponent)  # These should only ever be numbers, not quantities, let error occur if they aren't
            if exponent.is_integer():  # Method of float
                exponent = int(exponent)
            return exponent, exp_count_indices
        # Loop through a given string level, returns how many indices of the string it got through
        last_char_loop = 0
        number_pass_string = len(passed_str)
        last_operator = None
        final_quantity = None
        # Close-parenthesis flag
        paren_closed = False
        while last_char_loop < number_pass_string:
            next_char_loop = find_operator(passed_str[last_char_loop:]) + last_char_loop
            next_char = passed_str[next_char_loop]
            # Figure out what the operator is
            if (next_char_loop == number_pass_string - 1 and (next_char != ')')) or (next_char_loop == 0 and next_char != '(' and next_char != ')'):
                # Case of no new operators found
                argument = passed_str[last_char_loop:]
            else:
                argument = passed_str[last_char_loop:next_char_loop]
            # Strip leading/trailing spaces
            argument = argument.strip(' ')
            # Determine if argument is a unit
            try:
                arg_unit = getattr(unit, argument)
                arg_type = 'unit'
            except Exception as e:
                # Assume its float
                try:
                    arg_unit = float(argument)
                    arg_type = 'float'
                except:  # Usually empty string
                    if argument == '':
                        arg_unit = None
                        arg_type = 'None'
                    else:
                        raise e  # Raise the syntax error
            # See if we are at the end
            augment = None
            count_indices = 1  # How much to offset by to move past operator
            if next_char_loop != number_pass_string:
                next_operator = passed_str[next_char_loop]
                if next_operator == '*':
                    try:  # Exponent
                        if passed_str[next_char_loop+1] == '*':
                            exponent, exponent_offset = exponent_unit(passed_str[next_char_loop+2:])
                            try:
                                next_char_loop += exponent_offset
                                # Set the actual next operator (Does not handle nested **)
                                next_operator = passed_str[next_char_loop]
                            except IndexError:
                                # End of string
                                next_operator = None
                            # Apply exponent
                            arg_unit **= exponent
                    except:
                        pass
                # Check for parentheses
                if next_operator == '(':
                    augment, augment_type, count_indices = nested_string(passed_str[next_char_loop+1:])
                    count_indices += 1  # add 1 more to offset the '(' itself
                elif next_operator == ')':
                    paren_closed = True
            else:
                # Case of no found operators
                next_operator = None
            # Handle the conditions
            # NOTE: these were previously `arg_type is 'None'` -- string identity
            # comparisons that only worked through CPython interning and raise
            # SyntaxWarning on modern Python; equality is the correct test.
            if (last_operator is None):
                if (final_quantity is None) and (arg_type == 'None') and (augment is None):
                    raise TypeError("Given Quantity could not be interpreted as presented")
                elif (final_quantity is None) and (augment is None):
                    final_quantity = arg_unit
                    final_type = arg_type
                elif (final_quantity is None) and (arg_type == 'None'):
                    final_quantity = augment
                    final_type = augment_type
            else:
                if augment is None:
                    augment = arg_unit
                    augment_type = arg_type
                if last_operator == '*':
                    final_quantity *= augment
                elif last_operator == '/':
                    final_quantity /= augment
                # Assign type
                if augment_type == 'unit':
                    final_type = 'unit'
                elif augment_type == 'float':
                    final_type = 'float'
            last_operator = next_operator
            last_char_loop = next_char_loop + count_indices  # Set the new position here skipping over processed terms
            if paren_closed:
                # Determine if the next term is a ** to exponentiate augment
                try:
                    if passed_str[last_char_loop:last_char_loop+2] == '**':
                        exponent, exponent_offset = exponent_unit(passed_str[last_char_loop+2:])
                        final_quantity **= exponent
                        last_char_loop += exponent_offset
                except:
                    pass
                break
        return final_quantity, final_type, last_char_loop
    quantity, final_type, x = nested_string(quantity_str)
    return quantity
def process_unit_bearing_str(quantity_str, compatible_units):
    """
    Process a unit-bearing string to produce a Quantity.
    Parameters
    ----------
    quantity_str : str
        A string containing a value with a unit of measure.
    compatible_units : simtk.unit.Unit
        The result will be checked for compatibility with specified units, and an
        exception raised if not compatible.
    Returns
    -------
    quantity : simtk.unit.Quantity
        The specified string, returned as a Quantity.
    Raises
    ------
    TypeError
        If quantity_str does not contains units.
    ValueError
        If the units attached to quantity_str are incompatible with compatible_units
    Examples
    --------
    >>> process_unit_bearing_str('1.0*micrometers', unit.nanometers)
    Quantity(value=1.0, unit=micrometer)
    """
    # Convert string of a Quantity to actual Quantity
    quantity = quantity_from_string(quantity_str)
    # A unitless result (plain float/int) has no .unit attribute; only that
    # failure mode should map to TypeError. The previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and any unrelated error.
    try:
        quantity.unit.is_compatible(compatible_units)
    except AttributeError:
        raise TypeError("String %s does not have units attached." % quantity_str)
    # Check that units are compatible with what we expect.
    if not quantity.unit.is_compatible(compatible_units):
        raise ValueError("Units of %s must be compatible with %s" % (quantity_str,
                                                                     str(compatible_units)))
    # Return unit-bearing quantity.
    return quantity
def to_unit_validator(compatible_units):
    """Function generator to test unit bearing strings with Schema.
    The returned callable parses a unit-bearing string and verifies it is
    compatible with compatible_units (see process_unit_bearing_str).
    """
    def _validate_quantity_str(quantity_str):
        return process_unit_bearing_str(quantity_str, compatible_units)
    return _validate_quantity_str
def generate_signature_schema(func, update_keys=None, exclude_keys=frozenset()):
    """Generate a dictionary to test function signatures with Schema.
    Parameters
    ----------
    func : function
        The function used to build the schema.
    update_keys : dict
        Keys in here have priority over automatic generation. It can be
        used to make an argument mandatory, or to use a specific validator.
    exclude_keys : list-like
        Keys in here are ignored and not included in the schema.
    Returns
    -------
    func_schema : dict
        The dictionary to be used as Schema type. Contains all keyword
        variables in the function signature as optional argument with
        the default type as validator. Unit bearing strings are converted.
        Argument with default None are always accepted. Camel case
        parameters in the function are converted to underscore style.
    Examples
    --------
    >>> from schema import Schema
    >>> def f(a, b, camelCase=True, none=None, quantity=3.0*unit.angstroms):
    ...     pass
    >>> f_dict = generate_signature_schema(f, exclude_keys=['quantity'])
    >>> print(isinstance(f_dict, dict))
    True
    >>> # Print (key, values) in the correct order
    >>> print(sorted(listitems(f_dict), key=lambda x: x[1]))
    [(Optional('camel_case'), <type 'bool'>), (Optional('none'), <type 'object'>)]
    >>> f_schema = Schema(generate_signature_schema(f))
    >>> f_schema.validate({'quantity': '1.0*nanometer'})
    {'quantity': Quantity(value=1.0, unit=nanometer)}
    """
    if update_keys is None:
        update_keys = {}
    func_schema = {}
    # NOTE(review): inspect.getargspec() was removed in Python 3.11 — migrate
    # to inspect.getfullargspec()/signature() when dropping legacy support.
    args, _, _, defaults = inspect.getargspec(unwrap_py2(func))
    # Check keys that must be excluded from first pass
    exclude_keys = set(exclude_keys)
    exclude_keys.update(update_keys)
    # Keys wrapped in schema.Optional carry the bare name in _schema; exclude
    # those bare names too so user-provided validators are not overwritten.
    exclude_keys.update({k._schema for k in update_keys if isinstance(k, Optional)})
    # Transform camelCase to underscore
    # TODO: Make sure this is working from the Py3.X conversion
    args = [camelcase_to_underscore(arg) for arg in args ]
    # Build schema
    # NOTE(review): assumes func has at least one default argument; with no
    # defaults, `defaults` is None and len(None) raises — confirm callers.
    for arg, default_value in zip(args[-len(defaults):], defaults):
        if arg not in exclude_keys:  # User defined keys are added later
            if default_value is None:  # None defaults are always accepted
                validator = object
            elif isinstance(default_value, unit.Quantity):  # Convert unit strings
                validator = Use(to_unit_validator(default_value.unit))
            else:
                validator = type(default_value)
            func_schema[Optional(arg)] = validator
    # Add special user keys
    func_schema.update(update_keys)
    return func_schema
def get_keyword_args(function):
    """Inspect function signature and return keyword args with their default values.
    Parameters
    ----------
    function : function
        The function to interrogate.
    Returns
    -------
    kwargs : dict
        A dictionary 'keyword argument' -> 'default value'. The arguments of the
        function that do not have a default value will not be included.
    """
    # inspect.getargspec() was removed in Python 3.11; prefer getfullargspec()
    # when available and keep getargspec() as a Python 2 fallback.
    inspect_argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    argspec = inspect_argspec(function)
    # argspec.defaults is None (not an empty tuple) when there are no defaults.
    defaults = argspec.defaults or ()
    kwarg_names = argspec.args[len(argspec.args) - len(defaults):]
    return {arg: value for arg, value in zip(kwarg_names, defaults)}
def validate_parameters(parameters, template_parameters, check_unknown=False,
                        process_units_str=False, float_to_int=False,
                        ignore_none=True, special_conversions=None):
    """Utility function for parameters and options validation.
    Use the given template to filter the given parameters and infer their expected
    types. Perform various automatic conversions when requested. If the template is
    None, the parameter to validate is not checked for type compatibility.
    Parameters
    ----------
    parameters : dict
        The parameters to validate.
    template_parameters : dict
        The template used to filter the parameters and infer the types.
    check_unknown : bool
        If True, an exception is raised when parameters contain a key that is not
        contained in template_parameters.
    process_units_str: bool
        If True, the function will attempt to convert the strings whose template
        type is simtk.unit.Quantity.
    float_to_int : bool
        If True, floats in parameters whose template type is int are truncated.
    ignore_none : bool
        If True, the function do not process parameters whose value is None.
    special_conversions : dict
        Contains a converter function with signature convert(arg) that must be
        applied to the parameters specified by the dictionary key.
    Returns
    -------
    validate_par : dict
        The converted parameters that are contained both in parameters and
        template_parameters.
    Raises
    ------
    TypeError
        If check_unknown is True and there are parameters not in template_parameters.
    ValueError
        If a parameter has an incompatible type with its template parameter.
    Examples
    --------
    Create the template parameters
    >>> template_pars = dict()
    >>> template_pars['bool'] = True
    >>> template_pars['int'] = 2
    >>> template_pars['unspecified'] = None  # this won't be checked for type compatibility
    >>> template_pars['to_be_converted'] = [1, 2, 3]
    >>> template_pars['length'] = 2.0 * unit.nanometers
    Now the parameters to validate
    >>> input_pars = dict()
    >>> input_pars['bool'] = None  # this will be skipped with ignore_none=True
    >>> input_pars['int'] = 4.3  # this will be truncated to 4 with float_to_int=True
    >>> input_pars['unspecified'] = 'input'  # this can be of any type since the template is None
    >>> input_pars['to_be_converted'] = {'key': 3}
    >>> input_pars['length'] = '1.0*nanometers'
    >>> input_pars['unknown'] = 'test'  # this will be silently filtered if check_unkown=False
    Validate the parameters
    >>> valid = validate_parameters(input_pars, template_pars, process_units_str=True,
    ...                             float_to_int=True, special_conversions={'to_be_converted': list})
    >>> import pprint
    >>> pprint.pprint(valid)
    {'bool': None,
     'int': 4,
     'length': Quantity(value=1.0, unit=nanometer),
     'to_be_converted': ['key'],
     'unspecified': 'input'}
    """
    if special_conversions is None:
        special_conversions = {}
    # Create validated parameters
    # Keys not present in the template are silently dropped here.
    validated_par = {par: parameters[par] for par in parameters
                     if par in template_parameters}
    # Check for unknown parameters
    if check_unknown and len(validated_par) < len(parameters):
        diff = set(parameters) - set(template_parameters)
        raise TypeError("found unknown parameter {}".format(', '.join(diff)))
    for par, value in listitems(validated_par):
        templ_value = template_parameters[par]
        # Convert requested types
        if ignore_none and value is None:
            continue
        # Special conversions have priority
        if par in special_conversions:
            converter_func = special_conversions[par]
            validated_par[par] = converter_func(value)
        else:  # Automatic conversions and type checking
            # bool inherits from int in Python so we can't simply use isinstance
            if float_to_int and type(templ_value) is int:
                validated_par[par] = int(value)
            elif process_units_str and isinstance(templ_value, unit.Quantity):
                validated_par[par] = process_unit_bearing_str(value, templ_value.unit)
            # Check for incompatible types
            # NOTE(review): this is a strict type() comparison, so e.g. an int
            # value for a float template is rejected — confirm that's intended.
            if type(validated_par[par]) != type(templ_value) and templ_value is not None:
                raise ValueError("parameter {}={} is incompatible with {}".format(
                    par, validated_par[par], template_parameters[par]))
    return validated_par
# ==============================================================================
# Stuff to move to openmoltools/ParmEd when they'll be stable
# ==============================================================================
class Mol2File(object):
    """Wrapper of ParmEd mol2 parser for easy manipulation of mol2 files.
    This is not efficient as every operation access the file. The purpose
    of this class is simply to provide a shortcut to read and write the mol2
    file with a one-liner. If you need to do multiple operations before
    saving the file, use ParmEd directly.
    This works only for single-structure mol2 files.
    """
    def __init__(self, file_path):
        """Constructor.
        Parameters
        -----------
        file_path : str
            Path to the mol2 path.
        """
        self._file_path = file_path
    def _load(self):
        # Re-read the file on every access; see the class docstring.
        return parmed.load_file(self._file_path)
    def _save(self, residue):
        # Persist the (modified) structure back to the same mol2 file.
        parmed.formats.Mol2File.write(residue, self._file_path)
    @property
    def resname(self):
        # Residue name stored in the mol2 file.
        return self._load().name
    @resname.setter
    def resname(self, value):
        residue = self._load()
        residue.name = value
        self._save(residue)
    @property
    def net_charge(self):
        # Sum of the per-atom partial charges.
        return sum(atom.charge for atom in self._load().atoms)
    @net_charge.setter
    def net_charge(self, value):
        residue = self._load()
        residue.fix_charges(to=value, precision=6)
        self._save(residue)
# OpenEye functions
# ------------------
def is_openeye_installed():
    """Return True when the OpenEye toolkit is importable and fully licensed.

    All four modules used by this file (oechem, oequacpac, oeiupac, oeomega)
    must import successfully and each must report a valid license; any
    failure yields False.
    """
    try:
        from openeye import oechem
        from openeye import oequacpac
        from openeye import oeiupac
        from openeye import oeomega
    except ImportError:
        return False
    # Same check order as the licensed-module imports above.
    license_checks = (oechem.OEChemIsLicensed, oequacpac.OEQuacPacIsLicensed,
                      oeiupac.OEIUPACIsLicensed, oeomega.OEOmegaIsLicensed)
    return all(check() for check in license_checks)
def read_oe_molecule(file_path, conformer_idx=None):
    """Read a molecule and all its conformations with the OpenEye toolkit.

    Parameters
    ----------
    file_path : str
        Path of the file to read; the format is detected by OpenEye.
    conformer_idx : int, optional
        If given, only this conformation is returned (as an OEGraphMol);
        otherwise the full multi-conformer OEMol is returned.

    Returns
    -------
    molecule
        An ``oechem.OEMol`` with all conformations, or an
        ``oechem.OEGraphMol`` when `conformer_idx` is specified. Returns
        None for a file containing no molecules (the previous
        implementation raised UnboundLocalError in that case).

    Raises
    ------
    ValueError
        If `conformer_idx` is out of range.
    """
    from openeye import oechem
    # Open input file stream
    ifs = oechem.oemolistream()
    if not ifs.open(file_path):
        oechem.OEThrow.Fatal('Unable to open {}'.format(file_path))
    # Merge everything in the file into a single multi-conformer molecule.
    # (Replaces the old UnboundLocalError-as-control-flow idiom.)
    molecule = None
    for mol in ifs.GetOEMols():
        if molecule is None:
            molecule = oechem.OEMol(mol)
        else:
            molecule.NewConf(mol)
    # Select conformation of interest
    if conformer_idx is not None:
        if molecule.NumConfs() <= conformer_idx:
            raise ValueError('conformer_idx {} out of range'.format(conformer_idx))
        molecule = oechem.OEGraphMol(molecule.GetConf(oechem.OEHasConfIdx(conformer_idx)))
    return molecule
def write_oe_molecule(oe_mol, file_path, mol2_resname=None):
    """Write all conformations in a file and automatically detects format.

    Parameters
    ----------
    oe_mol
        OpenEye molecule to write.
    file_path : str
        Output path; the extension selects the OpenEye output format.
    mol2_resname : str, optional
        If given and the output is a mol2 file, the default '<0>' residue
        name written by OpenEye is replaced with this value.
    """
    from openeye import oechem
    # Get correct OpenEye format
    extension = os.path.splitext(file_path)[1][1:]  # remove dot
    oe_format = getattr(oechem, 'OEFormat_' + extension.upper())
    # Open stream and write molecule
    ofs = oechem.oemolostream()
    ofs.SetFormat(oe_format)
    if not ofs.open(file_path):
        oechem.OEThrow.Fatal('Unable to create {}'.format(file_path))
    oechem.OEWriteMolecule(ofs, oe_mol)
    ofs.close()
    # If this is a mol2 file, we need to replace the resname
    # TODO when you merge to openmoltools, encapsulate this and add to molecule_to_mol2()
    if mol2_resname is not None and extension == 'mol2':
        with open(file_path, 'r') as f:
            lines = f.readlines()
        lines = [line.replace('<0>', mol2_resname) for line in lines]
        with open(file_path, 'w') as f:
            f.writelines(lines)
def get_oe_mol_positions(molecule, conformer_idx=0):
    """Return atom positions of one conformer as an (n_atoms, 3) float array.

    Parameters
    ----------
    molecule
        OpenEye molecule (multi-conformer OEMol for ``conformer_idx > 0``).
    conformer_idx : int, optional
        Index of the conformer to extract; 0 (default) uses the molecule
        as-is.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(molecule.NumAtoms(), 3)`` with the coordinates.

    Raises
    ------
    ValueError
        If `conformer_idx` is out of range.
    """
    from openeye import oechem
    # Extract correct conformer. The old implementation raised
    # UnboundLocalError on purpose to share an except clause; a direct
    # check is clearer and behaves identically.
    if conformer_idx > 0:
        if molecule.NumConfs() <= conformer_idx:
            raise ValueError('conformer_idx {} out of range'.format(conformer_idx))
        molecule = oechem.OEGraphMol(molecule.GetConf(oechem.OEHasConfIdx(conformer_idx)))
    # Extract positions: OpenEye writes each atom's xyz into a reusable
    # 3-element float buffer.
    oe_coords = oechem.OEFloatArray(3)
    molecule_pos = np.zeros((molecule.NumAtoms(), 3))
    for i, atom in enumerate(molecule.GetAtoms()):
        molecule.GetCoords(atom, oe_coords)
        molecule_pos[i] = oe_coords
    return molecule_pos
def set_oe_mol_positions(molecule, positions):
    """Assign coordinates to every atom of an OpenEye molecule.

    Atom ``i`` (in ``GetAtoms()`` order) receives ``positions[i]``.
    """
    atom_sequence = enumerate(molecule.GetAtoms())
    for atom_index, atom in atom_sequence:
        molecule.SetCoords(atom, positions[atom_index])
class TLeap:
    """Programmatic interface to write and run tLeap scripts.

    To avoid problems with special characters in file paths, the class run the
    tleap script in a temporary folder with hardcoded names for files and then
    copy the output files in their respective folders.
    """

    @property
    def script(self):
        """str: the script accumulated so far, with file placeholders
        resolved and a final ``quit`` command appended."""
        return self._script.format(**self._file_paths) + '\nquit\n'

    def __init__(self):
        self._script = ''  # script template; files appear as {moliN}/{moloN} placeholders
        self._file_paths = {}  # paths of input/output files to copy in/from temp dir
        self._loaded_parameters = set()  # parameter files already loaded

    def add_commands(self, *args):
        """Append one or more raw tLeap commands (one per line) to the script."""
        for command in args:
            self._script += command + '\n'

    def load_parameters(self, *args):
        """Emit the appropriate load command for each parameter file.

        Files already loaded are skipped. Paths that exist on disk are
        staged for copying into the temporary run directory; other names
        are assumed to be tLeap built-in files.
        """
        for par_file in args:
            # Check that this is not already loaded
            if par_file in self._loaded_parameters:
                continue
            # Check whether this is a user file or a tleap file, and
            # update list of input files to copy in temporary folder before run
            if os.path.isfile(par_file):
                local_name = 'moli{}'.format(len(self._file_paths))
                self._file_paths[local_name] = par_file
                local_name = '{' + local_name + '}'
            else:  # tleap file
                local_name = par_file
            # use loadAmberParams if this is a frcmod file and source otherwise
            base_name = os.path.basename(par_file)
            extension = os.path.splitext(base_name)[1]
            if 'frcmod' in base_name:
                self.add_commands('loadAmberParams ' + local_name)
            elif extension == '.off' or extension == '.lib':
                self.add_commands('loadOff ' + local_name)
            else:
                # NOTE(review): sources the original path, not the staged
                # local copy -- verify this is intentional.
                self.add_commands('source ' + par_file)
            # Update loaded parameters cache
            self._loaded_parameters.add(par_file)

    def load_group(self, name, file_path):
        """Load a .mol2 or .pdb file into the tLeap unit `name`."""
        extension = os.path.splitext(file_path)[1]
        if extension == '.mol2':
            load_command = 'loadMol2'
        elif extension == '.pdb':
            load_command = 'loadPdb'
        else:
            raise ValueError('cannot load format {} in tLeap'.format(extension))
        local_name = 'moli{}'.format(len(self._file_paths))
        self.add_commands('{} = {} {{{}}}'.format(name, load_command, local_name))
        # Update list of input files to copy in temporary folder before run
        self._file_paths[local_name] = file_path

    def combine(self, group, *args):
        """Combine the given units into a new unit called `group`."""
        components = ' '.join(args)
        # Doubled braces survive the final .format() as literal braces.
        self.add_commands('{} = combine {{{{ {} }}}}'.format(group, components))

    def add_ions(self, unit, ion, num_ions=0):
        """Add `num_ions` ions to `unit` (0 asks addIons2 to neutralize)."""
        self.add_commands('addIons2 {} {} {}'.format(unit, ion, num_ions))

    def solvate(self, group, water_model, clearance):
        """Solvate `group` in an isometric box with the given clearance."""
        self.add_commands('solvateBox {} {} {} iso'.format(group, water_model,
                                                           str(clearance)))

    def save_group(self, group, output_path):
        """Save `group` to `output_path` (.prmtop/.inpcrd pair or .pdb).

        Saving either of .prmtop/.inpcrd also registers the companion file
        in the same directory, since saveAmberParm writes both.
        """
        file_name = os.path.basename(output_path)
        file_name, extension = os.path.splitext(file_name)
        local_name = 'molo{}'.format(len(self._file_paths))
        # Update list of output files to copy from temporary folder after run
        self._file_paths[local_name] = output_path
        # Add command
        if extension == '.prmtop' or extension == '.inpcrd':
            local_name2 = 'molo{}'.format(len(self._file_paths))
            command = 'saveAmberParm ' + group + ' {{{}}} {{{}}}'
            # Update list of output files with the one not explicit
            if extension == '.inpcrd':
                extension2 = '.prmtop'
                command = command.format(local_name2, local_name)
            else:
                extension2 = '.inpcrd'
                command = command.format(local_name, local_name2)
            output_path2 = os.path.join(os.path.dirname(output_path), file_name + extension2)
            self._file_paths[local_name2] = output_path2
            self.add_commands(command)
        elif extension == '.pdb':
            self.add_commands('savePDB {} {{{}}}'.format(group, local_name))
        else:
            raise ValueError('cannot export format {} from tLeap'.format(extension[1:]))

    def transform(self, unit, transformation):
        """Transformation is an array-like representing the affine transformation matrix."""
        command = 'transform {} {}'.format(unit, transformation)
        # numpy-style brackets become tLeap braces; doubled so the final
        # .format() call leaves single literal braces in the script.
        command = command.replace('[', '{{').replace(']', '}}')
        # Fix: collapse double spaces (was a no-op single-space replace).
        command = command.replace('\n', '').replace('  ', ' ')
        self.add_commands(command)

    def new_section(self, comment):
        """Add a blank line and a comment to visually separate script sections."""
        self.add_commands('\n# ' + comment)

    def export_script(self, file_path):
        """Write the final script (including the quit command) to `file_path`."""
        with open(file_path, 'w') as f:
            f.write(self.script)

    def run(self):
        """Run script and return warning messages in leap log file."""
        # Transform paths in absolute paths since we'll change the working directory
        input_files = {local + os.path.splitext(path)[1]: os.path.abspath(path)
                       for local, path in listitems(self._file_paths) if 'moli' in local}
        output_files = {local + os.path.splitext(path)[1]: os.path.abspath(path)
                        for local, path in listitems(self._file_paths) if 'molo' in local}
        # Resolve all the names in the script
        local_files = {local: local + os.path.splitext(path)[1]
                       for local, path in listitems(self._file_paths)}
        script = self._script.format(**local_files) + 'quit\n'
        # Default log name reported in error messages when no output file is
        # registered (previously this case raised NameError on log_path).
        log_path = 'leap.log'
        with mdtraj.utils.enter_temp_directory():
            # Copy input files
            for local_file, file_path in listitems(input_files):
                shutil.copy(file_path, local_file)
            # Save script and run tleap
            with open('leap.in', 'w') as f:
                f.write(script)
            leap_output = subprocess.check_output(['tleap', '-f', 'leap.in']).decode()
            # Save leap.log in directory of first output file
            if len(output_files) > 0:
                # Get first output path in Py 3.X way that is also thread-safe
                for val in listvalues(output_files):
                    first_output_path = val
                    break
                first_output_name = os.path.basename(first_output_path).split('.')[0]
                first_output_dir = os.path.dirname(first_output_path)
                log_path = os.path.join(first_output_dir, first_output_name + '.leap.log')
                shutil.copy('leap.log', log_path)
            # Copy back output files. If something goes wrong, some files may not exist
            error_msg = ''
            try:
                for local_file, file_path in listitems(output_files):
                    shutil.copy(local_file, file_path)
            except IOError:
                error_msg = "Could not create one of the system files."
            # Look for errors in log that don't raise CalledProcessError.
            # Raw strings: \d and \S are regex escapes, not string escapes.
            error_patterns = [r'Argument #\d+ is type \S+ must be of type: \S+']
            for pattern in error_patterns:
                m = re.search(pattern, leap_output)
                if m is not None:
                    error_msg = m.group(0)
                    break
            if error_msg != '':
                raise RuntimeError(error_msg + ' Check log file {}'.format(log_path))
        # Check for and return warnings
        return re.findall('WARNING: (.+)', leap_output)
#=============================================================================================
# Python 2/3 compatibility
#=============================================================================================
# Uniform dict-iteration helpers for both major Python versions, without
# external dependencies on future.utils or six.
if hasattr(dict, 'iteritems'):
    # Python 2: values()/items() already return lists.
    def listvalues(d):
        return d.values()

    def listitems(d):
        return d.items()

    def dictiter(d):
        return d.iteritems()
else:
    # Python 3: materialize the views where callers expect a list.
    def listvalues(d):
        return list(d.values())

    def listitems(d):
        return list(d.items())

    def dictiter(d):
        return d.items()
#=============================================================================================
# Main and tests
#=============================================================================================
if __name__ == "__main__":
    # Run this module's doctests when the file is executed as a script.
    import doctest
    doctest.testmod()
| lgpl-3.0 |
WarrenWeckesser/scipy | scipy/stats/morestats.py | 4 | 128655 | from __future__ import annotations
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray, zeros,
arange, sort, amin, amax, atleast_1d, sqrt, array,
compress, pi, exp, ravel, count_nonzero, sin, cos,
arctan2, hypot)
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats, _contains_nan, _normtest_finish
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
from ._hypotests import _get_wilcoxon_distr
# Public API: names exported by ``from scipy.stats.morestats import *``.
__all__ = ['mvsdist',
           'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
           'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
           'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
           'fligner', 'mood', 'wilcoxon', 'median_test',
           'circmean', 'circvar', 'circstd', 'anderson_ksamp',
           'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
           'yeojohnson_normplot'
           ]
# Result containers: each pairs a point estimate ('statistic') with its
# (lower, upper) confidence interval ('minmax').
Mean = namedtuple('Mean', ['statistic', 'minmax'])
Variance = namedtuple('Variance', ['statistic', 'minmax'])
Std_dev = namedtuple('Std_dev', ['statistic', 'minmax'])
def bayes_mvs(data, alpha=0.90):
    r"""
    Bayesian confidence intervals for the mean, var, and std.

    Parameters
    ----------
    data : array_like
        Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
        Requires 2 or more data points.
    alpha : float, optional
        Probability that the returned confidence interval contains
        the true parameter.

    Returns
    -------
    mean_cntr, var_cntr, std_cntr : tuple
        The three results are for the mean, variance and standard deviation,
        respectively. Each result is a tuple of the form::

            (center, (lower, upper))

        with `center` the mean of the conditional pdf of the value given the
        data, and `(lower, upper)` a confidence interval, centered on the
        median, containing the estimate to a probability ``alpha``.

    Raises
    ------
    ValueError
        If `alpha` is not strictly between 0 and 1.

    See Also
    --------
    mvsdist

    Notes
    -----
    Each tuple of mean, variance, and standard deviation estimates represent
    the (center, (lower, upper)) with center the mean of the conditional pdf
    of the value given the data and (lower, upper) is a confidence interval
    centered on the median, containing the estimate to a probability
    ``alpha``.

    Converts data to 1-D and assumes all data has the same mean and variance.
    Uses Jeffrey's prior for variance and std.

    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    First a basic example to demonstrate the outputs:

    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.bayes_mvs(data)
    >>> mean
    Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467))
    >>> var
    Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
    >>> std
    Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631))

    Now we generate some normally distributed random data, and get estimates of
    mean and standard deviation with 95% confidence intervals for those
    estimates:

    >>> n_samples = 100000
    >>> data = stats.norm.rvs(size=n_samples)
    >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.hist(data, bins=100, density=True, label='Histogram of data')
    >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
    >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
    ...            alpha=0.2, label=r'Estimated mean (95% limits)')
    >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
    >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
    ...            label=r'Estimated scale (95% limits)')
    >>> ax.legend(fontsize=10)
    >>> ax.set_xlim([-4, 4])
    >>> ax.set_ylim([0, 0.5])
    >>> plt.show()

    """
    # Validate `alpha` before doing any work on the data; previously the
    # three posterior distributions were computed before this check.
    if alpha >= 1 or alpha <= 0:
        raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
                         % alpha)
    m, v, s = mvsdist(data)
    m_res = Mean(m.mean(), m.interval(alpha))
    v_res = Variance(v.mean(), v.interval(alpha))
    s_res = Std_dev(s.mean(), s.interval(alpha))
    return m_res, v_res, s_res
def mvsdist(data):
    """
    'Frozen' distributions for mean, variance, and standard deviation of data.

    Parameters
    ----------
    data : array_like
        Input array. Converted to 1-D using ravel.
        Requires 2 or more data-points.

    Returns
    -------
    mdist : "frozen" distribution object
        Distribution object representing the mean of the data.
    vdist : "frozen" distribution object
        Distribution object representing the variance of the data.
    sdist : "frozen" distribution object
        Distribution object representing the standard deviation of the data.

    See Also
    --------
    bayes_mvs

    Notes
    -----
    The return values from ``bayes_mvs(data)`` is equivalent to
    ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.

    In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
    on the three distribution objects returned from this function will give
    the same results that are returned from `bayes_mvs`.

    References
    ----------
    T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
    standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278,
    2006.

    Examples
    --------
    >>> from scipy import stats
    >>> data = [6, 9, 12, 7, 8, 8, 13]
    >>> mean, var, std = stats.mvsdist(data)

    We now have frozen distribution objects "mean", "var" and "std" that we can
    examine:

    >>> mean.mean()
    9.0
    >>> mean.interval(0.95)
    (6.6120585482655692, 11.387941451734431)
    >>> mean.std()
    1.1952286093343936

    """
    x = ravel(data)
    n = len(x)
    if n < 2:
        raise ValueError("Need at least 2 data-points.")
    xbar = x.mean()
    C = x.var()  # biased (ddof=0) sample variance
    if n > 1000:  # gaussian approximations for large n
        mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
        sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
        vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
    else:
        # Small-sample posteriors: Student's t for the mean, generalized
        # gamma for the std, inverse gamma for the variance.
        nm1 = n - 1
        fac = n * C / 2.
        val = nm1 / 2.
        mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
        sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
        vdist = distributions.invgamma(val, scale=fac)
    return mdist, vdist, sdist
def kstat(data, n=2):
    r"""
    Return the nth k-statistic (1<=n<=4 so far).

    The nth k-statistic k_n is the unique symmetric unbiased estimator of the
    nth cumulant kappa_n.

    Parameters
    ----------
    data : array_like
        Input array. Note that n-D input gets flattened.
    n : int, {1, 2, 3, 4}, optional
        Default is equal to 2.

    Returns
    -------
    kstat : float
        The nth k-statistic.

    See Also
    --------
    kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.

    Notes
    -----
    The k-statistics are computed from the power sums
    ``S_k = sum(x**k)``; e.g. ``k_1`` is the sample mean and ``k_2`` the
    unbiased sample variance.

    References
    ----------
    http://mathworld.wolfram.com/k-Statistic.html

    http://mathworld.wolfram.com/Cumulant.html

    """
    if n > 4 or n < 1:
        raise ValueError("k-statistics only supported for 1<=n<=4")
    order = int(n)
    flat = ravel(data)
    size = flat.size
    # raise ValueError on empty input
    if size == 0:
        raise ValueError("Data input must not be empty")
    # on nan input, return nan without warning
    if np.isnan(np.sum(flat)):
        return np.nan
    # Power sums S_k = sum(x**k); index 0 is unused padding.
    sums = np.zeros(order + 1, np.float64)
    for exponent in range(1, order + 1):
        sums[exponent] = np.sum(flat**exponent, axis=0)
    if order == 1:
        return sums[1] * 1.0 / size
    if order == 2:
        return (size*sums[2] - sums[1]**2.0) / (size*(size - 1.0))
    if order == 3:
        return (2*sums[1]**3 - 3*size*sums[1]*sums[2] + size*size*sums[3]) / (
            size*(size - 1.0)*(size - 2.0))
    # order == 4
    return ((-6*sums[1]**4 + 12*size*sums[1]**2 * sums[2] -
             3*size*(size-1.0)*sums[2]**2 -
             4*size*(size+1)*sums[1]*sums[3] + size*size*(size+1)*sums[4]) /
            (size*(size-1.0)*(size-2.0)*(size-3.0)))
def kstatvar(data, n=2):
    r"""Return an unbiased estimator of the variance of the k-statistic.

    See `kstat` for more details of the k-statistic.

    Parameters
    ----------
    data : array_like
        Input array. Note that n-D input gets flattened.
    n : int, {1, 2}, optional
        Default is equal to 2.

    Returns
    -------
    kstatvar : float
        The nth k-statistic variance.

    See Also
    --------
    kstat: Returns the n-th k-statistic.
    moment: Returns the n-th central moment about the mean for a sample.

    Notes
    -----
    For a sample of size ``N``, ``var(k_1) = k_2 / N`` and
    ``var(k_2) = (2*N*k_2**2 + (N - 1)*k_4) / (N*(N + 1))``,
    where ``k_2`` and ``k_4`` are the sample k-statistics.
    """
    flat = ravel(data)
    size = len(flat)
    if n == 1:
        # Variance of the sample mean: k_2 / N.
        return kstat(flat, n=2) * 1.0 / size
    if n == 2:
        second_kstat = kstat(flat, n=2)
        fourth_kstat = kstat(flat, n=4)
        return (2*size*second_kstat**2 + (size - 1)*fourth_kstat) / (size*(size + 1))
    raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
    """Approximations of uniform order statistic medians.

    Uses Filliben's estimate: the medians of the smallest and largest order
    statistics are exact (``1 - 0.5**(1/n)`` and ``0.5**(1/n)``), while the
    interior medians are spread evenly as ``(i - 0.3175) / (n + 0.365)``
    for ``i = 2, ..., n-1``.

    Parameters
    ----------
    n : int
        Sample size.

    Returns
    -------
    v : 1d float array
        Approximations of the order statistic medians.

    References
    ----------
    .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
           Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    """
    medians = np.empty(n, dtype=np.float64)
    largest_median = 0.5 ** (1.0 / n)
    medians[-1] = largest_median
    medians[0] = 1 - largest_median
    interior = np.arange(2, n)
    medians[1:-1] = (interior - 0.3175) / (n + 0.365)
    return medians
def _parse_dist_kw(dist, enforce_subclass=True):
    """Parse `dist` keyword.

    Parameters
    ----------
    dist : str or stats.distributions instance.
        Several functions take `dist` as a keyword, hence this utility
        function.
    enforce_subclass : bool, optional
        If True (default), `dist` needs to be a
        `_distn_infrastructure.rv_generic` instance.
        It can sometimes be useful to set this keyword to False, if a function
        wants to accept objects that just look somewhat like such an instance
        (for example, they have a ``ppf`` method).
    """
    if isinstance(dist, rv_generic):
        return dist
    if isinstance(dist, str):
        # Look the name up in scipy.stats.distributions.
        try:
            return getattr(distributions, dist)
        except AttributeError as e:
            raise ValueError("%s is not a valid distribution name" % dist) from e
    if enforce_subclass:
        msg = ("`dist` should be a stats.distributions instance or a string "
               "with the name of such a distribution.")
        raise ValueError(msg)
    # Duck-typed distribution-like object; accepted when not enforcing.
    return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
    """Helper function to add axes labels and a title to stats plots.

    `plot` may be a Matplotlib Axes (anything exposing ``set_title`` /
    ``set_xlabel`` / ``set_ylabel``) or the ``matplotlib.pyplot`` module;
    any failure while labeling is deliberately swallowed.
    """
    try:
        if hasattr(plot, 'set_title'):
            # Matplotlib Axes instance or something that looks like it
            plot.set_title(title)
            plot.set_xlabel(xlabel)
            plot.set_ylabel(ylabel)
        else:
            # matplotlib.pyplot module
            plot.title(title)
            plot.xlabel(xlabel)
            plot.ylabel(ylabel)
    except Exception:
        # Not an MPL object or something that looks (enough) like it.
        # Don't crash on adding labels or title
        pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
    """
    Calculate quantiles for a probability plot, and optionally show the plot.

    Generates a probability plot of sample data against the quantiles of a
    specified theoretical distribution (the normal distribution by default).
    `probplot` optionally calculates a best-fit line for the data and plots the
    results using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (shape parameters plus location
        and scale).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. The default is 'norm' for a
        normal probability plot. Objects that look enough like a
        stats.distributions instance (i.e. they have a ``ppf`` method) are also
        accepted.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data if
        True (default).
    plot : object, optional
        If given, plots the quantiles.
        If given and `fit` is True, also plots the least squares fit.
        `plot` is an object that has to have methods "plot" and "text".
        The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
        or a custom object with the same methods.
        Default is None, which means that no plot is created.
    rvalue : bool, optional
        If `plot` is given and `fit` is True, setting `rvalue` to True adds
        the coefficient of determination (R^2) as text on the plot.
        Default is False.

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Tuple of theoretical quantiles (osm, or order statistic medians) and
        ordered responses (osr). `osr` is simply sorted input `x`.
        For details on how `osm` is calculated see the Notes section.
    (slope, intercept, r) : tuple of floats, optional
        Tuple containing the result of the least-squares fit, if that is
        performed by `probplot`. `r` is the square root of the coefficient of
        determination. If ``fit=False`` and ``plot=None``, this tuple is not
        returned.

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by `probplot`;
    ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
    calling `probplot`.

    `probplot` generates a probability plot, which should not be confused with
    a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
    type, see ``statsmodels.api.ProbPlot``.

    The formula used for the theoretical quantiles (horizontal axis of the
    probability plot) is Filliben's estimate::

        quantiles = dist.ppf(val), for

                0.5**(1/n),                  for i = n
          val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
                1 - 0.5**(1/n),              for i = 1

    where ``i`` indicates the i-th ordered value and ``n`` is the total number
    of values.

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> nsample = 100
    >>> rng = np.random.default_rng()

    A t distribution with small degrees of freedom:

    >>> ax1 = plt.subplot(221)
    >>> x = stats.t.rvs(3, size=nsample, random_state=rng)
    >>> res = stats.probplot(x, plot=plt)

    A t distribution with larger degrees of freedom:

    >>> ax2 = plt.subplot(222)
    >>> x = stats.t.rvs(25, size=nsample, random_state=rng)
    >>> res = stats.probplot(x, plot=plt)

    A mixture of two normal distributions with broadcasting:

    >>> ax3 = plt.subplot(223)
    >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
    ...                    size=(nsample//2,2), random_state=rng).ravel()
    >>> res = stats.probplot(x, plot=plt)

    A standard normal distribution:

    >>> ax4 = plt.subplot(224)
    >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample, random_state=rng)
    >>> res = stats.probplot(x, plot=plt)

    Produce a new figure with a loggamma distribution, using the ``dist`` and
    ``sparams`` keywords:

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> x = stats.loggamma.rvs(c=2.5, size=500, random_state=rng)
    >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
    >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")

    Show the results with Matplotlib:

    >>> plt.show()

    """
    x = np.asarray(x)
    # Empty input: return empty arrays (and a degenerate fit when requested).
    if x.size == 0:
        if fit:
            return (x, x), (np.nan, np.nan, 0.0)
        else:
            return x, x
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    dist = _parse_dist_kw(dist, enforce_subclass=False)
    # Normalize `sparams` to a tuple so it can be splatted into dist.ppf.
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams,)
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)
    osm = dist.ppf(osm_uniform, *sparams)
    osr = sort(x)
    if fit:
        # perform a linear least squares fit.
        slope, intercept, r, prob, _ = stats.linregress(osm, osr)
    if plot is not None:
        plot.plot(osm, osr, 'bo')
        if fit:
            plot.plot(osm, slope*osm + intercept, 'r-')
        _add_axis_labels_title(plot, xlabel='Theoretical quantiles',
                               ylabel='Ordered Values',
                               title='Probability Plot')
        # Add R^2 value to the plot as text
        if rvalue:
            xmin = amin(osm)
            xmax = amax(osm)
            ymin = amin(x)
            ymax = amax(x)
            posx = xmin + 0.70 * (xmax - xmin)
            posy = ymin + 0.01 * (ymax - ymin)
            plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
    if fit:
        return (osm, osr), (slope, intercept, r)
    else:
        return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
    """Calculate the shape parameter that maximizes the PPCC.

    The probability plot correlation coefficient (PPCC) plot can be used to
    determine the optimal shape parameter for a one-parameter family of
    distributions. ppcc_max returns the shape parameter that would maximize the
    probability plot correlation coefficient for the given data to a
    one-parameter family of distributions.

    Parameters
    ----------
    x : array_like
        Input array.
    brack : tuple, optional
        Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
        then they are assumed to be a starting interval for a downhill bracket
        search (see `scipy.optimize.brent`).
    dist : str or stats.distributions instance, optional
        Distribution or distribution function name. Objects that look enough
        like a stats.distributions instance (i.e. they have a ``ppf`` method)
        are also accepted. The default is ``'tukeylambda'``.

    Returns
    -------
    shape_value : float
        The shape parameter at which the probability plot correlation
        coefficient reaches its max value.

    See Also
    --------
    ppcc_plot, probplot, boxcox

    Notes
    -----
    The brack keyword serves as a starting point which is useful in corner
    cases. One can use a plot to obtain a rough visual estimate of the location
    for the maximum to start the search near it.

    References
    ----------
    .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
           Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
    .. [2] https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm

    Examples
    --------
    First we generate some random data from a Tukey-Lambda distribution,
    with shape parameter -0.7:

    >>> from scipy import stats
    >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
    ...                           random_state=1234567) + 1e4

    Now we explore this data with a PPCC plot as well as the related
    probability plot and Box-Cox normplot. A red line is drawn where we
    expect the PPCC value to be maximal (at the shape parameter -0.7 used
    above):

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure(figsize=(8, 6))
    >>> ax = fig.add_subplot(111)
    >>> res = stats.ppcc_plot(x, -5, 5, plot=ax)

    We calculate the value where the shape should reach its maximum and a red
    line is drawn there. The line should coincide with the highest point in the
    ppcc_plot.

    >>> max = stats.ppcc_max(x)
    >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')

    >>> plt.show()

    """
    dist = _parse_dist_kw(dist)
    osm_uniform = _calc_uniform_order_statistic_medians(len(x))
    osr = sort(x)
    # this function computes the x-axis values of the probability plot
    # and computes a linear regression (including the correlation)
    # and returns 1-r so that a minimization function maximizes the
    # correlation
    def tempfunc(shape, mi, yvals, func):
        xvals = func(mi, shape)
        r, prob = stats.pearsonr(xvals, yvals)
        return 1 - r
    # Brent's method minimizes 1 - r, i.e. maximizes the correlation r.
    return optimize.brent(tempfunc, brack=brack,
                          args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
    """Calculate and optionally plot probability plot correlation coefficient.

    The probability plot correlation coefficient (PPCC) plot is used to
    determine the optimal shape parameter for a one-parameter family of
    distributions; it is not applicable to families with zero or multiple
    shape parameters.  By default the Tukey-Lambda family is evaluated,
    which interpolates from long-tailed through approximately normal to
    short-tailed distributions.

    Parameters
    ----------
    x : array_like
        Input array.
    a, b : scalar
        Lower and upper bounds of the shape parameter range to evaluate.
    dist : str or stats.distributions instance, optional
        Distribution (or its name) to compute the PPCC for.  Any object
        that looks enough like a stats.distributions instance (i.e. has a
        ``ppf`` method) is accepted.  Default is ``'tukeylambda'``.
    plot : object, optional
        If given, the PPCC curve is plotted against the shape parameter.
        `plot` must expose "plot" and "text" methods; `matplotlib.pyplot`
        or a Matplotlib Axes object qualifies.  Default None (no plot).
    N : int, optional
        Number of equally spaced shape values between `a` and `b`.

    Returns
    -------
    svals : ndarray
        The shape values for which the PPCC was calculated.
    ppcc : ndarray
        The corresponding probability plot correlation coefficients.

    Raises
    ------
    ValueError
        If ``b <= a``.

    See Also
    --------
    ppcc_max, probplot, boxcox_normplot, tukeylambda
    """
    if b <= a:
        raise ValueError("`b` has to be larger than `a`.")
    shape_vals = np.linspace(a, b, num=N)
    coeffs = np.empty_like(shape_vals)
    # The last element of probplot's fit tuple is the correlation
    # coefficient of the probability plot for that shape value.
    for idx, shape in enumerate(shape_vals):
        _, fit = probplot(x, shape, dist=dist, fit=True)
        coeffs[idx] = fit[-1]
    if plot is not None:
        plot.plot(shape_vals, coeffs, 'x')
        _add_axis_labels_title(plot, xlabel='Shape Values',
                               ylabel='Prob Plot Corr. Coef.',
                               title='(%s) PPCC Plot' % dist)
    return shape_vals, coeffs
def boxcox_llf(lmb, data):
    r"""The boxcox log-likelihood function.

    Evaluates (up to an additive constant)

    .. math::

        llf = (\lambda - 1) \sum_i(\log(x_i)) -
              N/2 \log(\sum_i (y_i - \bar{y})^2 / N),

    where ``y`` is the Box-Cox transform of the input data ``x``.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation.  See `boxcox` for details.
    data : array_like
        Data to calculate Box-Cox log-likelihood for.  For
        multi-dimensional input the log-likelihood is computed along the
        first axis.

    Returns
    -------
    llf : float or ndarray
        Box-Cox log-likelihood of `data` given `lmb`.  A float for 1-D
        `data`, an array otherwise (``nan`` for empty input).

    See Also
    --------
    boxcox, probplot, boxcox_normplot, boxcox_normmax
    """
    arr = np.asarray(data)
    n_obs = arr.shape[0]
    if n_obs == 0:
        return np.nan
    log_x = np.log(arr)
    # Variance of the Box-Cox transformed data.  The additive constant
    # -1/lmb of the transform is dropped: it cannot change the variance,
    # and subtracting it may cost floating-point precision.
    if lmb == 0:
        var_y = np.var(log_x, axis=0)
    else:
        var_y = np.var(arr**lmb / lmb, axis=0)
    return (lmb - 1) * log_x.sum(axis=0) - n_obs / 2 * np.log(var_y)
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None, optimizer=None):
    r"""Return a dataset transformed by a Box-Cox power transformation.

    The transform is ``y = (x**lmbda - 1) / lmbda`` for ``lmbda != 0`` and
    ``log(x)`` for ``lmbda == 0``.  The input data must be positive; no
    shift parameter is provided (add a constant to `x` beforehand if
    needed).

    Parameters
    ----------
    x : ndarray
        Input array.  Must be positive 1-dimensional.  Must not be
        constant.
    lmbda : {None, scalar}, optional
        If not None, do the transformation for that fixed value and return
        only the transformed data.  If None, the lambda maximizing the
        log-likelihood is estimated and returned as the second output.
    alpha : {None, float}, optional
        If not None, also return the ``100 * (1-alpha)%`` confidence
        interval for the estimated lambda as the third output.  Must be
        between 0.0 and 1.0.  Only used when `lmbda` is None.
    optimizer : callable, optional
        Custom scalar optimizer used to minimize the negative
        log-likelihood when `lmbda` is None; see `boxcox_normmax` for the
        expected signature.  Ignored if `lmbda` is given.

    Returns
    -------
    boxcox : ndarray
        Box-Cox power transformed array.
    maxlog : float, optional
        The lambda that maximizes the log-likelihood (only when `lmbda`
        is None).
    (min_ci, max_ci) : tuple of float, optional
        Confidence limits for lambda (only when `lmbda` is None and
        ``alpha`` is given).

    Raises
    ------
    ValueError
        If `x` is not 1-D, is constant, or contains non-positive values.

    See Also
    --------
    probplot, boxcox_normplot, boxcox_normmax, boxcox_llf

    References
    ----------
    G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of
    the Royal Statistical Society B, 26, 211-252 (1964).
    """
    arr = np.asarray(x)
    # Input validation; keep the check order (empty short-circuits,
    # constant is reported before non-positive).
    if arr.ndim != 1:
        raise ValueError("Data must be 1-dimensional.")
    if arr.size == 0:
        return arr
    if np.all(arr == arr[0]):
        raise ValueError("Data must not be constant.")
    if np.any(arr <= 0):
        raise ValueError("Data must be positive.")

    if lmbda is not None:
        # Fixed-parameter transform; nothing to estimate.
        return special.boxcox(arr, lmbda)

    # Estimate lambda by maximizing the log-likelihood, then transform.
    best_lam = boxcox_normmax(arr, method='mle', optimizer=optimizer)
    transformed = boxcox(arr, best_lam)
    if alpha is None:
        return transformed, best_lam
    return transformed, best_lam, _boxcox_conf_interval(arr, best_lam, alpha)
def boxcox_normmax(x, brack=None, method='pearsonr', optimizer=None):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional, default (-2.0, 2.0)
The starting interval for a downhill bracket search for the default
`optimize.brent` solver. Note that this is in most cases not
critical; the final result is allowed to be outside this bracket.
If `optimizer` is passed, `brack` must be None.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
optimizer : callable, optional
`optimizer` is a callable that accepts one argument:
fun : callable
The objective function to be optimized. `fun` accepts one argument,
the Box-Cox transform parameter `lmbda`, and returns the negative
log-likelihood function at the provided value. The job of `optimizer`
is to find the value of `lmbda` that minimizes `fun`.
and returns an object, such as an instance of
`scipy.optimize.OptimizeResult`, which holds the optimal value of
`lmbda` in an attribute `x`.
See the example below or the documentation of
`scipy.optimize.minimize_scalar` for more information.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot, scipy.optimize.minimize_scalar
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We can generate some data and determine the optimal ``lmbda`` in various
ways:
>>> rng = np.random.default_rng()
>>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
1.4613865614008015
>>> lmax_pearsonr
1.6685004886804342
>>> stats.boxcox_normmax(x, method='all')
array([1.66850049, 1.46138656])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
Alternatively, we can define our own `optimizer` function. Suppose we
are only interested in values of `lmbda` on the interval [6, 7], we
want to use `scipy.optimize.minimize_scalar` with ``method='bounded'``,
and we want to use tighter tolerances when optimizing the log-likelihood
function. To do this, we define a function that accepts positional argument
`fun` and uses `scipy.optimize.minimize_scalar` to minimize `fun` subject
to the provided bounds and tolerances:
>>> from scipy import optimize
>>> options = {'xatol': 1e-12} # absolute tolerance on `x`
>>> def optimizer(fun):
... return optimize.minimize_scalar(fun, bounds=(6, 7),
... method="bounded", options=options)
>>> stats.boxcox_normmax(x, optimizer=optimizer)
6.000...
"""
# If optimizer is not given, define default 'brent' optimizer.
if optimizer is None:
# Set default value for `brack`.
if brack is None:
brack = (-2.0, 2.0)
def _optimizer(func, args):
return optimize.brent(func, args=args, brack=brack)
# Otherwise check optimizer.
else:
if not callable(optimizer):
raise ValueError("`optimizer` must be a callable")
if brack is not None:
raise ValueError("`brack` must be None if `optimizer` is given")
# `optimizer` is expected to return a `OptimizeResult` object, we here
# get the solution to the optimization problem.
def _optimizer(func, args):
def func_wrapped(x):
return func(x, *args)
return getattr(optimizer(func_wrapped), 'x', None)
def _pearsonr(x):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return _optimizer(_eval_pearsonr, args=(xvals, x))
def _mle(x):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return _optimizer(_eval_mle, args=(x,))
def _all(x):
maxlog = np.empty(2, dtype=float)
maxlog[0] = _pearsonr(x)
maxlog[1] = _mle(x)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
res = optimfunc(x)
if res is None:
message = ("`optimizer` must return an object containing the optimal "
"`lmbda` in attribute `x`")
raise ValueError(message)
return res
def _normplot(method, x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox or Yeo-Johnson normality plot,
optionally show it.
See `boxcox_normplot` or `yeojohnson_normplot` for details.
"""
if method == 'boxcox':
title = 'Box-Cox Normality Plot'
transform_func = boxcox
else:
title = 'Yeo-Johnson Normality Plot'
transform_func = yeojohnson
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the square root of correlation coefficient
# of transformed x
z = transform_func(x, lmbda=val)
_, (_, _, r) = probplot(z, dist='norm', fit=True)
ppcc[i] = r
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\\lambda$',
ylabel='Prob Plot Corr. Coef.',
title=title)
return lmbdas, ppcc
def boxcox_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Box-Cox normality plot, optionally show it.

    A Box-Cox normality plot shows graphically what the best
    transformation parameter is to use in `boxcox` to obtain a
    distribution close to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        Lower and upper bounds of the ``lmbda`` values to pass to
        `boxcox`; also the horizontal-axis limits of any generated plot.
    plot : object, optional
        Object with "plot" and "text" methods (e.g. `matplotlib.pyplot`
        or a Matplotlib Axes) on which to draw.  Default None (no plot).
    N : int, optional
        Number of points on the horizontal axis (equally distributed
        from `la` to `lb`).

    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Box-Cox transform was done.
    ppcc : ndarray
        Probability plot correlation coefficients, as obtained from
        `probplot` when fitting the transformed input against a normal
        distribution.

    See Also
    --------
    probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved; call
    ``plt.show()`` or ``plt.savefig('figname.png')`` afterwards.
    """
    return _normplot('boxcox', x, la, lb, plot=plot, N=N)
def yeojohnson(x, lmbda=None):
    r"""Return a dataset transformed by a Yeo-Johnson power transformation.

    The transform is given by::

        y = ((x + 1)**lmbda - 1) / lmbda,                for x >= 0, lmbda != 0
            log(x + 1),                                  for x >= 0, lmbda = 0
            -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda),  for x < 0, lmbda != 2
            -log(-x + 1),                                for x < 0, lmbda = 2

    Unlike `boxcox`, `yeojohnson` does not require the input data to be
    positive.

    Parameters
    ----------
    x : ndarray
        Input array.  Should be 1-dimensional.
    lmbda : float, optional
        If None (default), the lambda maximizing the log-likelihood is
        estimated and returned as the second output; otherwise the
        transform is applied with the given value.

    Returns
    -------
    yeojohnson : ndarray
        Yeo-Johnson power transformed array.
    maxlog : float, optional
        The lambda that maximizes the log-likelihood (only when `lmbda`
        is None).

    Raises
    ------
    ValueError
        If `x` contains complex numbers.

    See Also
    --------
    probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox

    Notes
    -----
    .. versionadded:: 1.2.0

    References
    ----------
    I. Yeo and R.A. Johnson, "A New Family of Power Transformations to
    Improve Normality or Symmetry", Biometrika 87.4 (2000).
    """
    arr = np.asarray(x)
    if arr.size == 0:
        return arr
    if np.issubdtype(arr.dtype, np.complexfloating):
        raise ValueError('Yeo-Johnson transformation is not defined for '
                         'complex numbers.')
    # Work in floating point so the power/log operations are well defined.
    if np.issubdtype(arr.dtype, np.integer):
        arr = arr.astype(np.float64, copy=False)

    if lmbda is not None:
        return _yeojohnson_transform(arr, lmbda)

    # Estimate lambda by maximum likelihood, then transform.
    lmax = yeojohnson_normmax(arr)
    return _yeojohnson_transform(arr, lmax), lmax
def _yeojohnson_transform(x, lmbda):
    """Return `x` transformed by the Yeo-Johnson power transform with the
    given parameter `lmbda`.
    """
    result = np.zeros_like(x)
    nonneg = x >= 0  # boolean mask selecting the x >= 0 branch

    # Non-negative piece; switch to log1p at the removable singularity
    # lmbda == 0 (within one ulp) to keep precision.
    if abs(lmbda) < np.spacing(1.):
        result[nonneg] = np.log1p(x[nonneg])
    else:
        result[nonneg] = (np.power(x[nonneg] + 1, lmbda) - 1) / lmbda

    # Negative piece; the singular parameter value here is lmbda == 2.
    neg = ~nonneg
    if abs(lmbda - 2) > np.spacing(1.):
        result[neg] = -(np.power(1 - x[neg], 2 - lmbda) - 1) / (2 - lmbda)
    else:
        result[neg] = -np.log1p(-x[neg])

    return result
def yeojohnson_llf(lmb, data):
    r"""The yeojohnson log-likelihood function.

    Defined here as

    .. math::

        llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1)
              \sum_i \text{ sign }(x_i)\log(|x_i| + 1)

    where :math:`\hat{\sigma}^2` is the estimated variance of the
    Yeo-Johnson transformed input data ``x``.

    Parameters
    ----------
    lmb : scalar
        Parameter for Yeo-Johnson transformation.  See `yeojohnson` for
        details.
    data : array_like
        Data to calculate Yeo-Johnson log-likelihood for.  For
        multi-dimensional input the log-likelihood is computed along the
        first axis.

    Returns
    -------
    llf : float
        Yeo-Johnson log-likelihood of `data` given `lmb` (``nan`` for
        empty input).

    See Also
    --------
    yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax

    Notes
    -----
    .. versionadded:: 1.2.0
    """
    arr = np.asarray(data)
    n_obs = arr.shape[0]
    if n_obs == 0:
        return np.nan
    transformed = _yeojohnson_transform(arr, lmb)
    # Gaussian log-likelihood of the transformed data ...
    variance_term = -n_obs / 2 * np.log(transformed.var(axis=0))
    # ... plus the log of the Jacobian of the transformation.
    jacobian_term = (lmb - 1) * np.sum(np.sign(arr) * np.log(np.abs(arr) + 1),
                                       axis=0)
    return variance_term + jacobian_term
def yeojohnson_normmax(x, brack=(-2, 2)):
"""Compute optimal Yeo-Johnson transform parameter.
Compute optimal Yeo-Johnson transform parameter for input data, using
maximum likelihood estimation.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
Returns
-------
maxlog : float
The optimal transform parameter found.
See Also
--------
yeojohnson, yeojohnson_llf, yeojohnson_normplot
Notes
-----
.. versionadded:: 1.2.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some data and determine optimal ``lmbda``
>>> rng = np.random.default_rng()
>>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5
>>> lmax = stats.yeojohnson_normmax(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax, color='r')
>>> plt.show()
"""
def _neg_llf(lmbda, data):
return -yeojohnson_llf(lmbda, data)
return optimize.brent(_neg_llf, brack=brack, args=(x,))
def yeojohnson_normplot(x, la, lb, plot=None, N=80):
    """Compute parameters for a Yeo-Johnson normality plot, optionally show it.

    A Yeo-Johnson normality plot shows graphically what the best
    transformation parameter is to use in `yeojohnson` to obtain a
    distribution close to normal.

    Parameters
    ----------
    x : array_like
        Input array.
    la, lb : scalar
        Lower and upper bounds of the ``lmbda`` values to pass to
        `yeojohnson`; also the horizontal-axis limits of any generated
        plot.
    plot : object, optional
        Object with "plot" and "text" methods (e.g. `matplotlib.pyplot`
        or a Matplotlib Axes) on which to draw.  Default None (no plot).
    N : int, optional
        Number of points on the horizontal axis (equally distributed
        from `la` to `lb`).

    Returns
    -------
    lmbdas : ndarray
        The ``lmbda`` values for which a Yeo-Johnson transform was done.
    ppcc : ndarray
        Probability plot correlation coefficients, as obtained from
        `probplot` when fitting the transformed input against a normal
        distribution.

    See Also
    --------
    probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved; call
    ``plt.show()`` or ``plt.savefig('figname.png')`` afterwards.

    .. versionadded:: 1.2.0
    """
    return _normplot('yeojohnson', x, la, lb, plot=plot, N=N)
ShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue'))


def shapiro(x):
    """Perform the Shapiro-Wilk test for normality.

    The Shapiro-Wilk test tests the null hypothesis that the data was
    drawn from a normal distribution.

    Parameters
    ----------
    x : array_like
        Array of sample data.  Must contain at least 3 observations.

    Returns
    -------
    statistic : float
        The test statistic (W).
    p-value : float
        The p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If fewer than 3 data points are supplied.

    See Also
    --------
    anderson : The Anderson-Darling test for normality
    kstest : The Kolmogorov-Smirnov test for goodness of fit.

    Notes
    -----
    The algorithm used is AS R94 (see reference), without the censoring
    parameters it describes.  For N > 5000 the W statistic is accurate
    but the p-value may not be, and a warning is emitted.

    References
    ----------
    ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
    """
    samples = np.ravel(x)
    n = len(samples)
    if n < 3:
        raise ValueError("Data must be at least length 3.")

    work = zeros(n, 'f')
    ordered = sort(samples)
    # swilk returns the weight workspace, the W statistic, its p-value
    # and a fault indicator.
    work, w, pw, ifault = statlib.swilk(ordered, work[:n // 2], 0)
    if ifault not in [0, 2]:
        warnings.warn("Input data for shapiro has range zero. The results "
                      "may not be accurate.")
    if n > 5000:
        warnings.warn("p-value may not be accurate for N > 5000.")

    return ShapiroResult(w, pw)
# Anderson-Darling critical-value tables used by `anderson` below.  Each
# array holds asymptotic critical values of the A^2 statistic, one entry
# per significance level; `anderson` rescales them by a finite-sample
# correction factor before returning them.
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
# Significance levels: 15%, 10%, 5%, 2.5%, 1%
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
# Significance levels: 25%, 10%, 5%, 2.5%, 1%
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
# Significance levels: 25%, 10%, 5%, 2.5%, 1%, 0.5%
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
# Result type returned by `anderson`: the A^2 statistic together with the
# matching critical values and their significance levels (in percent).
AndersonResult = namedtuple('AndersonResult', ('statistic',
                                               'critical_values',
                                               'significance_level'))
def anderson(x, dist='norm'):
    """Anderson-Darling test for data coming from a particular distribution.

    Tests the null hypothesis that a sample is drawn from a population
    following the chosen distribution.  The critical values depend on the
    distribution; normal, exponential, logistic and Gumbel (Extreme Value
    Type I) families are supported.

    Parameters
    ----------
    x : array_like
        Array of sample data.
    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1'}, optional
        The type of distribution to test against; default 'norm'.  The
        names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the
        same distribution.

    Returns
    -------
    statistic : float
        The Anderson-Darling test statistic ('A2' in the references).
    critical_values : list
        Critical values for this distribution.
    significance_level : list
        Significance levels (in percent) matching `critical_values`:
        15%, 10%, 5%, 2.5%, 1% for normal/exponential;
        25%, 10%, 5%, 2.5%, 1%, 0.5% for logistic;
        25%, 10%, 5%, 2.5%, 1% for Gumbel.

    Raises
    ------
    ValueError
        If `dist` is not one of the supported distribution names.

    See Also
    --------
    kstest : The Kolmogorov-Smirnov test for goodness-of-fit.

    Notes
    -----
    If the returned statistic exceeds one of the critical values, the
    null hypothesis can be rejected at the corresponding significance
    level.

    References
    ----------
    Stephens, M. A. (1974, 1976, 1977, 1979); see the citations next to
    the ``_Avals_*`` tables in this module.
    """
    if dist not in ('norm', 'expon', 'gumbel', 'gumbel_l',
                    'gumbel_r', 'extreme1', 'logistic'):
        raise ValueError("Invalid distribution; dist must be 'norm', "
                         "'expon', 'gumbel', 'extreme1' or 'logistic'.")

    y = sort(x)
    xbar = np.mean(x, axis=0)
    N = len(y)

    if dist == 'norm':
        # Standardize with the sample mean and unbiased std.
        sd = np.std(x, ddof=1, axis=0)
        z = (y - xbar) / sd
        logcdf = distributions.norm.logcdf(z)
        logsf = distributions.norm.logsf(z)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
    elif dist == 'expon':
        z = y / xbar
        logcdf = distributions.expon.logcdf(z)
        logsf = distributions.expon.logsf(z)
        sig = array([15, 10, 5, 2.5, 1])
        critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
    elif dist == 'logistic':
        # Location/scale from the ML estimating equations, solved
        # numerically with fsolve.
        def rootfunc(ab, xj, N):
            a, b = ab
            tmp = (xj - a) / b
            tmp2 = exp(tmp)
            val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
                   np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
            return array(val)

        initial = array([xbar, np.std(x, ddof=1, axis=0)])
        fitted = optimize.fsolve(rootfunc, initial, args=(x, N), xtol=1e-5)
        z = (y - fitted[0]) / fitted[1]
        logcdf = distributions.logistic.logcdf(z)
        logsf = distributions.logistic.logsf(z)
        sig = array([25, 10, 5, 2.5, 1, 0.5])
        critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
    elif dist == 'gumbel_r':
        loc, scale = distributions.gumbel_r.fit(x)
        z = (y - loc) / scale
        logcdf = distributions.gumbel_r.logcdf(z)
        logsf = distributions.gumbel_r.logsf(z)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
    else:
        # 'gumbel', 'gumbel_l' and 'extreme1' all mean the left-skewed Gumbel.
        loc, scale = distributions.gumbel_l.fit(x)
        z = (y - loc) / scale
        logcdf = distributions.gumbel_l.logcdf(z)
        logsf = distributions.gumbel_l.logsf(z)
        sig = array([25, 10, 5, 2.5, 1])
        critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)

    # A^2 statistic from the ordered log-CDF/log-SF values.
    i = arange(1, N + 1)
    A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)

    return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
# Result container for `anderson_ksamp`: the normalized test statistic,
# the array of critical values, and the floored/capped significance level.
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
                                  ('statistic', 'critical_values',
                                   'significance_level'))
def anderson_ksamp(samples, midrank=True):
    """The Anderson-Darling test for k-samples.

    The k-sample Anderson-Darling test is a modification of the
    one-sample Anderson-Darling test.  It tests the null hypothesis that
    k samples are drawn from the same population without having to
    specify the distribution function of that population.  The critical
    values depend on the number of samples.

    Parameters
    ----------
    samples : sequence of 1-D array_like
        Array of sample data in arrays.
    midrank : bool, optional
        Type of Anderson-Darling test which is computed. Default (True)
        is the midrank test applicable to continuous and discrete
        populations. If False, the right side empirical distribution is
        used.

    Returns
    -------
    statistic : float
        Normalized k-sample Anderson-Darling test statistic.
    critical_values : array
        The critical values for significance levels 25%, 10%, 5%, 2.5%,
        1%, 0.5%, 0.1%.
    significance_level : float
        An approximate significance level at which the null hypothesis
        for the provided samples can be rejected. The value is floored /
        capped at 0.1% / 25%.

    Raises
    ------
    ValueError
        If less than 2 samples are provided, a sample is empty, or no
        distinct observations are in the samples.

    See Also
    --------
    ks_2samp : 2 sample Kolmogorov-Smirnov test
    anderson : 1 sample Anderson-Darling test

    Notes
    -----
    [1]_ defines three versions of the k-sample Anderson-Darling test:
    one for continuous distributions and two for discrete distributions,
    in which ties between samples may occur.  The default of this
    routine is to compute the version based on the midrank empirical
    distribution function, applicable to continuous and discrete data.
    If `midrank` is False, the right side empirical distribution is used
    for a test for discrete data.

    The critical values corresponding to the significance levels from
    0.01 to 0.25 are taken from [1]_.  p-values are floored / capped at
    0.1% / 25%; since the range of critical values might be extended in
    future releases, it is recommended to test ``p >= 0.25`` rather than
    ``p == 0.25`` (analogously for the lower bound).

    .. versionadded:: 0.14.0

    References
    ----------
    .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
           Anderson-Darling Tests, Journal of the American Statistical
           Association, Vol. 82, pp. 918-924.
    """
    k = len(samples)
    if k < 2:
        raise ValueError("anderson_ksamp needs at least two samples")
    samples = list(map(np.asarray, samples))
    Z = np.sort(np.hstack(samples))
    N = Z.size
    Zstar = np.unique(Z)
    if Zstar.size < 2:
        raise ValueError("anderson_ksamp needs more than one distinct "
                         "observation")
    n = np.array([sample.size for sample in samples])
    if np.any(n == 0):
        raise ValueError("anderson_ksamp encountered sample without "
                         "observations")
    # Raw (unnormalized) statistic from eq. 6 or 7 of Scholz & Stephens.
    if midrank:
        A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
    else:
        A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
    # Null-hypothesis variance of A2kN (Scholz & Stephens 1987, eq. 4).
    H = (1. / n).sum()
    hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
    h = hs_cs[-1] + 1
    g = (hs_cs / arange(2, N)).sum()
    a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
    b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
    c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
    d = (2*h + 6)*k**2 - 4*h*k
    sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
    m = k - 1
    # Normalized statistic: mean m under H0, unit variance.
    A2 = (A2kN - m) / math.sqrt(sigmasq)
    # The b_i values are the interpolation coefficients from Table 2
    # of Scholz and Stephens 1987
    b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085])
    b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615])
    b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154])
    critical = b0 + b1 / math.sqrt(m) + b2 / m
    sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001])
    if A2 < critical.min():
        # Statistic below the smallest tabulated value: cap the p-value.
        p = sig.max()
        warnings.warn("p-value capped: true value larger than {}".format(p),
                      stacklevel=2)
    elif A2 > critical.max():
        # Statistic above the largest tabulated value: floor the p-value.
        p = sig.min()
        warnings.warn("p-value floored: true value smaller than {}".format(p),
                      stacklevel=2)
    else:
        # Interpolate log(sig) with a quadratic in the critical values
        # and evaluate it at the observed statistic.
        pf = np.polyfit(critical, log(sig), 2)
        p = math.exp(np.polyval(pf, A2))
    return Anderson_ksampResult(A2, critical, p)
# Result container for `ansari`: the AB statistic and its p-value.
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
class _ABW:
    """Distribution of Ansari-Bradley W-statistic under the null hypothesis."""
    # TODO: calculate exact distribution considering ties
    # We could avoid summing over more than half the frequencies,
    # but initially it doesn't seem worth the extra complexity

    def __init__(self):
        """Minimal initializer."""
        # All state is computed lazily by `_recalc` on first use.
        self.m = None
        self.n = None
        self.astart = None
        self.total = None
        self.freqs = None

    def _recalc(self, n, m):
        """When necessary, recalculate exact distribution."""
        if (n, m) == (self.n, self.m):
            return
        self.n, self.m = n, m
        # distribution is NOT symmetric when m + n is odd
        # n is len(x), m is len(y), and ratio of scales is defined x/y
        astart, a1, _ = statlib.gscale(n, m)
        self.astart = astart  # minimum value of statistic
        # Exact distribution of the test statistic under the null
        # hypothesis expressed as frequencies/counts/integers to keep
        # precision; stored as floats to avoid overflow of sums.
        self.freqs = a1.astype(np.float64)
        self.total = self.freqs.sum()  # could calculate from m and n
        # probability mass is self.freqs / self.total

    def pmf(self, k, n, m):
        """Probability mass function."""
        self._recalc(n, m)
        # The convention here is that PMF at k = 12.5 is the same as at
        # k = 12, hence `floor` in case of ties.
        idx = np.floor(k - self.astart).astype(int)
        return self.freqs[idx] / self.total

    def cdf(self, k, n, m):
        """Cumulative distribution function."""
        self._recalc(n, m)
        # Null distribution derived without considering ties is
        # approximate. Round down to avoid Type I error.
        idx = np.ceil(k - self.astart).astype(int)
        return self.freqs[:idx + 1].sum() / self.total

    def sf(self, k, n, m):
        """Survival function."""
        self._recalc(n, m)
        # Null distribution derived without considering ties is
        # approximate. Round down to avoid Type I error.
        idx = np.floor(k - self.astart).astype(int)
        return self.freqs[idx:].sum() / self.total
# Maintain state for faster repeat calls to ansari w/ method='exact'
# (module-level singleton; `_ABW._recalc` caches the last (n, m) pair).
_abw_state = _ABW()
def ansari(x, y, alternative='two-sided'):
    """Perform the Ansari-Bradley test for equal scale parameters.

    The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test
    for the equality of the scale parameter of the distributions
    from which two samples were drawn. The null hypothesis states that
    the ratio of the scale of the distribution underlying `x` to the scale
    of the distribution underlying `y` is 1.

    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the ratio of scales is not equal to 1.
        * 'less': the ratio of scales is less than 1.
        * 'greater': the ratio of scales is greater than 1.

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : float
        The Ansari-Bradley test statistic.
    pvalue : float
        The p-value of the hypothesis test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters

    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.

    References
    ----------
    .. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for
           dispersions, Annals of Mathematical Statistics, 31, 1174-1189.
    .. [2] Sprent, Peter and N.C. Smeeton. Applied nonparametric
           statistical methods. 3rd ed. Chapman and Hall/CRC. 2001.
           Section 5.8.2.
    .. [3] Nathaniel E. Helwig "Nonparametric Dispersion and Equality
           Tests" at http://users.stat.umn.edu/~helwig/notes/npde-Notes.pdf

    Examples
    --------
    >>> from scipy.stats import ansari
    >>> rng = np.random.default_rng()

    For these examples, we'll create three random data sets.  The first
    two, with sizes 35 and 25, are drawn from a normal distribution with
    mean 0 and standard deviation 2.  The third data set has size 25 and
    is drawn from a normal distribution with standard deviation 1.25.

    >>> x1 = rng.normal(loc=0, scale=2, size=35)
    >>> x2 = rng.normal(loc=0, scale=2, size=25)
    >>> x3 = rng.normal(loc=0, scale=1.25, size=25)

    First we apply `ansari` to `x1` and `x2`.  These samples are drawn
    from the same distribution, so we expect the Ansari-Bradley test
    should not lead us to conclude that the scales of the distributions
    are different.

    >>> ansari(x1, x2)
    AnsariResult(statistic=541.0, pvalue=0.9762532927399098)

    With a p-value close to 1, we cannot conclude that there is a
    significant difference in the scales (as expected).

    Now apply the test to `x1` and `x3`:

    >>> ansari(x1, x3)
    AnsariResult(statistic=425.0, pvalue=0.0003087020407974518)

    The probability of observing such an extreme value of the statistic
    under the null hypothesis of equal scales is only 0.03087%. We take this
    as evidence against the null hypothesis in favor of the alternative:
    the scales of the distributions from which the samples were drawn
    are not equal.

    We can use the `alternative` parameter to perform a one-tailed test.
    In the above example, the scale of `x1` is greater than `x3` and so
    the ratio of scales of `x1` and `x3` is greater than 1. This means
    that the p-value when ``alternative='greater'`` should be near 0 and
    hence we should be able to reject the null hypothesis:

    >>> ansari(x1, x3, alternative='greater')
    AnsariResult(statistic=425.0, pvalue=0.0001543510203987259)

    As we can see, the p-value is indeed quite low. Use of
    ``alternative='less'`` should thus yield a large p-value:

    >>> ansari(x1, x3, alternative='less')
    AnsariResult(statistic=425.0, pvalue=0.9998643258449039)
    """
    if alternative not in {'two-sided', 'greater', 'less'}:
        raise ValueError("'alternative' must be 'two-sided',"
                         " 'greater', or 'less'.")
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")
    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    # Ansari-Bradley symmetric ranks: rank inward from both ends of the
    # pooled sorted sample, so extreme observations get small ranks.
    symrank = amin(array((rank, N - rank + 1)), 0)
    # The AB statistic is the sum of the symmetric ranks of the x sample.
    AB = np.sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    # The exact null distribution is only used for small, tie-free data.
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and (m < 55 or n < 55):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        if alternative == 'two-sided':
            pval = 2.0 * np.minimum(_abw_state.cdf(AB, n, m),
                                    _abw_state.sf(AB, n, m))
        elif alternative == 'greater':
            # AB statistic is _smaller_ when ratio of scales is larger,
            # so this is the opposite of the usual calculation
            pval = _abw_state.cdf(AB, n, m)
        else:
            pval = _abw_state.sf(AB, n, m)
        # Doubling min(cdf, sf) can exceed 1, hence the cap.
        return AnsariResult(AB, min(1.0, pval))
    # otherwise compute normal approximation
    if N % 2:  # N odd
        mnAB = n * (N+1.0)**2 / 4.0 / N
        varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N+2.0) / 4.0
        varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
    if repeats:  # adjust variance estimates
        # compute np.sum(tj * rj**2,axis=0)
        fac = np.sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
        else:  # N even
            varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
    # Small values of AB indicate larger dispersion for the x sample.
    # Large values of AB indicate larger dispersion for the y sample.
    # This is opposite to the way we define the ratio of scales. see [1]_.
    z = (mnAB - AB) / sqrt(varAB)
    z, pval = _normtest_finish(z, alternative)
    return AnsariResult(AB, pval)
# Result container for `bartlett`: the T statistic and its chi2 p-value.
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
    """Perform Bartlett's test for equal variances.

    Bartlett's test tests the null hypothesis that all input samples are
    from populations with equal variances.  For samples from
    significantly non-normal populations, Levene's test `levene` is more
    robust.

    Parameters
    ----------
    sample1, sample2,... : array_like
        arrays of sample data. Only 1d arrays are accepted, they may
        have different lengths.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value of the test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    levene : A robust parametric test for equality of k variances

    References
    ----------
    .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
    .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
           Methods, Eighth Edition, Iowa State University Press.
    .. [3] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
           Tests. Proceedings of the Royal Society of London. Series A,
           Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.

    Examples
    --------
    >>> from scipy.stats import bartlett
    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
    >>> stat, p = bartlett(a, b, c)
    >>> p
    1.1254782518834628e-05
    """
    # Reject multi-dimensional input and short-circuit on any empty
    # sample (NaN result, matching the original behavior).
    for sample in args:
        arr = np.asanyarray(sample)
        if arr.size == 0:
            return BartlettResult(np.nan, np.nan)
        if arr.ndim > 1:
            raise ValueError('Samples must be one-dimensional.')
    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    # Per-sample sizes and unbiased variances.
    Ni = np.array([len(sample) for sample in args], dtype=float)
    ssq = np.array([np.var(sample, ddof=1) for sample in args], dtype='d')
    Ntot = np.sum(Ni, axis=0)
    # Pooled variance across all samples.
    spsq = np.sum((Ni - 1) * ssq, axis=0) / (1.0 * (Ntot - k))
    # Bartlett's statistic with the standard correction factor `denom`.
    numer = (Ntot * 1.0 - k) * log(spsq) - np.sum((Ni - 1.0) * log(ssq),
                                                  axis=0)
    denom = 1.0 + 1.0 / (3 * (k - 1)) * ((np.sum(1.0 / (Ni - 1.0), axis=0)) -
                                         1.0 / (Ntot - k))
    T = numer / denom
    pval = distributions.chi2.sf(T, k - 1)  # 1 - cdf
    return BartlettResult(T, pval)
# Result container for `levene`: the W statistic and its F-distribution p-value.
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, center='median', proportiontocut=0.05):
    """Perform Levene test for equal variances.

    The Levene test tests the null hypothesis that all input samples are
    from populations with equal variances.  Levene's test is an
    alternative to Bartlett's test `bartlett` in the case where there
    are significant deviations from normality.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample data, possibly with different lengths. Only
        one-dimensional samples are accepted.
    center : {'mean', 'median', 'trimmed'}, optional
        Which function of the data to use in the test. The default
        is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data
        points to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the test.

    Notes
    -----
    Three variations of Levene's test are possible.  The possibilities
    and their recommended usages are:

    * 'median' : Recommended for skewed (non-normal) distributions.
    * 'mean' : Recommended for symmetric, moderate-tailed distributions.
    * 'trimmed' : Recommended for heavy-tailed distributions.

    The test version using the mean was proposed in the original article
    of Levene ([2]_) while the median and trimmed mean have been studied
    by Brown and Forsythe ([3]_), sometimes also referred to as the
    Brown-Forsythe test.

    References
    ----------
    .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
    .. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
           Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
           Stanford University Press, pp. 278-292.
    .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
           Statistical Association, 69, 364-367

    Examples
    --------
    >>> from scipy.stats import levene
    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
    >>> stat, p = levene(a, b, c)
    >>> p
    0.002431505967249681
    """
    if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("center must be 'mean', 'median' or 'trimmed'.")
    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    # check for 1d input
    for sample in args:
        if np.asanyarray(sample).ndim > 1:
            raise ValueError('Samples must be one-dimensional.')
    # Select the per-group centering function; for 'trimmed' the samples
    # themselves are replaced by their trimmed versions first.
    if center == 'median':
        func = lambda x: np.median(x, axis=0)
    elif center == 'mean':
        func = lambda x: np.mean(x, axis=0)
    else:  # center == 'trimmed'
        args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
                     for arg in args)
        func = lambda x: np.mean(x, axis=0)
    Ni = np.array([len(sample) for sample in args], dtype=float)
    Yci = np.array([func(sample) for sample in args], dtype='d')
    Ntot = np.sum(Ni, axis=0)
    # Absolute deviations of each observation from its group center.
    Zij = [abs(asarray(sample) - ctr) for sample, ctr in zip(args, Yci)]
    # Group means of the deviations and their size-weighted grand mean.
    Zbari = np.array([np.mean(dev, axis=0) for dev in Zij], dtype='d')
    Zbar = np.sum(Zbari * Ni, axis=0) / Ntot
    numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
    # Within-group sum of squared deviations (denominator variance).
    dvar = sum(np.sum((dev - zb)**2, axis=0)
               for dev, zb in zip(Zij, Zbari))
    denom = (k - 1.0) * dvar
    W = numer / denom
    pval = distributions.f.sf(W, k - 1, Ntot - k)  # 1 - cdf
    return LeveneResult(W, pval)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
    """Perform a test that the probability of success is p.

    Note: `binom_test` is deprecated; it is recommended that `binomtest`
    be used instead.

    This is an exact, two-sided test of the null hypothesis that the
    probability of success in a Bernoulli experiment is `p`.

    Parameters
    ----------
    x : int or array_like
        The number of successes, or if x has length 2, it is the
        number of successes and the number of failures.
    n : int
        The number of trials.  This is ignored if x gives both the
        number of successes and failures.
    p : float, optional
        The hypothesized probability of success.  ``0 <= p <= 1``. The
        default value is ``p = 0.5``.
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis. The default value is
        'two-sided'.

    Returns
    -------
    p-value : float
        The p-value of the hypothesis test.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Binomial_test

    Examples
    --------
    >>> from scipy import stats
    >>> stats.binom_test(3, n=15, p=0.1, alternative='greater')
    0.18406106910639114
    """
    x = atleast_1d(x).astype(np.int_)
    # Normalize input: either (successes, failures) or (successes, n).
    if len(x) == 2:
        n = x[1] + x[0]
        x = x[0]
    elif len(x) == 1:
        x = x[0]
        if n is None or n < x:
            raise ValueError("n must be >= x")
        n = np.int_(n)
    else:
        raise ValueError("Incorrect length for x.")
    if (p > 1.0) or (p < 0.0):
        raise ValueError("p must be in range [0,1]")
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    # One-sided tests are simple tail probabilities.
    if alternative == 'less':
        return distributions.binom.cdf(x, n, p)
    if alternative == 'greater':
        return distributions.binom.sf(x - 1, n, p)
    # Two-sided: add the probability of every outcome at most as likely
    # as the observed one (within a small relative tolerance `rerr`).
    d = distributions.binom.pmf(x, n, p)
    rerr = 1 + 1e-7
    if x == p * n:
        # special case as shortcut, would also be handled by `else` below
        pval = 1.
    elif x < p * n:
        # Count upper-tail outcomes no more likely than the observed x.
        i = np.arange(np.ceil(p * n), n + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=0)
        pval = (distributions.binom.cdf(x, n, p) +
                distributions.binom.sf(n - y, n, p))
    else:
        # Count lower-tail outcomes no more likely than the observed x.
        i = np.arange(np.floor(p * n) + 1)
        y = np.sum(distributions.binom.pmf(i, n, p) <= d * rerr, axis=0)
        pval = (distributions.binom.cdf(y - 1, n, p) +
                distributions.binom.sf(x - 1, n, p))
    return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)]
return asarray(output)
# Result container for `fligner`: the Xsq statistic and its chi2 p-value.
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, center='median', proportiontocut=0.05):
    """Perform Fligner-Killeen test for equality of variance.

    Fligner's test tests the null hypothesis that all input samples are
    from populations with equal variances.  Fligner-Killeen's test is
    distribution free when populations are identical [2]_.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Arrays of sample data.  Need not be the same length.
    center : {'mean', 'median', 'trimmed'}, optional
        Keyword argument controlling which function of the data is used
        in computing the test statistic.  The default is 'median'.
    proportiontocut : float, optional
        When `center` is 'trimmed', this gives the proportion of data
        points to cut from each end. (See `scipy.stats.trim_mean`.)
        Default is 0.05.

    Returns
    -------
    statistic : float
        The test statistic.
    pvalue : float
        The p-value for the hypothesis test.

    See Also
    --------
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A robust parametric test for equality of k variances

    Notes
    -----
    As with Levene's test there are three variants of Fligner's test
    that differ by the measure of central tendency used in the test.
    See `levene` for more information.

    References
    ----------
    .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
           https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf
    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
           tests for scale. 'Journal of the American Statistical Association.'
           71(353), 210-213.
    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
    .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
           comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf biding data.
           Technometrics, 23(4), 351-361.

    Examples
    --------
    >>> from scipy.stats import fligner
    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
    >>> stat, p = fligner(a, b, c)
    >>> p
    0.00450826080004775
    """
    if center not in ['mean', 'median', 'trimmed']:
        raise ValueError("center must be 'mean', 'median' or 'trimmed'.")
    # Any empty sample short-circuits to a NaN result.
    for sample in args:
        if np.asanyarray(sample).size == 0:
            return FlignerResult(np.nan, np.nan)
    k = len(args)
    if k < 2:
        raise ValueError("Must enter at least two input sample vectors.")
    # Per-group centering function; 'trimmed' replaces the samples by
    # their trimmed versions first.
    if center == 'median':
        func = lambda x: np.median(x, axis=0)
    elif center == 'mean':
        func = lambda x: np.mean(x, axis=0)
    else:  # center == 'trimmed'
        args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
        func = lambda x: np.mean(x, axis=0)
    Ni = asarray([len(sample) for sample in args])
    Yci = asarray([func(sample) for sample in args])
    Ntot = np.sum(Ni, axis=0)
    # Pool the absolute deviations from group centers, recording the
    # group boundary indices in `g`.
    Zij = [abs(asarray(sample) - ctr) for sample, ctr in zip(args, Yci)]
    allZij = []
    g = [0]
    for dev in Zij:
        allZij.extend(list(dev))
        g.append(len(allZij))
    # Normal scores of the ranks of the pooled deviations.
    ranks = stats.rankdata(allZij)
    a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
    # Group means of the scores vs the overall mean, scaled by variance.
    Aibar = _apply_func(a, g, np.sum) / Ni
    anbar = np.mean(a, axis=0)
    varsq = np.var(a, axis=0, ddof=1)
    Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
    pval = distributions.chi2.sf(Xsq, k - 1)  # 1 - cdf
    return FlignerResult(Xsq, pval)
def mood(x, y, axis=0, alternative="two-sided"):
    """Perform Mood's test for equal scale parameters.

    Mood's two-sample test for scale parameters is a non-parametric
    test for the null hypothesis that two samples are drawn from the
    same distribution with the same scale parameter.

    Parameters
    ----------
    x, y : array_like
        Arrays of sample data.
    axis : int, optional
        The axis along which the samples are tested. `x` and `y` can be of
        different length along `axis`.
        If `axis` is None, `x` and `y` are flattened and the test is done on
        all values in the flattened arrays.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the scales of the distributions underlying `x` and `y`
          are different.
        * 'less': the scale of the distribution underlying `x` is less than
          the scale of the distribution underlying `y`.
        * 'greater': the scale of the distribution underlying `x` is greater
          than the scale of the distribution underlying `y`.

        .. versionadded:: 1.7.0

    Returns
    -------
    z : scalar or ndarray
        The z-score for the hypothesis test.  For 1-D inputs a scalar is
        returned.
    p-value : scalar ndarray
        The p-value for the hypothesis test.

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    ansari : A non-parametric test for the equality of 2 variances
    bartlett : A parametric test for equality of k variances in normal samples
    levene : A parametric test for equality of k variances

    Notes
    -----
    The data are assumed to be drawn from probability distributions ``f(x)``
    and ``f(x/s) / s`` respectively, for some probability density function f.
    The null hypothesis is that ``s == 1``.

    For multi-dimensional arrays, if the inputs are of shapes
    ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
    resulting z and p values will have shape ``(n0, n2, n3)``.  Note that
    ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x2 = rng.standard_normal((2, 45, 6, 7))
    >>> x1 = rng.standard_normal((2, 30, 6, 7))
    >>> z, p = stats.mood(x1, x2, axis=1)
    >>> p.shape
    (2, 6, 7)

    Find the number of points where the difference in scale is not significant:

    >>> (p > 0.1).sum()
    78

    Perform the test with different scales:

    >>> x1 = rng.standard_normal((2, 30))
    >>> x2 = rng.standard_normal((2, 35)) * 10.0
    >>> stats.mood(x1, x2, axis=1)
    (array([-5.76174136, -6.12650783]), array([8.32505043e-09, 8.98287869e-10]))

    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    if axis is None:
        # Flatten both samples and test along the single remaining axis.
        x = x.flatten()
        y = y.flatten()
        axis = 0
    # Determine shape of the result arrays: all dimensions except `axis`.
    res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
    if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
                                ax != axis])):
        raise ValueError("Dimensions of x and y on all axes except `axis` "
                         "should match")
    n = x.shape[axis]
    m = y.shape[axis]
    N = m + n
    if N < 3:
        raise ValueError("Not enough observations.")
    xy = np.concatenate((x, y), axis=axis)
    if axis != 0:
        # Move the tested axis to the front so ranking can loop over columns.
        xy = np.rollaxis(xy, axis)
    xy = xy.reshape(xy.shape[0], -1)
    # Generalized to the n-dimensional case by adding the axis argument, and
    # using for loops, since rankdata is not vectorized.  For improving
    # performance consider vectorizing rankdata function.
    all_ranks = np.empty_like(xy)
    for j in range(xy.shape[1]):
        all_ranks[:, j] = stats.rankdata(xy[:, j])
    Ri = all_ranks[:n]
    # Mood statistic: sum of squared deviations of x's ranks from the
    # mid-rank (N + 1) / 2 of the pooled sample.
    M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
    # Approx stat.  Null mean and variance of M, see Mood (1954).
    mnM = n * (N * N - 1.0) / 12
    varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
    z = (M - mnM) / sqrt(varM)
    z, pval = _normtest_finish(z, alternative)
    if res_shape == ():
        # Return scalars, not 0-D arrays
        z = z[0]
        pval = pval[0]
    else:
        z.shape = res_shape
        pval.shape = res_shape
    return z, pval
# Result container returned by `wilcoxon`: (statistic, pvalue).
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False,
             alternative="two-sided", mode='auto'):
    """Calculate the Wilcoxon signed-rank test.

    The Wilcoxon signed-rank test tests the null hypothesis that two
    related paired samples come from the same distribution. In particular,
    it tests whether the distribution of the differences x - y is symmetric
    about zero. It is a non-parametric version of the paired T-test.

    Parameters
    ----------
    x : array_like
        Either the first set of measurements (in which case ``y`` is the second
        set of measurements), or the differences between two sets of
        measurements (in which case ``y`` is not to be specified.)  Must be
        one-dimensional.
    y : array_like, optional
        Either the second set of measurements (if ``x`` is the first set of
        measurements), or not specified (if ``x`` is the differences between
        two sets of measurements.)  Must be one-dimensional.
    zero_method : {"pratt", "wilcox", "zsplit"}, optional
        The following options are available (default is "wilcox"):

        * "pratt": Includes zero-differences in the ranking process,
          but drops the ranks of the zeros, see [4]_, (more conservative).
        * "wilcox": Discards all zero-differences, the default.
        * "zsplit": Includes zero-differences in the ranking process and
          split the zero rank between positive and negative ones.
    correction : bool, optional
        If True, apply continuity correction by adjusting the Wilcoxon rank
        statistic by 0.5 towards the mean value when computing the
        z-statistic if a normal approximation is used.  Default is False.
    alternative : {"two-sided", "greater", "less"}, optional
        The alternative hypothesis to be tested, see Notes. Default is
        "two-sided".
    mode : {"auto", "exact", "approx"}
        Method to calculate the p-value, see Notes. Default is "auto".

    Returns
    -------
    statistic : float
        If ``alternative`` is "two-sided", the sum of the ranks of the
        differences above or below zero, whichever is smaller.
        Otherwise the sum of the ranks of the differences above zero.
    pvalue : float
        The p-value for the test depending on ``alternative`` and ``mode``.

    See Also
    --------
    kruskal, mannwhitneyu

    Notes
    -----
    The test has been introduced in [4]_. Given n independent samples
    (xi, yi) from a bivariate distribution (i.e. paired samples),
    it computes the differences di = xi - yi. One assumption of the test
    is that the differences are symmetric, see [2]_.
    The two-sided test has the null hypothesis that the median of the
    differences is zero against the alternative that it is different from
    zero. The one-sided test has the null hypothesis that the median is
    positive against the alternative that it is negative
    (``alternative == 'less'``), or vice versa (``alternative == 'greater.'``).

    To derive the p-value, the exact distribution (``mode == 'exact'``)
    can be used for sample sizes of up to 25. The default ``mode == 'auto'``
    uses the exact distribution if there are at most 25 observations and no
    ties, otherwise a normal approximation is used (``mode == 'approx'``).

    The treatment of ties can be controlled by the parameter `zero_method`.
    If ``zero_method == 'pratt'``, the normal approximation is adjusted as in
    [5]_. A typical rule is to require that n > 20 ([2]_, p. 383).

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
    .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971.
    .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed
       Rank Procedures, Journal of the American Statistical Association,
       Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526`
    .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods,
       Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968`
    .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank
       Sampling Distribution When Zero Differences are Present,
       Journal of the American Statistical Association, Vol. 62, 1967,
       pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917`

    Examples
    --------
    In [4]_, the differences in height between cross- and self-fertilized
    corn plants is given as follows:

    >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75]

    Cross-fertilized plants appear to be higher. To test the null
    hypothesis that there is no height difference, we can apply the
    two-sided test:

    >>> from scipy.stats import wilcoxon
    >>> w, p = wilcoxon(d)
    >>> w, p
    (24.0, 0.041259765625)

    Hence, we would reject the null hypothesis at a confidence level of 5%,
    concluding that there is a difference in height between the groups.
    To confirm that the median of the differences can be assumed to be
    positive, we use:

    >>> w, p = wilcoxon(d, alternative='greater')
    >>> w, p
    (96.0, 0.0206298828125)

    This shows that the null hypothesis that the median is negative can be
    rejected at a confidence level of 5% in favor of the alternative that
    the median is greater than zero. The p-values above are exact. Using the
    normal approximation gives very similar values:

    >>> w, p = wilcoxon(d, mode='approx')
    >>> w, p
    (24.0, 0.04088813291185591)

    Note that the statistic changed to 96 in the one-sided case (the sum
    of ranks of positive differences) whereas it is 24 in the two-sided
    case (the minimum of sum of ranks above and below zero).

    """
    if mode not in ["auto", "approx", "exact"]:
        raise ValueError("mode must be either 'auto', 'approx' or 'exact'")
    if zero_method not in ["wilcox", "pratt", "zsplit"]:
        raise ValueError("Zero method must be either 'wilcox' "
                         "or 'pratt' or 'zsplit'")
    if alternative not in ["two-sided", "less", "greater"]:
        raise ValueError("Alternative must be either 'two-sided', "
                         "'greater' or 'less'")
    if y is None:
        # `x` already contains the paired differences.
        d = asarray(x)
        if d.ndim > 1:
            raise ValueError('Sample x must be one-dimensional.')
    else:
        x, y = map(asarray, (x, y))
        if x.ndim > 1 or y.ndim > 1:
            raise ValueError('Samples x and y must be one-dimensional.')
        if len(x) != len(y):
            raise ValueError('The samples x and y must have the same length.')
        d = x - y
    if mode == "auto":
        # Exact distribution is tabulated only for small samples.
        if len(d) <= 25:
            mode = "exact"
        else:
            mode = "approx"
    n_zero = np.sum(d == 0)
    if n_zero > 0 and mode == "exact":
        # Ties (zero differences) invalidate the exact distribution.
        mode = "approx"
        warnings.warn("Exact p-value calculation does not work if there are "
                      "ties. Switching to normal approximation.")
    if mode == "approx":
        if zero_method in ["wilcox", "pratt"]:
            if n_zero == len(d):
                raise ValueError("zero_method 'wilcox' and 'pratt' do not "
                                 "work if x - y is zero for all elements.")
    if zero_method == "wilcox":
        # Keep all non-zero differences
        d = compress(np.not_equal(d, 0), d)
    count = len(d)
    if count < 10 and mode == "approx":
        warnings.warn("Sample size too small for normal approximation.")
    # Rank the absolute differences; signed rank sums follow.
    r = stats.rankdata(abs(d))
    r_plus = np.sum((d > 0) * r)
    r_minus = np.sum((d < 0) * r)
    if zero_method == "zsplit":
        # Split the rank mass of zero differences evenly between the sums.
        r_zero = np.sum((d == 0) * r)
        r_plus += r_zero / 2.
        r_minus += r_zero / 2.
    # return min for two-sided test, but r_plus for one-sided test
    # the literature is not consistent here
    # r_plus is more informative since r_plus + r_minus = count*(count+1)/2,
    # i.e. the sum of the ranks, so r_minus and the min can be inferred
    # (If alternative='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.)
    # [3] uses the r_plus for the one-sided test, keep min for two-sided test
    # to keep backwards compatibility
    if alternative == "two-sided":
        T = min(r_plus, r_minus)
    else:
        T = r_plus
    if mode == "approx":
        # Null mean and (pre-scaled) variance of T.
        mn = count * (count + 1.) * 0.25
        se = count * (count + 1.) * (2. * count + 1.)
        if zero_method == "pratt":
            r = r[d != 0]
            # normal approximation needs to be adjusted, see Cureton (1967)
            mn -= n_zero * (n_zero + 1.) * 0.25
            se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
        replist, repnum = find_repeats(r)
        if repnum.size != 0:
            # Correction for repeated elements.
            se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
        se = sqrt(se / 24)
        # apply continuity correction if applicable
        d = 0
        if correction:
            if alternative == "two-sided":
                d = 0.5 * np.sign(T - mn)
            elif alternative == "less":
                d = -0.5
            else:
                d = 0.5
        # compute statistic and p-value using normal approximation
        z = (T - mn - d) / se
        if alternative == "two-sided":
            prob = 2. * distributions.norm.sf(abs(z))
        elif alternative == "greater":
            # large T = r_plus indicates x is greater than y; i.e.
            # accept alternative in that case and return small p-value (sf)
            prob = distributions.norm.sf(z)
        else:
            prob = distributions.norm.cdf(z)
    elif mode == "exact":
        # get frequencies cnt of the possible positive ranksums r_plus
        cnt = _get_wilcoxon_distr(count)
        # note: r_plus is int (ties not allowed), need int for slices below
        r_plus = int(r_plus)
        if alternative == "two-sided":
            if r_plus == (len(cnt) - 1) // 2:
                # r_plus is the center of the distribution.
                prob = 1.0
            else:
                p_less = np.sum(cnt[:r_plus + 1]) / 2**count
                p_greater = np.sum(cnt[r_plus:]) / 2**count
                prob = 2*min(p_greater, p_less)
        elif alternative == "greater":
            prob = np.sum(cnt[r_plus:]) / 2**count
        else:
            prob = np.sum(cnt[:r_plus + 1]) / 2**count
    return WilcoxonResult(T, prob)
def median_test(*args, ties='below', correction=True, lambda_=1,
                nan_policy='propagate'):
    """Perform a Mood's median test.

    Test that two or more samples come from populations with the same median.

    Let ``n = len(args)`` be the number of samples.  The "grand median" of
    all the data is computed, and a contingency table is formed by
    classifying the values in each sample as being above or below the grand
    median.  The contingency table, along with `correction` and `lambda_`,
    are passed to `scipy.stats.chi2_contingency` to compute the test statistic
    and p-value.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The set of samples.  There must be at least two samples.
        Each sample must be a one-dimensional sequence containing at least
        one value.  The samples are not required to have the same length.
    ties : str, optional
        Determines how values equal to the grand median are classified in
        the contingency table.  The string must be one of::

            "below":
                Values equal to the grand median are counted as "below".
            "above":
                Values equal to the grand median are counted as "above".
            "ignore":
                Values equal to the grand median are not counted.

        The default is "below".
    correction : bool, optional
        If True, *and* there are just two samples, apply Yates' correction
        for continuity when computing the test statistic associated with
        the contingency table.  Default is True.
    lambda_ : float or str, optional
        By default, the statistic computed in this test is Pearson's
        chi-squared statistic.  `lambda_` allows a statistic from the
        Cressie-Read power divergence family to be used instead.  See
        `power_divergence` for details.
        Default is 1 (Pearson's chi-squared statistic).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    stat : float
        The test statistic.  The statistic that is returned is determined by
        `lambda_`.  The default is Pearson's chi-squared statistic.
    p : float
        The p-value of the test.
    m : float
        The grand median.
    table : ndarray
        The contingency table.  The shape of the table is (2, n), where
        n is the number of samples.  The first row holds the counts of the
        values above the grand median, and the second row holds the counts
        of the values below the grand median.  The table allows further
        analysis with, for example, `scipy.stats.chi2_contingency`, or with
        `scipy.stats.fisher_exact` if there are two samples, without having
        to recompute the table.  If ``nan_policy`` is "propagate" and there
        are nans in the input, the return value for ``table`` is ``None``.

    See Also
    --------
    kruskal : Compute the Kruskal-Wallis H-test for independent samples.
    mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.

    Notes
    -----
    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
        (1950), pp. 394-399.
    .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
        See Sections 8.12 and 10.15.

    Examples
    --------
    A biologist runs an experiment in which there are three groups of plants.
    Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
    Each plant produces a number of seeds.  The seed counts for each group
    are::

        Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
        Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
        Group 3:  0  3  9 22 23 25 25 33 34 34 40 45 46 48 62 67 84

    The following code applies Mood's median test to these samples.

    >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
    >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
    >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
    >>> from scipy.stats import median_test
    >>> stat, p, med, tbl = median_test(g1, g2, g3)

    The median is

    >>> med
    34.0

    and the contingency table is

    >>> tbl
    array([[ 5, 10,  7],
           [11,  5, 10]])

    `p` is too large to conclude that the medians are not the same:

    >>> p
    0.12609082774093244

    The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
    `median_test`.

    >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
    >>> p
    0.12224779737117837

    The median occurs several times in the data, so we'll get a different
    result if, for example, ``ties="above"`` is used:

    >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
    >>> p
    0.063873276069553273
    >>> tbl
    array([[ 5, 11,  9],
           [11,  4,  8]])

    This example demonstrates that if the data set is not large and there
    are values equal to the median, the p-value can be sensitive to the
    choice of `ties`.

    """
    if len(args) < 2:
        raise ValueError('median_test requires two or more samples.')
    ties_options = ['below', 'above', 'ignore']
    if ties not in ties_options:
        raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
                         "of: %s" % (ties, str(ties_options)[1:-1]))
    data = [np.asarray(arg) for arg in args]
    # Validate the sizes and shapes of the arguments.
    for k, d in enumerate(data):
        if d.size == 0:
            raise ValueError("Sample %d is empty. All samples must "
                             "contain at least one value." % (k + 1))
        if d.ndim != 1:
            raise ValueError("Sample %d has %d dimensions. All "
                             "samples must be one-dimensional sequences." %
                             (k + 1, d.ndim))
    cdata = np.concatenate(data)
    contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
    if contains_nan and nan_policy == 'propagate':
        return np.nan, np.nan, np.nan, None
    if contains_nan:
        # nan_policy == 'omit' here: compute the median over valid values.
        grand_median = np.median(cdata[~np.isnan(cdata)])
    else:
        grand_median = np.median(cdata)
    # When the minimum version of numpy supported by scipy is 1.9.0,
    # the above if/else statement can be replaced by the single line:
    #     grand_median = np.nanmedian(cdata)

    # Create the contingency table.
    table = np.zeros((2, len(data)), dtype=np.int64)
    for k, sample in enumerate(data):
        sample = sample[~np.isnan(sample)]
        nabove = count_nonzero(sample > grand_median)
        nbelow = count_nonzero(sample < grand_median)
        # Values exactly equal to the grand median; assigned per `ties`.
        nequal = sample.size - (nabove + nbelow)
        table[0, k] += nabove
        table[1, k] += nbelow
        if ties == "below":
            table[1, k] += nequal
        elif ties == "above":
            table[0, k] += nequal
    # Check that no row or column of the table is all zero.
    # Such a table can not be given to chi2_contingency, because it would have
    # a zero in the table of expected frequencies.
    rowsums = table.sum(axis=1)
    if rowsums[0] == 0:
        raise ValueError("All values are below the grand median (%r)." %
                         grand_median)
    if rowsums[1] == 0:
        raise ValueError("All values are above the grand median (%r)." %
                         grand_median)
    if ties == "ignore":
        # We already checked that each sample has at least one value, but it
        # is possible that all those values equal the grand median.  If `ties`
        # is "ignore", that would result in a column of zeros in `table`.  We
        # check for that case here.
        zero_cols = np.nonzero((table == 0).all(axis=0))[0]
        if len(zero_cols) > 0:
            msg = ("All values in sample %d are equal to the grand "
                   "median (%r), so they are ignored, resulting in an "
                   "empty sample." % (zero_cols[0] + 1, grand_median))
            raise ValueError(msg)
    stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
                                              correction=correction)
    return stat, p, grand_median, table
def _circfuncs_common(samples, high, low, nan_policy='propagate'):
    """Shared setup for the circular statistics functions.

    Maps `samples` from [low, high] onto the unit circle and returns
    ``(samples, sin_samp, cos_samp, mask)``, where `mask` marks NaN
    positions when ``nan_policy='omit'`` and is None otherwise.
    """
    samples = np.asarray(samples)
    if samples.size == 0:
        # Empty input: sentinel NaNs and no mask.
        return np.nan, np.asarray(np.nan), np.asarray(np.nan), None
    # Rescale to angles spanning 0..2*pi and take the unit-circle components.
    angles = (samples - low) * 2. * pi / (high - low)
    sin_samp = sin(angles)
    cos_samp = cos(angles)
    # Apply the NaN policy.
    contains_nan, nan_policy = _contains_nan(samples, nan_policy)
    mask = None
    if contains_nan and nan_policy == 'omit':
        mask = np.isnan(samples)
        # Zero the NaN entries so that later sums simply skip them.
        sin_samp[mask] = 0.0
        cos_samp[mask] = 0.0
    return samples, sin_samp, cos_samp, mask
def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
    """Compute the circular mean for samples in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for circular mean range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for circular mean range.  Default is 0.
    axis : int, optional
        Axis along which means are computed.  The default is to compute
        the mean of the flattened array.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    circmean : float
        Circular mean.

    Examples
    --------
    >>> from scipy.stats import circmean
    >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3])
    0.2

    >>> from scipy.stats import circmean
    >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0)
    0.4

    """
    samples, sin_samp, cos_samp, nmask = _circfuncs_common(samples, high, low,
                                                           nan_policy=nan_policy)
    # Mean direction is the angle of the summed unit vectors.
    sin_sum = sin_samp.sum(axis=axis)
    cos_sum = cos_samp.sum(axis=axis)
    res = arctan2(sin_sum, cos_sum)
    # arctan2 returns angles in (-pi, pi]; wrap negatives into [0, 2*pi),
    # taking care to index only the non-NaN entries of `res`.
    mask_nan = ~np.isnan(res)
    if mask_nan.ndim > 0:
        mask = res[mask_nan] < 0
    else:
        mask = res < 0
    if mask.ndim > 0:
        mask_nan[mask_nan] = mask
        res[mask_nan] += 2*pi
    elif mask:
        res += 2*pi
    # Set output to NaN if no samples went into the mean
    if nmask is not None:
        if nmask.all():
            res = np.full(shape=res.shape, fill_value=np.nan)
        else:
            # Find out if any of the axis that are being averaged consist
            # entirely of NaN.  If one exists, set the result (res) to NaN
            nshape = 0 if axis is None else axis
            smask = nmask.shape[nshape] == nmask.sum(axis=axis)
            if smask.any():
                res[smask] = np.nan
    # Map the mean angle back from radians to the [low, high] range.
    return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
    """Compute the circular variance for samples assumed to be in a range.

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for the circular variance range.  Default is ``2*pi``.
    low : float or int, optional
        Low boundary for the circular variance range.  Default is 0.
    axis : int, optional
        Axis along which variances are computed.  By default the variance
        of the flattened array is computed.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    circvar : float
        Circular variance.

    Notes
    -----
    This uses a definition of circular variance that in the limit of small
    angles returns a number close to the 'linear' variance.

    Examples
    --------
    >>> from scipy.stats import circvar
    >>> circvar([0, 2*np.pi/3, 5*np.pi/3])
    2.19722457734

    """
    samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,
                                                          nan_policy=nan_policy)
    if mask is not None:
        # 'omit' policy: average only over the valid entries; axes with no
        # valid samples end up NaN.
        counts = np.asarray(np.sum(~mask, axis=axis).astype(float))
        counts[counts == 0] = np.nan
        sin_mean = sin_samp.sum(axis=axis) / counts
        cos_mean = cos_samp.sum(axis=axis) / counts
    else:
        sin_mean = sin_samp.mean(axis=axis)
        cos_mean = cos_samp.mean(axis=axis)
    # Mean resultant length; hypot can go slightly above 1 due to rounding
    # errors, so clip it at 1 before taking the log.
    with np.errstate(invalid='ignore'):
        R = np.minimum(1, hypot(sin_mean, cos_mean))
    return ((high - low)/2.0/pi)**2 * -2 * log(R)
def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'):
    """
    Compute the circular standard deviation for samples assumed to be in the
    range [low to high].

    Parameters
    ----------
    samples : array_like
        Input array.
    high : float or int, optional
        High boundary for the circular standard deviation range.
        Default is ``2*pi``.
    low : float or int, optional
        Low boundary for the circular standard deviation range.  Default is 0.
    axis : int, optional
        Axis along which standard deviations are computed.  By default the
        standard deviation of the flattened array is computed.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    circstd : float
        Circular standard deviation.

    Notes
    -----
    This uses a definition of circular standard deviation that in the limit of
    small angles returns a number close to the 'linear' standard deviation.

    Examples
    --------
    >>> from scipy.stats import circstd
    >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2])
    0.063564063306

    """
    samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low,
                                                          nan_policy=nan_policy)
    if mask is not None:
        # 'omit' policy: average only over the valid entries; axes with no
        # valid samples end up NaN.
        counts = np.asarray(np.sum(~mask, axis=axis).astype(float))
        counts[counts == 0] = np.nan
        sin_mean = sin_samp.sum(axis=axis) / counts
        cos_mean = cos_samp.sum(axis=axis) / counts
    else:
        sin_mean = sin_samp.mean(axis=axis)
        cos_mean = cos_samp.mean(axis=axis)
    # Mean resultant length; hypot can go slightly above 1 due to rounding
    # errors, so clip it at 1 before taking the log.
    with np.errstate(invalid='ignore'):
        R = np.minimum(1, hypot(sin_mean, cos_mean))
    return ((high - low)/2.0/pi) * sqrt(-2*log(R))
| bsd-3-clause |
cojacoo/testcases_echoRD | gen_test1111.py | 1 | 4398 | import numpy as np
# echoRD test-case driver: loads the 'gen_test1' model setup, configures a
# synthetic rainfall event and particle discretization, then runs the
# particle-based infiltration model in 60 s plot/output cycles, checkpointing
# state with pickle so an interrupted run can be resumed.
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are written, never shown
import matplotlib.pyplot as plt
import os, sys
try:
    import cPickle as pickle  # Python 2 fast pickle, fall back on Python 3
except:
    import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_specht
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_gen1',experimental=True)
mc = mcp.mcpick_out(mc,'gen_test1.pickle')
runname='gen_test1111'
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
# van Genuchten shape parameter m = 1 - 1/n, floored at 0.1
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
mc.md_macdepth=mc.md_depth[np.fmax(2,np.sum(np.ceil(mc.md_contact),axis=1).astype(int))]
mc.md_macdepth[mc.md_macdepth<=0.]=0.065
# Precipitation time series: one 30-minute event starting at t = 60 s
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart=60
precTS.tend=60+1800
precTS.total=0.01
precTS.intense=precTS.total/(precTS.tend-precTS.tstart)
#use modified routines for binned retention definitions
mc.part_sizefac=500
mc.gridcellA=mc.mgrid.vertfac*mc.mgrid.latfac
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
#DEBUG: a) we assume 2D=3D; b) change 20C to annual mean T?
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects=False
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
# Simulate one day of model time
t_end=24.*3600.
saveDT=True
# Method switches for the run (numbered variants documented upstream):
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
# Try to resume from a previous checkpoint; any failure (e.g. no checkpoint
# file yet) starts a fresh run instead.
try:
    #unpickle:
    with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
        pickle_l = pickle.load(handle)
    dummyx = pickle.loads(pickle_l)
    particles = pickle.loads(dummyx[0])
    [leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
    ix+=1
    print('resuming into stored run at t='+str(t)+'...')
except:
    print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
    plotparticles_specht(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
    [particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
    TSstore[i,:,:]=rE.part_store(particles,mc)
    #if i/5.==np.round(i/5.):
    # Checkpoint the full run state after every cycle.
    with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'wb') as handle:
        pickle.dump(pickle.dumps([pickle.dumps(particles),pickle.dumps([leftover,drained,t,TSstore,i])]), handle, protocol=2)
| gpl-3.0 |
davidam/python-examples | matplotlib/pyplot_text.py | 1 | 1407 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
"""
===========
Pyplot Text
===========
"""
import numpy as np
import matplotlib.pyplot as plt
# Fixing random state for reproducibility
np.random.seed(19680801)
# Draw 10000 samples from a normal distribution with mean 100 and std 15.
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
# the histogram of the data (50 bins, normalized to a probability density)
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
# Annotate the plot with TeX-rendered distribution parameters.
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-3.0 |
sgraham/nope | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
  """Parse a Breakpad .txt dump-metadata file into a dict.

  The file contains one 'key:value' entry per line, e.g. 'ptype:nacl-loader'.
  Only the first ':' on a line separates key from value, so values may
  themselves contain colons.  Lines without a ':' are ignored.

  Args:
    filename: Path of the .txt file to read.

  Returns:
    Dict mapping each key to its string value.
  """
  dump_info = {}
  # 'with' guarantees the handle is closed even if parsing raises,
  # unlike the previous explicit open()/close() pair.
  with open(filename, 'r') as fh:
    for line in fh:
      if ':' in line:
        key, value = line.rstrip().split(':', 1)
        dump_info[key] = value
  return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
                      cleanup_funcs, crash_service_exe,
                      skip_if_missing=False):
  """Launch Breakpad's crash service helper next to the browser binary.

  Appends a cleanup function to cleanup_funcs that terminates the
  service and logs its exit status.  When skip_if_missing is set and
  the executable does not exist, nothing is launched.
  """
  # The crash service ships alongside chrome.exe, so locate it relative
  # to the browser binary.  This is a bit icky.
  service_path = os.path.join(os.path.dirname(browser_path),
                              crash_service_exe)
  if skip_if_missing and not os.path.exists(service_path):
    return
  command = [service_path,
             '--v=1',  # Verbose output for debugging failures
             '--dumps-dir=%s' % dumps_dir,
             '--pipe-name=%s' % windows_pipe_name]
  proc = subprocess.Popen(command)
  def Cleanup():
    # Note that if the process has already exited, this will raise
    # an 'Access is denied' WindowsError exception, but
    # crash_service.exe is not supposed to do this and such
    # behaviour should make the test fail.
    proc.terminate()
    status = proc.wait()
    sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
                     % (crash_service_exe, status))
  cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
  """Return the full paths of all entries in dir_path, or [] if absent."""
  if not os.path.exists(dir_path):
    return []
  return [os.path.join(dir_path, entry) for entry in os.listdir(dir_path)]
def GetDumpFiles(dumps_dirs):
  """Log every file found under dumps_dirs and return only the .dmp ones."""
  found = []
  for dumps_dir in dumps_dirs:
    found.extend(ListPathsInDir(dumps_dir))
  sys.stdout.write('crash_dump_tester: Found %i files\n' % len(found))
  for path in found:
    sys.stdout.write(' %s (size %i)\n'
                     % (path, os.stat(path).st_size))
  return [path for path in found if path.endswith('.dmp')]
def Main(cleanup_funcs):
  """Run a browser test and verify the Breakpad crash dumps it produces.

  Sets up per-platform crash-dump capture (crash_service processes on
  Windows, environment variables on Mac/Linux), runs browser_tester,
  then checks that the expected number of .dmp files appeared and, on
  Windows, that each dump's .txt metadata names the expected process type.

  Args:
    cleanup_funcs: list to which cleanup callables are appended; the
        caller is responsible for invoking them after this returns.

  Returns:
    browser_tester.Run's result, forced to 1 if any dump check failed.
  """
  parser = browser_tester.BuildArgParser()
  parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
                    type=int, default=0,
                    help='The number of crash dumps that we should expect')
  parser.add_option('--expected_process_type_for_crash',
                    dest='expected_process_type_for_crash',
                    type=str, default='nacl-loader',
                    help='The type of Chromium process that we expect the '
                    'crash dump to be for')
  # Ideally we would just query the OS here to find out whether we are
  # running x86-32 or x86-64 Windows, but Python's win32api module
  # does not contain a wrapper for GetNativeSystemInfo(), which is
  # what NaCl uses to check this, or for IsWow64Process(), which is
  # what Chromium uses. Instead, we just rely on the build system to
  # tell us.
  parser.add_option('--win64', dest='win64', action='store_true',
                    help='Pass this if we are running tests for x86-64 Windows')
  options, args = parser.parse_args()
  temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
  def CleanUpTempDir():
    browsertester.browserlauncher.RemoveDirectory(temp_dir)
  cleanup_funcs.append(CleanUpTempDir)
  # To get a guaranteed unique pipe name, use the base name of the
  # directory we just created.
  windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
  # This environment variable enables Breakpad crash dumping in
  # non-official builds of Chromium.
  os.environ['CHROME_HEADLESS'] = '1'
  if sys.platform == 'win32':
    dumps_dir = temp_dir
    # Override the default (global) Windows pipe name that Chromium will
    # use for out-of-process crash reporting.
    os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
    # Launch the x86-32 crash service so that we can handle crashes in
    # the browser process.
    StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
                      cleanup_funcs, 'crash_service.exe')
    if options.win64:
      # Launch the x86-64 crash service so that we can handle crashes
      # in the NaCl loader process (nacl64.exe).
      # Skip if missing, since in win64 builds crash_service.exe is 64-bit
      # and crash_service64.exe does not exist.
      StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
                        cleanup_funcs, 'crash_service64.exe',
                        skip_if_missing=True)
    # We add a delay because there is probably a race condition:
    # crash_service.exe might not have finished doing
    # CreateNamedPipe() before NaCl does a crash dump and tries to
    # connect to that pipe.
    # TODO(mseaborn): We could change crash_service.exe to report when
    # it has successfully created the named pipe.
    time.sleep(1)
  elif sys.platform == 'darwin':
    dumps_dir = temp_dir
    os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
  elif sys.platform.startswith('linux'):
    # The "--user-data-dir" option is not effective for the Breakpad
    # setup in Linux Chromium, because Breakpad is initialized before
    # "--user-data-dir" is read. So we set HOME to redirect the crash
    # dumps to a temporary directory.
    home_dir = temp_dir
    os.environ['HOME'] = home_dir
    options.enable_crash_reporter = True
  result = browser_tester.Run(options.url, options)
  # Find crash dump results.
  if sys.platform.startswith('linux'):
    # Look in "~/.config/*/Crash Reports". This will find crash
    # reports under ~/.config/chromium or ~/.config/google-chrome, or
    # under other subdirectories in case the branding is changed.
    dumps_dirs = [os.path.join(path, 'Crash Reports')
                  for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
  else:
    dumps_dirs = [dumps_dir]
  dmp_files = GetDumpFiles(dumps_dirs)
  failed = False
  # NOTE: each error message is built before its check so a single
  # sys.stdout.write suffices when the check trips.
  msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
         (len(dmp_files), options.expected_crash_dumps))
  if len(dmp_files) != options.expected_crash_dumps:
    sys.stdout.write(msg)
    failed = True
  for dump_file in dmp_files:
    # Sanity check: Make sure dumping did not fail after opening the file.
    msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
    if os.stat(dump_file).st_size == 0:
      sys.stdout.write(msg)
      failed = True
    # On Windows, the crash dumps should come in pairs of a .dmp and
    # .txt file.
    if sys.platform == 'win32':
      second_file = dump_file[:-4] + '.txt'
      msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
             '%r file\n' % (dump_file, second_file))
      if not os.path.exists(second_file):
        sys.stdout.write(msg)
        failed = True
        continue
      # Check that the crash dump comes from the NaCl process.
      dump_info = ReadDumpTxtFile(second_file)
      if 'ptype' in dump_info:
        msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
               % (dump_info['ptype'], options.expected_process_type_for_crash))
        if dump_info['ptype'] != options.expected_process_type_for_crash:
          sys.stdout.write(msg)
          failed = True
      else:
        sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
        failed = True
    # TODO(mseaborn): Ideally we would also check that a backtrace
    # containing an expected function name can be extracted from the
    # crash dump.
  if failed:
    sys.stdout.write('crash_dump_tester: FAILED\n')
    result = 1
  else:
    sys.stdout.write('crash_dump_tester: PASSED\n')
  return result
def MainWrapper():
  """Run Main(), then unconditionally invoke every registered cleanup."""
  pending_cleanups = []
  try:
    return Main(pending_cleanups)
  finally:
    for cleanup in pending_cleanups:
      cleanup()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
sjperkins/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
  """Downsample tensor_in with non-overlapping 2x2 max pooling."""
  window = [1, 2, 2, 1]
  return tf.nn.max_pool(tensor_in, ksize=window, strides=window,
                        padding='SAME')
def conv_model(feature, target, mode):
  """2-layer convolution model."""
  # One-hot encode the integer labels into shape (batch_size, 10).
  target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
  # View the flat features as NHWC images: 28x28 pixels, one channel.
  feature = tf.reshape(feature, [-1, 28, 28, 1])
  # Conv block 1: 32 feature maps from 5x5 patches, then 2x2 max pool.
  with tf.variable_scope('conv_layer1'):
    conv1 = layers.convolution2d(
        feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
    pool1 = max_pool_2x2(conv1)
  # Conv block 2: 64 feature maps from 5x5 patches, then 2x2 max pool.
  with tf.variable_scope('conv_layer2'):
    conv2 = layers.convolution2d(
        pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
    pool2 = max_pool_2x2(conv2)
  # Flatten the pooled feature maps into one vector per example.
  flattened = tf.reshape(pool2, [-1, 7 * 7 * 64])
  # Fully connected layer with 1024 units; dropout only while training.
  dense = layers.dropout(
      layers.fully_connected(
          flattened, 1024, activation_fn=tf.nn.relu),
      keep_prob=0.5,
      is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
  # Linear projection to the 10 class logits, plus the training loss.
  logits = layers.fully_connected(dense, 10, activation_fn=None)
  loss = tf.losses.softmax_cross_entropy(target, logits)
  # SGD training op driven by the framework's global step.
  train_op = layers.optimize_loss(
      loss,
      tf.contrib.framework.get_global_step(),
      optimizer='SGD',
      learning_rate=0.001)
  return tf.argmax(logits, 1), loss, train_op
def main(unused_args):
  """Train and score a linear model and a convnet on MNIST."""
  ### Download and load MNIST dataset.
  mnist = learn.datasets.load_dataset('mnist')
  ### Linear classifier.
  feature_columns = learn.infer_real_valued_columns_from_input(
      mnist.train.images)
  linear = learn.LinearClassifier(
      feature_columns=feature_columns, n_classes=10)
  linear.fit(mnist.train.images,
             mnist.train.labels.astype(np.int32),
             batch_size=100,
             steps=1000)
  score = metrics.accuracy_score(
      mnist.test.labels, list(linear.predict(mnist.test.images)))
  print('Accuracy: {0:f}'.format(score))
  ### Convolutional network
  convnet = learn.Estimator(model_fn=conv_model)
  convnet.fit(mnist.train.images,
              mnist.train.labels,
              batch_size=100,
              steps=20000)
  score = metrics.accuracy_score(
      mnist.test.labels, list(convnet.predict(mnist.test.images)))
  print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
ehogan/iris | lib/iris/tests/test_trajectory.py | 9 | 9235 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import biggus
import numpy as np
import iris.analysis.trajectory
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
class TestSimple(tests.IrisTest):
    def test_invalid_coord(self):
        # Interpolating along a coordinate the 'nearest' scheme cannot
        # handle must raise ValueError.
        cube = iris.tests.stock.realistic_4d()
        points = [('altitude', [0, 10, 50])]
        with self.assertRaises(ValueError):
            iris.analysis.trajectory.interpolate(cube, points, 'nearest')
class TestTrajectory(tests.IrisTest):
    """Integration tests for iris.analysis.trajectory sampling and
    interpolation, checked against CML fixtures and hand-computed values.
    """
    def test_trajectory_definition(self):
        # basic 2-seg line along x
        waypoints = [ {'lat':0, 'lon':0}, {'lat':0, 'lon':1}, {'lat':0, 'lon':2} ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=21)
        self.assertEqual(trajectory.length, 2.0)
        self.assertEqual(trajectory.sampled_points[19], {'lat': 0.0, 'lon': 1.9000000000000001})
        # 4-seg m-shape
        waypoints = [ {'lat':0, 'lon':0}, {'lat':1, 'lon':1}, {'lat':0, 'lon':2}, {'lat':1, 'lon':3}, {'lat':0, 'lon':4} ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=33)
        # length is 4 * sqrt(2) for the four unit diagonals.
        self.assertEqual(trajectory.length, 5.6568542494923806)
        self.assertEqual(trajectory.sampled_points[31], {'lat': 0.12499999999999989, 'lon': 3.875})
    @tests.skip_data
    @tests.skip_plot
    def test_trajectory_extraction(self):
        # Load the COLPEX data => TZYX
        path = tests.get_data_path(['PP', 'COLPEX', 'theta_and_orog_subset.pp'])
        cube = iris.load_cube(path, 'air_potential_temperature')
        cube.coord('grid_latitude').bounds = None
        cube.coord('grid_longitude').bounds = None
        # TODO: Workaround until regrid can handle factories
        cube.remove_aux_factory(cube.aux_factories[0])
        cube.remove_coord('surface_altitude')
        self.assertCML(cube, ('trajectory', 'big_cube.cml'))
        # Pull out a single point - no interpolation required
        single_point = iris.analysis.trajectory.interpolate(
            cube, [('grid_latitude', [-0.1188]),
                   ('grid_longitude', [359.57958984])])
        expected = cube[..., 10, 0].data
        self.assertArrayAllClose(single_point[..., 0].data, expected, rtol=2.0e-7)
        self.assertCML(single_point, ('trajectory', 'single_point.cml'),
                       checksum=False)
        # Pull out another point and test against a manually calculated result.
        single_point = [['grid_latitude', [-0.1188]], ['grid_longitude', [359.584090412]]]
        scube = cube[0, 0, 10:11, 4:6]
        x0 = scube.coord('grid_longitude')[0].points
        x1 = scube.coord('grid_longitude')[1].points
        y0 = scube.data[0, 0]
        y1 = scube.data[0, 1]
        # Manual linear interpolation between the two longitude columns.
        expected = y0 + ((y1 - y0) * ((359.584090412 - x0)/(x1 - x0)))
        trajectory_cube = iris.analysis.trajectory.interpolate(scube,
                                                               single_point)
        self.assertArrayAllClose(trajectory_cube.data, expected, rtol=2.0e-7)
        # Extract a simple, axis-aligned trajectory that is similar to an indexing operation.
        # (It's not exactly the same because the source cube doesn't have regular spacing.)
        waypoints = [
            {'grid_latitude': -0.1188, 'grid_longitude': 359.57958984},
            {'grid_latitude': -0.1188, 'grid_longitude': 359.66870117}
        ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=100)
        # Helper: convert Trajectory.sampled_points (list of dicts) into
        # the (name, values) pairs that interpolate() expects.
        def traj_to_sample_points(trajectory):
            sample_points = []
            src_points = trajectory.sampled_points
            for name in six.iterkeys(src_points[0]):
                values = [point[name] for point in src_points]
                sample_points.append((name, values))
            return sample_points
        sample_points = traj_to_sample_points(trajectory)
        trajectory_cube = iris.analysis.trajectory.interpolate(cube,
                                                               sample_points)
        self.assertCML(trajectory_cube, ('trajectory',
                                         'constant_latitude.cml'))
        # Sanity check the results against a simple slice
        plt.plot(cube[0, 0, 10, :].data)
        plt.plot(trajectory_cube[0, 0, :].data)
        self.check_graphic()
        # Extract a zig-zag trajectory
        waypoints = [
            {'grid_latitude': -0.1188, 'grid_longitude': 359.5886},
            {'grid_latitude': -0.0828, 'grid_longitude': 359.6606},
            {'grid_latitude': -0.0468, 'grid_longitude': 359.6246},
        ]
        trajectory = iris.analysis.trajectory.Trajectory(waypoints, sample_count=20)
        sample_points = traj_to_sample_points(trajectory)
        trajectory_cube = iris.analysis.trajectory.interpolate(
            cube[0, 0], sample_points)
        expected = np.array([287.95953369, 287.9190979, 287.95550537,
                             287.93240356, 287.83850098, 287.87869263,
                             287.90942383, 287.9463501, 287.74365234,
                             287.68856812, 287.75588989, 287.54611206,
                             287.48522949, 287.53356934, 287.60217285,
                             287.43795776, 287.59701538, 287.52468872,
                             287.45025635, 287.52716064], dtype=np.float32)
        self.assertCML(trajectory_cube, ('trajectory', 'zigzag.cml'), checksum=False)
        self.assertArrayAllClose(trajectory_cube.data, expected, rtol=2.0e-7)
        # Sanity check the results against a simple slice
        x = cube.coord('grid_longitude').points
        y = cube.coord('grid_latitude').points
        plt.pcolormesh(x, y, cube[0, 0, :, :].data)
        x = trajectory_cube.coord('grid_longitude').points
        y = trajectory_cube.coord('grid_latitude').points
        plt.scatter(x, y, c=trajectory_cube.data)
        self.check_graphic()
    @tests.skip_data
    @tests.skip_plot
    def test_tri_polar(self):
        # load data
        cubes = iris.load(tests.get_data_path(['NetCDF', 'ORCA2', 'votemper.nc']))
        cube = cubes[0]
        # The netCDF file has different data types for the points and
        # bounds of 'depth'. This wasn't previously supported, so we
        # emulate that old behaviour.
        cube.coord('depth').bounds = cube.coord('depth').bounds.astype(np.float32)
        # define a latitude trajectory (put coords in a different order to the cube, just to be awkward)
        latitudes = list(range(-90, 90, 2))
        longitudes = [-90]*len(latitudes)
        sample_points = [('longitude', longitudes), ('latitude', latitudes)]
        # extract
        sampled_cube = iris.analysis.trajectory.interpolate(cube, sample_points)
        self.assertCML(sampled_cube, ('trajectory', 'tri_polar_latitude_slice.cml'))
        # turn it upside down for the visualisation
        plot_cube = sampled_cube[0]
        plot_cube = plot_cube[::-1, :]
        plt.clf()
        plt.pcolormesh(plot_cube.data, vmin=cube.data.min(), vmax=cube.data.max())
        plt.colorbar()
        self.check_graphic()
        # Try to request linear interpolation.
        # Not allowed, as we have multi-dimensional coords.
        self.assertRaises(iris.exceptions.CoordinateMultiDimError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linear")
        # Try to request unknown interpolation.
        self.assertRaises(ValueError, iris.analysis.trajectory.interpolate, cube, sample_points, method="linekar")
    def test_hybrid_height(self):
        cube = tests.stock.simple_4d_with_hybrid_height()
        # Put a biggus array on the cube so we can test deferred loading.
        cube.lazy_data(biggus.NumpyArrayAdapter(cube.data))
        traj = (('grid_latitude', [20.5, 21.5, 22.5, 23.5]),
                ('grid_longitude', [31, 32, 33, 34]))
        xsec = iris.analysis.trajectory.interpolate(cube, traj, method='nearest')
        # Check that creating the trajectory hasn't led to the original
        # data being loaded.
        self.assertTrue(cube.has_lazy_data())
        self.assertCML([cube, xsec], ('trajectory', 'hybrid_height.cml'))
if __name__ == '__main__':
tests.main()
| lgpl-3.0 |
buguen/pylayers | pylayers/antprop/examples/ex_antvsh.py | 3 | 1481 | from pylayers.antprop.antenna import *
from pylayers.antprop.spharm import *
from pylayers.antprop.antvsh import *
from pylayers.util.pyutil import *
import matplotlib.pyplot as plt
from numpy import *
import matplotlib.pyplot as plt
import os
# NOTE(review): this example script uses Python 2 print statements; it
# targets Python 2 and must not be run under Python 3 unmodified.
# Load the measured UWB antenna pattern from a Matlab file.
_filename = 'S1R1.mat'
A = Antenna(_filename,'ant/UWBAN/Matfile')
filename=getlong(_filename,'ant/UWBAN/Matfile')
# Size of the original .mat file, used later for the compression ratio.
Norig = os.path.getsize(filename)
# Remove the electrical delay from the measured patterns
# (frequency axis reshaped for broadcasting over theta/phi).
freq = A.fa.reshape(104,1,1)
ed = A.getdelay(freq)
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*ed)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*ed)
# Vector spherical harmonics decomposition (downsampling factor 2),
# then shape conversions s1 -> s2 -> s3 and save the .vsh3 file.
A = vsh(A,dsf=2)
A.C.s1tos2(20)
A.C.s2tos3(1e-5)
A.savevsh3()
# Compare on-disk sizes: original .mat vs compressed .vsh3.
filevsh3 = getlong(_filename.replace('.mat','.vsh3'),'ant')
Nvsh3 = os.path.getsize(filevsh3)
ratio = Norig/(1.*Nvsh3)
print ratio
# Relative reconstruction error over all frequencies (dB).
print "errel total"
et1,et2,et3 =A.errel(dsf=1,typ='s3')
et3l = 10*log10(et3)
print et3l
# Relative reconstruction error at frequency index 46 (dB).
print "errel @ 46"
e1,e2,e3 = A.errel(kf=46,dsf=1,typ='s3')
print 10*log10(e3)
# Build the plot title from coefficient count, size and error figures.
Nc = len(A.C.Br.ind3)
Nf = A.Nf
csize = 4*Nc*Nf
ch1 = _filename.replace('.mat','')
ch2 = ', Nf ='+str(Nf)
ch3 = ', ['+str(A.fa[0])+','+str(A.fa[-1])+' ] GHz'
ch4 = ', Nc = '+ str(Nc)
ch5 = ', size = '+ str(csize) +' complex values'
ch6 = ', compress = '+ str(ratio)[0:5]
ch7 = ', relative error ='+str(et3l)[0:5]+' dB'
A.C.plot(subp=False,titre=ch1+ch2+ch3+ch4+ch5+ch6)
#th = kron(A.theta,ones(A.Np))
#ph = kron(ones(A.Nt),A.phi)
#Fth,Fph = A.Fsynth3(th,ph)
#FTh = Fth.reshape(A.Nf,A.Nt,A.Np)
#FPh = Fph.reshape(A.Nf,A.Nt,A.Np)
#compdiag(46,A,A.theta,A.phi,FTh,FPh,'modulus')
plt.show()
| lgpl-3.0 |
subodhchhabra/pyxley | pyxley/charts/datamaps/datamaps.py | 2 | 4075 | from ..charts import Chart
import pandas as pd
from flask import request, jsonify, make_response
# Default mapping from human-readable color names to hex fill values,
# used as the default `color_map`/fills for DatamapUSA below.
# 'defaultFills' is the Datamaps fallback for states with no fillKey.
_COLOR_MAP = {
    'light blue':'#add8e6',
    "antique gold":'#fff4b0',
    "antique silver":'#d7cdc4',
    "beige": '#f5f5dc',
    "black":'#000000',
    "blue": '#8084ff',
    "bronze": '#c95a0b',
    "brown": '#864',
    "burgundy": '#ff7272',
    "burnt orange": '#cc5500',
    "camel": '#c96',
    "canary yellow": '#ffef00',
    "cobalt": "#56b3ff",
    "coral": "#ff9e80",
    "dark green": '#006400',
    "dark grey": '#666666',
    "dark pink": '#e3489b',
    "dark purple": '#540061',
    "fuchsia": '#ff00ff',
    "gold": '#fc0',
    "gray": '#9c9c9c',
    "green": "#83ff7f",
    "grey": "#9c9c9c",
    "jewel tone purple": '#ae2cc6',
    "light green": '#90ee90',
    "light grey": '#d3d3d3',
    "light pink": '#ffd6d3',
    "light purple": '#b0c4de',
    "magenta": '#ff00ff',
    "mustard": '#ffe761',
    "navy": '#6c70ff',
    "off-white": '#ffffdd',
    "olive": '#808000',
    "orange": '#ffc870',
    "orange red": '#ff4500',
    "pale yellow": '#ffff9d',
    "pink": '#ffb6c1',
    "purple": '#800080',
    "red": '#ff0000',
    "rose gold": '#ffba9d',
    "silver": '#c0c0c0',
    "soft orange": '#ffc63c',
    "tan": '#d2b48c',
    "teal": '#008080',
    "teal green":'#a1dfc6',
    "turquoise": '#40e0d0',
    "white": '#ffffff',
    "yellow": '#ffff00',
    "other": '#111111',
    "defaultFills": "black"
}
class Datamap(Chart):
    """Base wrapper for the PyxleyJS Datamaps chart component.

    Args:
        chart_id: html element id.
        url: name of endpoint to transmit data.
        params: parameters chart will be initialized with.
        api_route: function called by the endpoint.
    """
    def __init__(self, chart_id, url, params, api_route):
        # Chart expects the component name plus its option payload.
        options = {
            "chartid": chart_id,
            "url": url,
            "params": params
        }
        super(Datamap, self).__init__("Datamaps", options, api_route)
class DatamapUSA(Datamap):
    """ Wrapper for PyxleyJS Datamaps component.
        By default, this class builds a simple endpoint function.
        This can be overriden by supplying a route_func. When
        a route_func has been supplied, only the url, init_params,
        and route_func will be used.
        Args:
            url: name of endpoint to transmit data.
            chart_id: html element id.
            df: dataframe containing states and colors.
            state_index: column name of dataframe containing states.
            color_index: column name of dataframe containing colors.
            init_params: parameters chart will be initialized with.
            color_map: dictionary of color labels and hex values.
            route_func: function called by the endpoint. default is None
    """
    def __init__(self, url, chart_id, df,
            state_index, color_index,
            init_params=None,
            color_map=_COLOR_MAP,
            route_func=None):
        # Fix: avoid a mutable default argument ({}), which would be a
        # single dict shared by every instance; None is normalized to a
        # fresh empty dict here instead.
        if init_params is None:
            init_params = {}
        if not route_func:
            def get_data():
                # Resolve each expected parameter from the query string,
                # falling back to its initial value.
                args = {}
                for c in init_params:
                    if request.args.get(c):
                        args[c] = request.args[c]
                    else:
                        args[c] = init_params[c]
                return jsonify(DatamapUSA.to_json(
                    self.apply_filters(df, args),
                    state_index,
                    color_index,
                    color_map
                ))
            route_func = get_data
        super(DatamapUSA, self).__init__(chart_id, url, init_params, route_func)
    @staticmethod
    def to_json(df, state_index, color_index, fills):
        """Transforms dataframe to json response"""
        # One record per state, keyed on the state column, with the
        # color column supplying the Datamaps fillKey.
        records = {}
        for i, row in df.iterrows():
            records[row[state_index]] = {
                "fillKey": row[color_index]
            }
        return {
            "data": records,
            "fills": fills
        }
| mit |
olologin/scikit-learn | sklearn/decomposition/truncated_svd.py | 19 | 7884 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
    """Dimensionality reduction using truncated SVD (aka LSA).
    This transformer performs linear dimensionality reduction by means of
    truncated singular value decomposition (SVD). It is very similar to PCA,
    but operates on sample vectors directly, instead of on a covariance matrix.
    This means it can work with scipy.sparse matrices efficiently.
    In particular, truncated SVD works on term count/tf-idf matrices as
    returned by the vectorizers in sklearn.feature_extraction.text. In that
    context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithm: a fast randomized SVD solver, and
    a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
    (X.T * X), whichever is more efficient.
    Read more in the :ref:`User Guide <LSA>`.
    Parameters
    ----------
    n_components : int, default = 2
        Desired dimensionality of output data.
        Must be strictly less than the number of features.
        The default value is useful for visualisation. For LSA, a value of
        100 is recommended.
    algorithm : string, default = "randomized"
        SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
        (scipy.sparse.linalg.svds), or "randomized" for the randomized
        algorithm due to Halko (2009).
    n_iter : int, optional (default 5)
        Number of iterations for randomized SVD solver. Not used by ARPACK.
        The default is larger than the default in `randomized_svd` to handle
        sparse matrices that may have large slowly decaying spectrum.
    random_state : int or RandomState, optional
        (Seed for) pseudo-random number generator. If not given, the
        numpy.random singleton is used.
    tol : float, optional
        Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
        SVD solver.
    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
    explained_variance_ : array, [n_components]
        The variance of the training samples transformed by a projection to
        each component.
    Examples
    --------
    >>> from sklearn.decomposition import TruncatedSVD
    >>> from sklearn.random_projection import sparse_random_matrix
    >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
    >>> svd.fit(X)  # doctest: +NORMALIZE_WHITESPACE
    TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
            random_state=42, tol=0.0)
    >>> print(svd.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
    >>> print(svd.explained_variance_ratio_.sum())  # doctest: +ELLIPSIS
    0.279...
    See also
    --------
    PCA
    RandomizedPCA
    References
    ----------
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    Notes
    -----
    SVD suffers from a problem called "sign indeterminancy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.
    """
    def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
                 random_state=None, tol=0.):
        # Parameters are stored verbatim (sklearn convention: no
        # validation in __init__; validation happens in fit_transform).
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol
    def fit(self, X, y=None):
        """Fit LSI model on training data X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        Returns
        -------
        self : object
            Returns the transformer object.
        """
        # Delegates to fit_transform and discards the transformed output.
        self.fit_transform(X)
        return self
    def fit_transform(self, X, y=None):
        """Fit LSI model to X and perform dimensionality reduction on X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = as_float_array(X, copy=False)
        random_state = check_random_state(self.random_state)
        # If sparse and not csr or csc, convert to csr
        if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
            X = X.tocsr()
        if self.algorithm == "arpack":
            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            # svd_flip resolves the sign indeterminacy mentioned in the
            # class Notes so results are deterministic per algorithm.
            U, VT = svd_flip(U[:, ::-1], VT[::-1])
        elif self.algorithm == "randomized":
            k = self.n_components
            n_features = X.shape[1]
            if k >= n_features:
                raise ValueError("n_components must be < n_features;"
                                 " got %d >= %d" % (k, n_features))
            U, Sigma, VT = randomized_svd(X, self.n_components,
                                          n_iter=self.n_iter,
                                          random_state=random_state)
        else:
            raise ValueError("unknown algorithm %r" % self.algorithm)
        self.components_ = VT
        # Calculate explained variance & explained variance ratio
        # U * diag(Sigma) is the reduced representation of the samples.
        X_transformed = np.dot(U, np.diag(Sigma))
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            # Per-feature variance without densifying the sparse matrix.
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        return X_transformed
    def transform(self, X):
        """Perform dimensionality reduction on X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse='csr')
        # Project onto the fitted components (X @ components_.T),
        # handling sparse X without densifying it.
        return safe_sparse_dot(X, self.components_.T)
    def inverse_transform(self, X):
        """Transform X back to its original space.
        Returns an array X_original whose transform would be X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data.
        Returns
        -------
        X_original : array, shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)
| bsd-3-clause |
harisbal/pandas | asv_bench/benchmarks/groupby.py | 3 | 18265 | import warnings
from string import ascii_letters
from itertools import product
from functools import partial
import numpy as np
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
TimeGrouper, Categorical, Timestamp)
import pandas.util.testing as tm
# Per-dtype sets of groupby methods to exclude — presumably consumed by
# parameterised benchmarks elsewhere in this file (not visible here);
# verify against the benchmark classes that iterate over methods.
method_blacklist = {
    'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean',
               'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min',
               'var', 'mad', 'describe', 'std'},
    'datetime': {'median', 'prod', 'sem', 'cumsum', 'sum', 'mean', 'skew',
                 'cumprod', 'cummax', 'pct_change', 'var', 'mad', 'describe',
                 'std'}
}
class ApplyDictReturn(object):
    """Benchmark groupby().apply with a callable that returns a dict."""

    def setup(self):
        # 1000 groups of 10 consecutive rows each.
        self.labels = np.repeat(np.arange(1000), 10)
        self.data = Series(np.random.randn(len(self.labels)))

    def time_groupby_apply_dict_return(self):
        def first_last(grp):
            return {'first': grp.values[0], 'last': grp.values[-1]}
        self.data.groupby(self.labels).apply(first_last)
class Apply(object):
    """Benchmark cheap vs. copying callables under groupby().apply."""

    def setup_cache(self):
        nrows = 10**4
        frame = DataFrame({
            'key': np.random.randint(0, 2000, size=nrows),
            'key2': np.random.randint(0, 3, size=nrows),
            'value1': np.random.randn(nrows),
            'value2': ['foo', 'bar', 'baz', 'qux'] * (nrows // 4),
        })
        return frame

    def time_scalar_function_multi_col(self, df):
        df.groupby(['key', 'key2']).apply(lambda grp: 1)

    def time_scalar_function_single_col(self, df):
        df.groupby('key').apply(lambda grp: 1)

    @staticmethod
    def df_copy_function(g):
        # ensure that the group name is available (see GH #15062)
        g.name
        return g.copy()

    def time_copy_function_multi_col(self, df):
        df.groupby(['key', 'key2']).apply(self.df_copy_function)

    def time_copy_overhead_single_col(self, df):
        df.groupby('key').apply(self.df_copy_function)
class Groups(object):
    """Benchmark SeriesGroupBy.groups for int64 and object-dtype keys."""

    param_names = ['key']
    params = ['int64_small', 'int64_large', 'object_small', 'object_large']

    def setup_cache(self):
        n = 10**6
        small_ints = np.random.randint(0, 100, size=n)
        large_ints = np.random.randint(0, 10000, size=n)
        data = {
            'int64_small': Series(small_ints),
            'int64_large': Series(large_ints),
            'object_small': Series(
                tm.makeStringIndex(100).take(
                    np.random.randint(0, 100, size=n))),
            'object_large': Series(
                tm.makeStringIndex(10000).take(
                    np.random.randint(0, 10000, size=n))),
        }
        return data

    def setup(self, data, key):
        self.ser = data[key]

    def time_series_groups(self, data, key):
        self.ser.groupby(self.ser).groups
class GroupManyLabels(object):
    # Sum with an external grouping array over 1 or 1000 value columns.

    params = [1, 1000]
    param_names = ['ncols']

    def setup(self, ncols):
        N = 1000
        data = np.random.randn(N, ncols)
        self.labels = np.random.randint(0, 100, size=N)
        self.df = DataFrame(data)

    def time_sum(self, ncols):
        self.df.groupby(self.labels).sum()
class Nth(object):
    # groupby.nth with and without dropna, on frames and series, across
    # several dtypes; each key is unique so there are N groups of size 1.

    param_names = ['dtype']
    params = ['float32', 'float64', 'datetime', 'object']

    def setup(self, dtype):
        N = 10**5
        # with datetimes (GH7555)
        if dtype == 'datetime':
            values = date_range('1/1/2011', periods=N, freq='s')
        elif dtype == 'object':
            values = ['foo'] * N
        else:
            values = np.arange(N).astype(dtype)
        key = np.arange(N)
        self.df = DataFrame({'key': key, 'values': values})
        self.df.iloc[1, 1] = np.nan  # insert missing data

    def time_frame_nth_any(self, dtype):
        self.df.groupby('key').nth(0, dropna='any')

    def time_groupby_nth_all(self, dtype):
        self.df.groupby('key').nth(0, dropna='all')

    def time_frame_nth(self, dtype):
        self.df.groupby('key').nth(0)

    def time_series_nth_any(self, dtype):
        self.df['values'].groupby(self.df['key']).nth(0, dropna='any')

    def time_series_nth_all(self, dtype):
        self.df['values'].groupby(self.df['key']).nth(0, dropna='all')

    def time_series_nth(self, dtype):
        self.df['values'].groupby(self.df['key']).nth(0)
class DateAttributes(object):
    # len() of a groupby keyed on the year/month/day arrays of an hourly
    # DatetimeIndex (forces group construction).

    def setup(self):
        rng = date_range('1/1/2000', '12/31/2005', freq='H')
        self.year, self.month, self.day = rng.year, rng.month, rng.day
        self.ts = Series(np.random.randn(len(rng)), index=rng)

    def time_len_groupby_object(self):
        len(self.ts.groupby([self.year, self.month, self.day]))
class Int64(object):
    # Max over a five-int-column key whose combined hashing can overflow
    # int64 during group-key factorization.

    def setup(self):
        arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
        i = np.random.choice(len(arr), len(arr) * 5)
        arr = np.vstack((arr, arr[i]))  # append duplicated rows
        i = np.random.permutation(len(arr))
        arr = arr[i]  # shuffle
        self.cols = list('abcde')
        self.df = DataFrame(arr, columns=self.cols)
        self.df['jim'], self.df['joe'] = np.random.randn(2, len(self.df)) * 10

    def time_overflow(self):
        self.df.groupby(self.cols).max()
class CountMultiDtype(object):
    # groupby.count across datetime, timedelta, float, int and object
    # columns, each with roughly half of the values missing.

    def setup_cache(self):
        n = 10000
        offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
        dates = np.datetime64('now') + offsets
        dates[np.random.rand(n) > 0.5] = np.datetime64('nat')
        offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat')
        value2 = np.random.randn(n)
        value2[np.random.rand(n) > 0.5] = np.nan
        obj = np.random.choice(list('ab'), size=n).astype(object)
        # BUG FIX: use rand (uniform on [0, 1)) like every other NaN mask
        # above; randn drew from a standard normal, so only ~31% of the
        # object column was NaN instead of the intended ~50%.
        obj[np.random.rand(n) > 0.5] = np.nan
        df = DataFrame({'key1': np.random.randint(0, 500, size=n),
                        'key2': np.random.randint(0, 100, size=n),
                        'dates': dates,
                        'value2': value2,
                        'value3': np.random.randn(n),
                        'ints': np.random.randint(0, 1000, size=n),
                        'obj': obj,
                        'offsets': offsets})
        return df

    def time_multi_count(self, df):
        df.groupby(['key1', 'key2']).count()
class CountMultiInt(object):
    # count and nunique on two int value columns with a two-column key.

    def setup_cache(self):
        n = 10000
        df = DataFrame({'key1': np.random.randint(0, 500, size=n),
                        'key2': np.random.randint(0, 100, size=n),
                        'ints': np.random.randint(0, 1000, size=n),
                        'ints2': np.random.randint(0, 1000, size=n)})
        return df

    def time_multi_int_count(self, df):
        df.groupby(['key1', 'key2']).count()

    def time_multi_int_nunique(self, df):
        df.groupby(['key1', 'key2']).nunique()
class AggFunctions(object):
    # .agg with string, numpy and plain-Python aggregators, including
    # per-column dicts of differing functions.

    def setup_cache(self):
        # BUG FIX: setup_cache was declared without ``self``, but asv
        # invokes it as a bound method, so the call raised TypeError and
        # the whole class was skipped.
        N = 10**5
        fac1 = np.array(['A', 'B', 'C'], dtype='O')
        fac2 = np.array(['one', 'two'], dtype='O')
        df = DataFrame({'key1': fac1.take(np.random.randint(0, 3, size=N)),
                        'key2': fac2.take(np.random.randint(0, 2, size=N)),
                        'value1': np.random.randn(N),
                        'value2': np.random.randn(N),
                        'value3': np.random.randn(N)})
        return df

    def time_different_str_functions(self, df):
        df.groupby(['key1', 'key2']).agg({'value1': 'mean',
                                          'value2': 'var',
                                          'value3': 'sum'})

    def time_different_numpy_functions(self, df):
        df.groupby(['key1', 'key2']).agg({'value1': np.mean,
                                          'value2': np.var,
                                          'value3': np.sum})

    def time_different_python_functions_multicol(self, df):
        df.groupby(['key1', 'key2']).agg([sum, min, max])

    def time_different_python_functions_singlecol(self, df):
        df.groupby('key1').agg([sum, min, max])
class GroupStrings(object):
    # Max over a four-string-column key where every distinct key occurs
    # exactly 5 times, in shuffled order.

    def setup(self):
        n = 2 * 10**5
        alpha = list(map(''.join, product(ascii_letters, repeat=4)))
        data = np.random.choice(alpha, (n // 5, 4), replace=False)
        data = np.repeat(data, 5, axis=0)
        self.df = DataFrame(data, columns=list('abcd'))
        self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3)
        self.df = self.df.sample(frac=1).reset_index(drop=True)

    def time_multi_columns(self):
        self.df.groupby(list('abcd')).max()
class MultiColumn(object):
    # Cythonized sum vs. lambda/np.sum paths on a two-object-column key,
    # with and without prior column selection.

    def setup_cache(self):
        N = 10**5
        key1 = np.tile(np.arange(100, dtype=object), 1000)
        key2 = key1.copy()
        np.random.shuffle(key1)
        np.random.shuffle(key2)
        df = DataFrame({'key1': key1,
                        'key2': key2,
                        'data1': np.random.randn(N),
                        'data2': np.random.randn(N)})
        return df

    def time_lambda_sum(self, df):
        df.groupby(['key1', 'key2']).agg(lambda x: x.values.sum())

    def time_cython_sum(self, df):
        df.groupby(['key1', 'key2']).sum()

    def time_col_select_lambda_sum(self, df):
        df.groupby(['key1', 'key2'])['data1'].agg(lambda x: x.values.sum())

    def time_col_select_numpy_sum(self, df):
        df.groupby(['key1', 'key2'])['data1'].agg(np.sum)
class Size(object):
    # groupby.size on a multi-column key, a TimeGrouper key and a
    # categorical key.

    def setup(self):
        n = 10**5
        offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
        dates = np.datetime64('now') + offsets
        self.df = DataFrame({'key1': np.random.randint(0, 500, size=n),
                             'key2': np.random.randint(0, 100, size=n),
                             'value1': np.random.randn(n),
                             'value2': np.random.randn(n),
                             'value3': np.random.randn(n),
                             'dates': dates})
        self.draws = Series(np.random.randn(n))
        labels = Series(['foo', 'bar', 'baz', 'qux'] * (n // 4))
        self.cats = labels.astype('category')

    def time_multi_size(self):
        self.df.groupby(['key1', 'key2']).size()

    def time_dt_timegrouper_size(self):
        # TimeGrouper is deprecated; suppress its FutureWarning while timing.
        with warnings.catch_warnings(record=True):
            self.df.groupby(TimeGrouper(key='dates', freq='M')).size()

    def time_category_size(self):
        self.draws.groupby(self.cats).size()
class GroupByMethods(object):
    # Time every groupby method across dtypes, either called directly or
    # routed through .transform.

    param_names = ['dtype', 'method', 'application']
    params = [['int', 'float', 'object', 'datetime'],
              ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin',
               'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head',
               'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique',
               'pct_change', 'prod', 'rank', 'sem', 'shift', 'size', 'skew',
               'std', 'sum', 'tail', 'unique', 'value_counts', 'var'],
              ['direct', 'transformation']]

    def setup(self, dtype, method, application):
        if method in method_blacklist.get(dtype, {}):
            raise NotImplementedError  # skip benchmark
        ngroups = 1000
        size = ngroups * 2
        rng = np.arange(ngroups)
        values = rng.take(np.random.randint(0, ngroups, size=size))
        if dtype == 'int':
            key = np.random.randint(0, size, size=size)
        elif dtype == 'float':
            key = np.concatenate([np.random.random(ngroups) * 0.1,
                                  np.random.random(ngroups) * 10.0])
        elif dtype == 'object':
            key = ['foo'] * size
        elif dtype == 'datetime':
            key = date_range('1/1/2011', periods=size, freq='s')
        df = DataFrame({'values': values, 'key': key})
        # BUG FIX: the parameter value is 'transformation' (see ``params``),
        # but this previously compared against 'transform', so the
        # transform code path was never actually benchmarked.
        if application == 'transformation':
            if method == 'describe':
                raise NotImplementedError  # describe cannot be transformed
            self.as_group_method = lambda: df.groupby(
                'key')['values'].transform(method)
            self.as_field_method = lambda: df.groupby(
                'values')['key'].transform(method)
        else:
            self.as_group_method = getattr(df.groupby('key')['values'], method)
            self.as_field_method = getattr(df.groupby('values')['key'], method)

    def time_dtype_as_group(self, dtype, method, application):
        self.as_group_method()

    def time_dtype_as_field(self, dtype, method, application):
        self.as_field_method()
class RankWithTies(object):
    # GH 21237
    # Rank a column where every value ties, for each tie-breaking method.

    param_names = ['dtype', 'tie_method']
    params = [['float64', 'float32', 'int64', 'datetime64'],
              ['first', 'average', 'dense', 'min', 'max']]

    def setup(self, dtype, tie_method):
        N = 10**4
        if dtype == 'datetime64':
            data = np.array([Timestamp("2011/01/01")] * N, dtype=dtype)
        else:
            data = np.array([1] * N, dtype=dtype)
        self.df = DataFrame({'values': data, 'key': ['foo'] * N})

    def time_rank_ties(self, dtype, tie_method):
        self.df.groupby('key').rank(method=tie_method)
class Float32(object):
    # GH 13335
    # Sum grouped by a float32 key column with many near-duplicate values.

    def setup(self):
        tmp1 = (np.random.random(10000) * 0.1).astype(np.float32)
        tmp2 = (np.random.random(10000) * 10.0).astype(np.float32)
        tmp = np.concatenate((tmp1, tmp2))
        arr = np.repeat(tmp, 10)
        self.df = DataFrame(dict(a=arr, b=arr))

    def time_sum(self):
        self.df.groupby(['a'])['b'].sum()
class Categories(object):
    # count on categorical keys: plain, ordered, and with mostly-unused
    # categories; each with sort=True and sort=False.

    def setup(self):
        N = 10**5
        arr = np.random.random(N)
        data = {'a': Categorical(np.random.randint(10000, size=N)),
                'b': arr}
        self.df = DataFrame(data)
        data = {'a': Categorical(np.random.randint(10000, size=N),
                                 ordered=True),
                'b': arr}
        self.df_ordered = DataFrame(data)
        # 100 used codes out of 10000 declared categories.
        data = {'a': Categorical(np.random.randint(100, size=N),
                                 categories=np.arange(10000)),
                'b': arr}
        self.df_extra_cat = DataFrame(data)

    def time_groupby_sort(self):
        self.df.groupby('a')['b'].count()

    def time_groupby_nosort(self):
        self.df.groupby('a', sort=False)['b'].count()

    def time_groupby_ordered_sort(self):
        self.df_ordered.groupby('a')['b'].count()

    def time_groupby_ordered_nosort(self):
        self.df_ordered.groupby('a', sort=False)['b'].count()

    def time_groupby_extra_cat_sort(self):
        self.df_extra_cat.groupby('a')['b'].count()

    def time_groupby_extra_cat_nosort(self):
        self.df_extra_cat.groupby('a', sort=False)['b'].count()
class Datelike(object):
    # GH 14338
    # Sum grouped by period, naive-datetime and tz-aware datetime indexes.

    params = ['period_range', 'date_range', 'date_range_tz']
    param_names = ['grouper']

    def setup(self, grouper):
        N = 10**4
        rng_map = {'period_range': period_range,
                   'date_range': date_range,
                   'date_range_tz': partial(date_range, tz='US/Central')}
        self.grouper = rng_map[grouper]('1900-01-01', freq='D', periods=N)
        self.df = DataFrame(np.random.randn(10**4, 2))

    def time_sum(self, grouper):
        self.df.groupby(self.grouper).sum()
class SumBools(object):
    # GH 2692
    # Sum of a boolean column with one row per group.

    def setup(self):
        N = 500
        self.df = DataFrame({'ii': range(N),
                             'bb': [True] * N})

    def time_groupby_sum_booleans(self):
        self.df.groupby('ii').sum()
class SumMultiLevel(object):
    # GH 9049
    # Sum grouped by both levels of a MultiIndex.

    timeout = 120.0  # raised timeout: this case was historically very slow

    def setup(self):
        N = 50
        self.df = DataFrame({'A': list(range(N)) * 2,
                             'B': range(N * 2),
                             'C': 1}).set_index(['A', 'B'])

    def time_groupby_sum_multiindex(self):
        self.df.groupby(level=[0, 1]).sum()
class Transform(object):
    # .transform with lambdas, ufuncs and string methods on a MultiIndex
    # frame and on multi-key frames with small and large group sizes.

    def setup(self):
        n1 = 400
        n2 = 250
        index = MultiIndex(levels=[np.arange(n1), tm.makeStringIndex(n2)],
                           labels=[np.repeat(range(n1), n2).tolist(),
                                   list(range(n2)) * n1],
                           names=['lev1', 'lev2'])
        arr = np.random.randn(n1 * n2, 3)
        # Sprinkle a few NaNs into each column.
        arr[::10000, 0] = np.nan
        arr[1::10000, 1] = np.nan
        arr[2::10000, 2] = np.nan
        data = DataFrame(arr, index=index, columns=['col1', 'col20', 'col3'])
        self.df = data
        n = 20000
        # df1/df2: many tiny groups; df3/df4: fewer, larger groups.
        self.df1 = DataFrame(np.random.randint(1, n, (n, 3)),
                             columns=['jim', 'joe', 'jolie'])
        self.df2 = self.df1.copy()
        self.df2['jim'] = self.df2['joe']  # duplicated key columns
        self.df3 = DataFrame(np.random.randint(1, (n / 10), (n, 3)),
                             columns=['jim', 'joe', 'jolie'])
        self.df4 = self.df3.copy()
        self.df4['jim'] = self.df4['joe']

    def time_transform_lambda_max(self):
        self.df.groupby(level='lev1').transform(lambda x: max(x))

    def time_transform_ufunc_max(self):
        self.df.groupby(level='lev1').transform(np.max)

    def time_transform_multi_key1(self):
        self.df1.groupby(['jim', 'joe'])['jolie'].transform('max')

    def time_transform_multi_key2(self):
        self.df2.groupby(['jim', 'joe'])['jolie'].transform('max')

    def time_transform_multi_key3(self):
        self.df3.groupby(['jim', 'joe'])['jolie'].transform('max')

    def time_transform_multi_key4(self):
        self.df4.groupby(['jim', 'joe'])['jolie'].transform('max')
class TransformBools(object):
    # transform(np.mean) over ~1400 variable-length runs of a signal.

    def setup(self):
        N = 120000
        transition_points = np.sort(np.random.choice(np.arange(N), 1400))
        # BUG FIX: np.bool was a deprecated alias of the builtin ``bool``
        # (removed in numpy 1.24); use the builtin directly.
        transitions = np.zeros(N, dtype=bool)
        transitions[transition_points] = True
        self.g = transitions.cumsum()  # run id per row
        self.df = DataFrame({'signal': np.random.rand(N)})

    def time_transform_mean(self):
        self.df['signal'].groupby(self.g).transform(np.mean)
class TransformNaN(object):
    # GH 12737
    # transform('first') where 9 of 10 rows in every group are NaN.

    def setup(self):
        self.df_nans = DataFrame({'key': np.repeat(np.arange(1000), 10),
                                  'B': np.nan,
                                  'C': np.nan})
        self.df_nans.loc[4::10, 'B':'C'] = 5

    def time_first(self):
        self.df_nans.groupby('key').transform('first')
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
Blaffie/Hello-world | Programs/Firkant profil program.py | 1 | 5693 | #Program utregning av bøyningsspenning i Firkantprofil
import math
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from tkinter import * #Brukes til GUI
# Load case and material limit.
F = 1200  # applied force, N
lengde_start = 0  # first evaluated beam length, mm
lengde_slutt = 2500  # last evaluated beam length, mm
max_bøyespenning = 200  # allowed bending stress, N/mm2

# Module-level working state shared by the functions below.
lengde_mellom = int(lengde_slutt / 10)  # length step, mm
w_x_firkantprofil = ()
w_x_rør = ()
w_y_rør = ()
x_main = [0, ]  # lengths for all evaluated profiles (filled by utregning_sigma_b)
y_main = [0, ]  # bending stresses matching x_main
y_main_copy = []
x_main_copy = []
firkantprofil_dim_main = [0, ]  # evaluated outer profile sizes, mm
overskrift_graff = ""  # plot title, set by the profile routine
def Firkantprofil_hull ():
    """Sweep hollow square profiles from 20 to 100 mm outer size.

    For each size the section modulus is computed and printed, the
    bending stress along the beam is accumulated into the module-level
    x_main/y_main lists via utregning_sigma_b(), and the size is recorded
    in firkantprofil_dim_main.
    """
    global w_x_firkantprofil
    global Størelse_profil
    global overskrift_graff
    global t
    overskrift_graff = "Firkantprofil"
    Størelse_profil = ()
    t = 3  # wall thickness, mm
    firkant_max_size = 100  # largest outer dimension, mm
    firkant_min_size = 20  # smallest outer dimension, mm
    faktor = int(firkant_max_size / 10)
    # Outer sizes 20, 30, ..., 100 mm.
    for Størelse_profil in range (firkant_min_size, firkant_max_size+faktor, 10):
        B = Størelse_profil  # outer width
        H = B  # outer height (square profile)
        b = B - (t*2)  # inner width
        h = H - (t*2)  # inner height
        # Section modulus of a hollow rectangle about the bending axis.
        w_x= ((B*H**3) - (b * h **3)) / (6 * H)
        print ("B Størelse på firkantprofil: " + str(B))
        print ("H Størelse på firkantprofil: " + str(H))
        print ("Tykkelse på firkantprofil: " + str(t))
        print ()
        print("wx Firkantprofil: " + str(round(w_x, 2)))
        print ()
        utregning_sigma_b(w_x)
        firkantprofil_dim_main.append (Størelse_profil)
def utregning_sigma_b(w_x):
    """Record and print the bending stress along the beam.

    For each length from lengde_start to lengde_slutt (inclusive, module
    globals) the bending moment F * length is divided by the section
    modulus ``w_x``; the (length, stress) pairs are appended to the
    module-level x_main/y_main lists.
    """
    global x_main_copy
    global y_main_copy
    stop = lengde_slutt + lengde_mellom
    for position in range(lengde_start, stop, lengde_mellom):
        bending_moment = F * position
        stress = bending_moment / w_x
        x_main.append(position)
        y_main.append(stress)
        print("sigmabøy:" + str(round(stress, 2)) + " N/mm2. "
              + "lengde: " + str(position) + " mm")
    print()
def Lag_Graff():
    """Plot the recorded bending-stress curves, one line per profile size.

    Consumes (pops) the module-level x_main/y_main coordinate lists filled
    by Firkantprofil_hull(), newest profile first, and shows a matplotlib
    figure with one colour-coded curve per profile size.
    """
    y_max = max_bøyespenning
    # The original code duplicated this pop-loop nine times (x1..x9); each
    # curve holds the 11 most recent samples (range(1, 12)).
    num_curves = 9
    samples_per_curve = 11
    curves = []
    for _ in range(num_curves):
        xs = []
        ys = []
        for _ in range(samples_per_curve):
            xs.append(x_main.pop())
            ys.append(y_main.pop())
        curves.append((xs, ys))
    style.use("seaborn-dark")
    fig = plt.figure()
    ax1 = fig.add_subplot(211)
    plt.xlabel("Lengde i mm")
    plt.ylabel("Sigma bøy N/mm^2")
    plt.title("Oversikt over " + overskrift_graff)
    # Curves were popped newest-first, so reverse the size list to match.
    firkantprofil_dim_main.reverse()
    colors = ["#ff00ff",  # pink
              "#20e251",  # light green
              "#20a129",  # green
              "#3e18e2",  # blue
              "#e23e18",  # orange
              "#14ded2",  # cyan
              "#efff00",  # gold
              "#52114d",  # purple
              "#147151"]  # dark green
    for i, (xs, ys) in enumerate(curves):
        ax1.plot(xs, ys, label=firkantprofil_dim_main[i], linewidth=2,
                 color=colors[i])
    # Legend placed below the axes.
    ax1.legend(bbox_to_anchor=(0., -0.27 , 1., .102), loc=2,
               ncol=5, borderaxespad=0.)
    # Caption in the lower-left corner.
    ax1.text(0, -(y_max * 0.15),
             "Fargekoder på dimmensjon av " + overskrift_graff +
             " i mm. Med en tykkelse på " + str(t) + "mm", fontsize=15)
    # Fixed y-axis range up to the allowed stress.
    ax1.set_ylim([0, y_max])
    plt.grid(True)
    plt.show()
def Lag_GUI():
    """Open a small Tk window with a menu to draw the graph or quit."""
    class Window (Frame):
        def __init__(self, master=None):
            Frame.__init__(self, master=None)
            self.master = master
            self.init_window()
        def init_window(self):
            # Window title and layout.
            self.master.title("Firkant profil utregninger")
            self.pack(fill=BOTH, expand=1)
            # Menu bar with a single "Valg" (options) cascade:
            # "Lag graf" draws the plot, "Avslut" exits.
            menu = Menu(self.master)
            self.master.config(menu=menu)
            file = Menu(menu)
            file.add_command(label = "Lag graf", command = Lag_Graff )
            file.add_command(label = "Avslut", command = self.client_exit)
            menu.add_cascade(label="Valg", menu=file)
        def client_exit(self):
            exit()
    root = Tk()
    # Size of the window.
    root.geometry("400x300")
    app = Window(root)
    root.mainloop()
# Script entry: compute the profile data, then open the GUI (use the
# "Valg" menu to draw the graph).
Firkantprofil_hull()
Lag_GUI()
#Lag_Graff()
| mit |
rmeertens/paparazzi | sw/misc/attitude_reference/test_att_ref.py | 49 | 3485 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Antoine Drouin
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import math
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
import pat.utils as pu
import pat.algebra as pa
import control as ctl
def random_setpoint(time, dt_step=2):
    """Build a piecewise-constant random Euler-angle setpoint.

    Every ``dt_step`` seconds a fresh random axis/angle rotation is drawn,
    converted to Euler angles, and held until the next switch instant.
    Returns an array of shape (len(time), 3).
    """
    next_switch = time[0]
    setpoint = np.zeros((len(time), 3))
    current = [0, 0, 0]
    for i, now in enumerate(time):
        if now >= next_switch:
            axis = np.random.rand(3) - [0.5, 0.5, 0.5]
            angle = np.random.rand(1)
            norm = np.linalg.norm(axis)
            if norm > 0:
                axis /= norm
            current = pa.euler_of_quat(pa.quat_of_axis_angle(axis, angle))
            next_switch += dt_step
        setpoint[i] = current
    return setpoint
def test_ref(r, time, setpoint):
    """Run attitude reference ``r`` over ``time`` tracking ``setpoint``.

    Returns an array whose rows are [euler(3), rates(3), accels(3)];
    row 0 is left at zero since integration starts at the second sample.
    """
    ref = np.zeros((len(time), 9))
    for i in range(1, time.size):
        quat_sp = pa.quat_of_euler(setpoint[i])
        dt = time[i] - time[i - 1]
        r.update_quat(quat_sp, dt)
        ref[i] = np.concatenate((pa.euler_of_quat(r.quat), r.vel, r.accel))
    return ref
def plot_ref(time, xref=None, sp=None, figure=None):
    """Plot the 9 reference channels (Euler angles, rates, accelerations)
    on a 3x3 grid, optionally overlaying the setpoint on the attitude
    subplots. Angles are converted to degrees for display; returns the
    figure so successive calls can overlay curves.
    """
    margins = (0.05, 0.05, 0.98, 0.96, 0.20, 0.34)
    figure = pu.prepare_fig(figure, window_title='Reference', figsize=(20.48, 10.24), margins=margins)
    plots = [("$\phi$", "deg"), ("$\\theta$", "deg"), ("$\\psi$", "deg"),
             ("$p$", "deg/s"), ("$q$", "deg/s"), ("$r$", "deg/s"),
             ("$\dot{p}$", "deg/s2"), ("$\dot{q}$", "deg/s2"), ("$\dot{r}$", "deg/s2")]
    for i, (title, ylab) in enumerate(plots):
        ax = plt.subplot(3, 3, i + 1)
        if xref is not None: plt.plot(time, pu.deg_of_rad(xref[:, i]))
        pu.decorate(ax, title=title, ylab=ylab)
        # The setpoint only covers the three attitude channels.
        if sp is not None and i < 3:
            plt.plot(time, pu.deg_of_rad(sp[:, i]))
    return figure
# Script body: compare the saturated attitude-reference implementations
# on a 45-degree square-wave roll setpoint and plot their trajectories.
dt = 1. / 512.
time = np.arange(0., 4, dt)
sp = np.zeros((len(time), 3))
sp[:, 0] = pu.rad_of_deg(45.) * scipy.signal.square(math.pi / 2 * time + math.pi)
# sp[:, 1] = pu.rad_of_deg(5.)*scipy.signal.square(math.pi/2*time)
# sp[:, 2] = pu.rad_of_deg(45.)
# sp = random_setpoint(time)
# rs = [ctl.att_ref_analytic_disc(axis=0), ctl.att_ref_analytic_cont(axis=0), ctl.att_ref_default()]
# Shared dynamics and saturation limits for every model under test.
args = {'omega': 10., 'xi': 0.7, 'sat_vel': pu.rad_of_deg(150.), 'sat_accel': pu.rad_of_deg(1800),
        'sat_jerk': pu.rad_of_deg(27000)}
rs = [ctl.att_ref_sat_naive(**args), ctl.att_ref_sat_nested(**args), ctl.att_ref_sat_nested2(**args)]
# rs.append(ctl.AttRefIntNative(**args))
rs.append(ctl.AttRefFloatNative(**args))
# Run each model over the same setpoint, overlay all results plus the
# setpoint itself on a single figure.
xrs = [test_ref(r, time, sp) for r in rs]
figure = None
for xr in xrs:
    figure = plot_ref(time, xr, None, figure)
figure = plot_ref(time, None, sp, figure)
legends = [r.name for r in rs] + ['Setpoint']
plt.subplot(3, 3, 3)
plt.legend(legends)
plt.show()
| gpl-2.0 |
hansbrenna/NetCDF_postprocessor | plotter4.py | 1 | 3593 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:31:31 2015
@author: hanbre
"""
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import xray
import datetime
import netCDF4
from mpl_toolkits.basemap import Basemap
import matplotlib
from matplotlib.pylab import *
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import seaborn as sns
from IPython import embed
class MidpointNormalize(Normalize):
    """Normalize variant that pins ``midpoint`` to the colormap centre.

    Useful with diverging colormaps so that the chosen midpoint (e.g. 0)
    always maps to the middle colour regardless of (vmin, vmax).
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        # Masked values and other edge cases are deliberately not handled.
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y))
def read_data(id_in):
    """Open the NetCDF file at path ``id_in`` as an xray Dataset."""
    return xray.open_dataset(id_in)
def plotter(vm,x,y):
    """Contour-plot field ``vm`` over coordinates ``x``/``y``.

    Uses the module-level ``var`` (variable name from the command line)
    to choose the colour scale; assumes ``vm`` behaves like an xray
    DataArray (has .values, .transpose, .units) -- TODO confirm.
    """
    #fig=figure()
    print('plotter')
    xx,yy=np.meshgrid(x,y)
    # Transpose the field if its orientation does not match the grid.
    if shape(xx)!=shape(vm):
        vm=vm.transpose()
    gases = ['O3','HCL','CL','CLY','']
    if var in gases:
        # Trace gases: jet colormap over the full data range.
        CF = contourf(x,y,vm,linspace(np.amin(vm.values),np.amax(vm.values),10),cmap=matplotlib.cm.jet)
        CS=contour(x, y, vm,linspace(np.amin(vm.values),np.amax(vm.values),10),colors='k')
    elif var == 'T':
        # Temperature: clamp the upper contour level at 400.
        CF = contourf(x,y,vm,linspace(np.amin(vm.values),400,10),cmap=matplotlib.cm.jet)
        CS=contour(x, y, vm,linspace(np.amin(vm.values),400,10),colors='k')
    else:
        # Other fields: diverging colormap centred on zero.
        norm = MidpointNormalize(midpoint=0)
        CF=contourf(x,y,vm,np.linspace(np.amin(vm.values),np.amax(vm.values),1000),norm=norm,cmap='seismic')
        CS=contour(x, y, vm,10,colors='k')
    xlabel(x.units);ylabel(y.units)
    # NOTE(review): ``v`` is a module-level global (the selected variable),
    # not the local ``vm`` -- verify the caller sets it before plotting.
    clb = colorbar(CF); clb.set_label('('+v.units+')')
    #title=('{0} at {1}={2} and {3}={4}'.format(var,getattr(v,pvar1)[p1],getattr(v,pvar1)[p1].values,getattr(v,pvar2)[p2],getattr(v,pvar2)[p2].values))
    #close(fig)
    return
def meaner(v, mvars):
    """Average ``v`` over the dimensions listed in ``mvars``."""
    return v.mean(dim=mvars)
def pointextr(v, pvar1, p1, pvar2, p2, pvars):
    """Select the slice of ``v`` given by the pre-built indexer ``pvars``.

    NOTE(review): pvar1/p1/pvar2/p2 are currently unused; the selection
    relies solely on ``pvars``.
    """
    return v[pvars]
if __name__=='__main__':
    # Command line: argv[1] NetCDF file, argv[2] variable, argv[3] x-axis,
    # argv[4] y-axis (time/lev/lat/lon). Optional argv[5] selects 'band'
    # averaging or a 'cut'/'point' extraction, the latter followed by
    # argv[6..9] = dim1 point1 dim2 point2; with exactly 5 args everything
    # else is averaged.
    avgall=False; bandavg=False; point=False;
    if len(sys.argv)<5 or 'help' in sys.argv:
        print( 'This script takes at least 5 command line arguments ',len(sys.argv),' is given. \n')
        print( 'The usage is: Name of this script; path and name of netcdf file to be analysed;\n')
        print( 'name of variable; name of x-axis; name of y-axis (time, lev, lat, lon)')
        print( 'The 6th argumaent must be either point or band. If point')
        print( 'a point must be specified in the other two dimensions on the form (dim1 point1 dim2 point2)')
        sys.exit()
    elif len(sys.argv)==5:
        avgall = True
    elif len(sys.argv) > 5:
        if sys.argv[5] == 'band':
            bandavg = True
        if sys.argv[5] == 'cut':
            point = True
        if sys.argv[5] == 'point':
            point = True
            # ``double`` comes from the ``matplotlib.pylab`` star import
            # (alias of numpy.double).
            dim1 = sys.argv[6]
            point1 = double(sys.argv[7])
            dim2 = sys.argv[8]
            point2 = double(sys.argv[9])
        else:
            # NOTE(review): this else pairs with the 'point' check above,
            # so 'band' and 'cut' also fall through here and exit --
            # looks unintended; confirm the desired nesting.
            print( "If this script is given more than 5 command line arguments, sys.argv[5] has to be 'cut', 'point' or 'band'. Give 'help' as an argument to show help text.")
            sys.exit()
    id_in=sys.argv[1]; var=sys.argv[2]
    ds=read_data(id_in)
| gpl-3.0 |
devanshdalal/scikit-learn | sklearn/cluster/dbscan_.py | 20 | 12730 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
           algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
    """Perform DBSCAN clustering from vector array or distance matrix.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.

    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.

    metric_params : dict, optional
        Additional keyword arguments for the metric function.

        .. versionadded:: 0.19

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.

    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.

    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.

    labels : array [n_samples]
        Cluster labels for each point. Noisy samples are given the label -1.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).

    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    if not eps > 0.0:
        raise ValueError("eps must be positive.")

    X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)

    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, its useless information)
    if metric == 'precomputed' and sparse.issparse(X):
        # Precomputed sparse distances: extract, per row, the column
        # indices whose stored distance is within eps.
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates()  # XXX: modifies X's internals in-place
        X_mask = X.data <= eps
        masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
        masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]

        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])

        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    else:
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric,
                                           metric_params=metric_params, p=p,
                                           n_jobs=n_jobs)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)

    # Neighborhood size (or total weight) decides core-point status.
    if sample_weight is None:
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])

    # Initially, all samples are noise.
    labels = -np.ones(X.shape[0], dtype=np.intp)

    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    # Cython inner loop expands clusters from the core samples.
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 metric_params=None, algorithm='auto', leaf_size=30, p=None,
                 n_jobs=1):
        # Scikit-learn estimator convention: __init__ only stores the
        # hyper-parameters verbatim; no validation or work happens here.
        # fit() later forwards all of them to dbscan() via get_params().
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
vrmarcelino/Shape-4-Qiime | merge_fasta.py | 1 | 2297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Concatenate different fasta files and add barcodes.
Run this script after separate fasta and qual files (see onvert_fastaqual_fastq.py from qiime)
Usage ex: merge_fasta.py samples_list.csv *.fna
Created on Thu Jul 31 15:49:39 2014
@author: VanessaRM
Still need to be done: match the .fna sample name with the sample_ID in the csv file.
At this point, this script will add the indexes by alphabetic(?) order,
so the indexe-sample match is not the same as the orginal ones.
"""
from Bio import SeqIO
import sys
import pandas # for .csv handling
# Help: with no command-line arguments, print usage and exit.
# (Python 2 print statements — this script targets Python 2.)
if len(sys.argv) == 1:
    print ""
    print "Script to concatenate fasta files and add indexes for Qiime pipeline"
    print ""
    print "Usage: supply the csv file with indexes and the fasta (.fasta or .fna) files"
    print "ex: merge_fasta.py samples_list.csv *.fna"
    print ""
    print ""
    sys.exit()
# Input files: argv[1] is the CSV of sample indexes; the rest are fasta files.
samples_indexes = str(sys.argv[1])
si = pandas.read_csv(samples_indexes)
# Each barcode is the forward index concatenated with the reverse-complement
# reverse index, one per CSV row.
Index_seq = si["Frd_Index"] + si["Rev_Index_RC"]
input_fasta = []
for n in sys.argv[2:]:
    input_fasta.append(str(n))
# Accumulator for every barcoded record across all input files.
all_records = []
# Prefix every record's sequence with the barcode and collect the records.
def add_barcode(records, barcode):
    """Prepend *barcode* to each record's sequence.

    Side effect: appends every modified record to the module-level
    ``all_records`` list. Returns None.
    """
    for record in records:
        record.seq = barcode + record.seq
        all_records.append(record)
# Iterate over the input fasta files, pairing file i with barcode i.
# NOTE(review): barcodes are matched by position (argument order vs CSV row
# order), not by sample name — the header docstring already flags this.
counter = 0
mapping_file = ["#SampleID"+'\t'+"BarcodeSequence"+'\t'+"LinkerPrimerSequence"+'\t'+"BlaBlaBla"+'\t'+"Description"]
for file in input_fasta:
    original_reads = SeqIO.parse(file, "fasta")
    barcode_seq = Index_seq[counter]
    print""
    print "Adding the barcode %s to the %s file" %(barcode_seq, file)
    do_it = add_barcode(original_reads, barcode_seq)
    # Store info for mapping file: sample name is the part of the basename
    # before the first underscore.
    file_path = str(file)
    name_split_1 = file_path.split("/")
    full_sample_name = name_split_1[-1]
    name_split_2 = full_sample_name.split("_")
    sample_name = name_split_2[0]
    mapping_file.append(sample_name + '\t' + barcode_seq)
    counter +=1
# Save concatenated records and the Qiime mapping file.
SeqIO.write(all_records, "all_records.fna", "fasta")
# NOTE(review): savefile is never closed/flushed explicitly; relies on
# interpreter exit to flush "map.txt".
savefile = open("map.txt", "w")
for lines in mapping_file:
    savefile.write("%s\n" % lines)
print""
print "Mapping file saved as 'map.txt'"
print ""
print "Done!"
| mit |
sunyihuan326/DeltaLab | shuwei_fengge/practice_one/model/SoftMax.py | 1 | 5279 | # coding:utf-8
'''
Created on 2017/11/15.
@author: chk01
'''
# 读取数据
# 数据预处理-reshape-标准化
# 每一步迭代步骤
# 循环迭代步骤
import os
import tensorflow as tf
from tensorflow.python.framework import ops
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as scio
from sklearn.model_selection import train_test_split
def init_sets(X, Y, file, distribute):
    """Shuffle the columns of X/Y together and persist a train/test split.

    Parameters
    ----------
    X : ndarray, shape (n_features, m) — one sample per column.
    Y : ndarray, shape (n_labels, m) — labels aligned with X's columns.
    file : str — path prefix; 'SoftMax_train'/'SoftMax_test' are appended
        (scipy adds the '.mat' extension automatically).
    distribute : sequence of two fractions summing to 1, e.g. [0.8, 0.2].

    Returns
    -------
    bool — always True on success.
    """
    # Validate before doing any (potentially expensive) shuffling.
    assert len(distribute) == 2
    # Tolerant float comparison: fractions such as 0.7 + 0.3 may not sum
    # to exactly 1.0 in binary floating point.
    assert abs(sum(distribute) - 1.0) <= 1e-9
    m = X.shape[1]
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation]
    # Column index separating the train part from the test part.
    split = int(m * distribute[0])
    scio.savemat(file + 'SoftMax_train',
                 {'X': shuffled_X[:, :split], 'Y': shuffled_Y[:, :split]})
    scio.savemat(file + 'SoftMax_test',
                 {'X': shuffled_X[:, split:], 'Y': shuffled_Y[:, split:]})
    return True
def load_data(file):
    """Ensure the shuffled train/test .mat splits exist for *file*.

    No-op when the test split is already on disk; otherwise loads the raw
    .mat file and writes an 80/20 split via init_sets. (Per the original
    notes, the transposed arrays are X(784, 20000) and Y(10, 20000) —
    confirm against the dataset.) Always returns True.
    """
    if os.path.exists(file + 'SoftMax_test.mat'):
        return True
    raw = scio.loadmat(file)
    init_sets(raw['X'].T, raw['Y'].T, file, distribute=[0.8, 0.2])
    return True
def initialize_parameters(n_x, n_y, file, ifExtend=False):
    """Create TF1 variables W1 (n_y, n_x) and b1, optionally resuming.

    With ifExtend=True and a saved parameter file present, the Xavier-
    initialized variables are replaced by tf.Variable copies of the saved
    arrays. NOTE(review): the discarded get_variable nodes then remain
    registered in the graph. NOTE(review): this loads
    file + 'SoftMax_parameters' while __main__ saves 'SoftMax_parameter'
    (no trailing 's') — resuming can never find the file; confirm the
    intended name.
    """
    W1 = tf.get_variable(name='W1', dtype=tf.float32, shape=(n_y, n_x),
                         initializer=tf.contrib.layers.xavier_initializer())
    # shape=(1) is the int 1 here, not a tuple — TF treats it as a
    # 1-element shape; presumably a scalar bias was intended.
    b1 = tf.get_variable(dtype=tf.float32, name='b1', shape=(1),
                         initializer=tf.contrib.layers.xavier_initializer())
    # b1 = tf.constant(0.1)
    if ifExtend and os.path.exists(file + 'SoftMax_parameters'):
        parameters = scio.loadmat(file + 'SoftMax_parameters')
        W1 = tf.Variable(parameters['W1'])
        b1 = tf.Variable(parameters['b1'])
    parameters = {"W1": W1, 'b1': b1}
    return parameters
def create_placeholders(n_x, n_y):
    """Return float32 placeholders X (batch, n_x) and Y (batch, n_y)."""
    X = tf.placeholder(dtype=tf.float32, shape=(None, n_x), name='X')
    Y = tf.placeholder(dtype=tf.float32, shape=(None, n_y), name='Y')
    return X, Y
def forward_propagation(X, parameters):
    """Single linear layer: logits = X @ W1.T + b1 (no activation)."""
    weight = parameters['W1']
    bias = parameters['b1']
    # Dropout was experimented with here and left disabled:
    # Z1 = tf.nn.dropout(Z1, 0.9)
    return tf.matmul(X, tf.transpose(weight)) + bias
def compute_cost(Z1, Y, parameters, regular=False):
    """Mean softmax cross-entropy; optionally adds an L2(0.2) penalty on W1."""
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=Z1, labels=Y))
    if not regular:
        return cross_entropy
    return cross_entropy + tf.contrib.layers.l2_regularizer(.2)(parameters['W1'])
def cost_fig(costs, learning_rate):
    """Plot the recorded cost history; blocks until the figure is closed."""
    history = np.squeeze(costs)
    plt.plot(history)
    plt.ylabel('cost')
    plt.xlabel('epochs (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    return True
def data_check(data):
    """Print the percentage share of each class in a one-hot label matrix.

    `data` has shape (classes, samples): each column is one sample's
    one-hot label. Output text (including the Chinese captions, '的比例' =
    "proportion of", '分割线' = "divider") is unchanged from the original.
    """
    labels = list(np.argmax(data.T, 1))
    total = len(labels)
    for cls in range(data.shape[0]):
        share = round(100.0 * labels.count(cls) / total, 2)
        print(str(cls) + '的比例', share, '%')
    print('<------------------分割线---------------------->')
def model(X_train, Y_train, X_test, Y_test, file, epochs=2000, learning_rate=0.5, print_cost=True):
    """Train the single-layer softmax classifier and report accuracies.

    Builds placeholders, parameters, the forward pass and an Adam
    optimizer, trains full-batch for `epochs` iterations, plots the cost
    curve, and prints train/test accuracy. Returns the parameter dict as
    evaluated on the last training step (numpy arrays from sess.run).
    """
    ops.reset_default_graph()
    n_x = X_train.shape[1]
    n_y = Y_train.shape[1]
    costs = []  # cost history, one entry per epoch (see note below)
    X, Y = create_placeholders(n_x, n_y)
    print(X)
    print(Y)
    parameters = initialize_parameters(n_x, n_y, file)
    print('W1===================', parameters['W1'])
    print('b1==================', parameters['b1'])
    Z1 = forward_propagation(X, parameters)
    print('ZL========', Z1)
    cost = compute_cost(Z1, Y, parameters, False)  # regularization disabled
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(epochs):
            # Full-batch training: the whole training set is fed every epoch.
            z1, par, _, temp_cost = sess.run([Z1, parameters, optimizer, cost],
                                             feed_dict={X: X_train, Y: Y_train})
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, temp_cost))
            if print_cost and epoch % 1 == 0:
                # epoch % 1 is always 0, so every epoch's cost is recorded.
                costs.append(temp_cost)
        cost_fig(costs, learning_rate)
        predict_op = tf.argmax(Z1, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)
    # NOTE(review): `par` is first bound inside the training loop, so
    # epochs == 0 would raise NameError here.
    return par
if __name__ == '__main__':
    # Per-developer dataset locations; switch by changing `name`.
    name = 'Syh'
    if name == 'Dxq':
        file = 'F:/dataSets/MNIST/mnist_data_small'
    elif name == 'Syh':
        file = 'E:/deeplearning_Data/mnist_data_small'
    data_train = scio.loadmat(file)
    X_train, X_test, Y_train, Y_test = train_test_split(data_train['X'], data_train['Y'], test_size=0.2)
    # print(X_train.shape,X_test.shape,Y_train.shape)
    # data_check(Y_train)
    # data_check(Y_test)
    #
    parameters = model(X_train, Y_train, X_test, Y_test, file, epochs=20, learning_rate=0.01)
    W1 = parameters['W1']
    b1 = parameters['b1']
    # NOTE(review): initialize_parameters() looks for
    # file + 'SoftMax_parameters' (with trailing 's') when resuming, but
    # this saves 'SoftMax_parameter' — the saved file is never reloaded;
    # confirm which name is intended.
    scio.savemat(file + 'SoftMax_parameter', {'W1': W1, 'b1': b1})
| mit |
nborggren/zipline | zipline/utils/data.py | 1 | 15731 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import datetime
from collections import MutableMapping
from copy import deepcopy
try:
from six.moves._thread import get_ident
except ImportError:
from six.moves._dummy_thread import get_ident
import numpy as np
import pandas as pd
from toolz import merge
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
    """
    Preallocation strategies for rolling window over expanding data set
    Restrictions: major_axis can only be a DatetimeIndex for now

    Internally this is a circular-ish buffer: `cap = cap_multiple * window`
    frames are preallocated, the trailing `window` frames before `_pos` are
    the visible data, and when `_pos` hits `cap` the last window of frames
    is copied back to the front (_roll_data), amortizing the roll cost.
    """
    def __init__(self,
                 window,
                 items,
                 sids,
                 cap_multiple=2,
                 dtype=np.float64,
                 initial_dates=None):
        # `_pos` starts at `window` so the initial visible window is the
        # (empty/NaT) frames [0, window).
        self._pos = window
        self._window = window
        self.items = _ensure_index(items)
        self.minor_axis = _ensure_index(sids)
        self.cap_multiple = cap_multiple
        self.dtype = dtype
        if initial_dates is None:
            # Multiplying a datetime64 buffer by pd.NaT fills it with NaT.
            self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
        elif len(initial_dates) != window:
            raise ValueError('initial_dates must be of length window')
        else:
            # Seed the first `window` slots; the remainder stays
            # uninitialized until frames are added.
            self.date_buf = np.hstack(
                (
                    initial_dates,
                    np.empty(
                        window * (cap_multiple - 1),
                        dtype='datetime64[ns]',
                    ),
                ),
            )
        self.buffer = self._create_buffer()
    @property
    def cap(self):
        # Total number of frames the preallocated buffer can hold.
        return self.cap_multiple * self._window
    @property
    def _start_index(self):
        # Index of the oldest frame in the current visible window.
        return self._pos - self._window
    @property
    def start_date(self):
        # Date associated with the oldest visible frame.
        return self.date_buf[self._start_index]
    def oldest_frame(self, raw=False):
        """
        Get the oldest frame in the panel.

        With raw=True, returns the underlying ndarray slice instead of a
        pandas object (no index alignment, but much cheaper).
        """
        if raw:
            return self.buffer.values[:, self._start_index, :]
        return self.buffer.iloc[:, self._start_index, :]
    def set_minor_axis(self, minor_axis):
        # Reindex drops columns not in the new minor_axis and inserts NaNs
        # for new ones.
        self.minor_axis = _ensure_index(minor_axis)
        self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
    def set_items(self, items):
        self.items = _ensure_index(items)
        self.buffer = self.buffer.reindex(items=self.items)
    def _create_buffer(self):
        # Preallocate the full-capacity Panel; major_axis is just integer
        # slot positions, real dates live in self.date_buf.
        panel = pd.Panel(
            items=self.items,
            minor_axis=self.minor_axis,
            major_axis=range(self.cap),
            dtype=self.dtype,
        )
        return panel
    def extend_back(self, missing_dts):
        """
        Resizes the buffer to hold a new window with a new cap_multiple.
        If cap_multiple is None, then the old cap_multiple is used.

        Prepends NaN frames for `missing_dts` (older dates) and widens the
        visible window by the same amount.
        """
        delta = len(missing_dts)
        if not delta:
            raise ValueError(
                'missing_dts must be a non-empty index',
            )
        self._window += delta
        self._pos += delta
        self.date_buf = self.date_buf.copy()
        self.date_buf.resize(self.cap)
        self.date_buf = np.roll(self.date_buf, delta)
        old_vals = self.buffer.values
        shape = old_vals.shape
        nan_arr = np.empty((shape[0], delta, shape[2]))
        nan_arr.fill(np.nan)
        # Layout: [new NaN frames][old frames][fresh spare capacity].
        new_vals = np.column_stack(
            (nan_arr,
             old_vals,
             np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
        )
        self.buffer = pd.Panel(
            data=new_vals,
            items=self.items,
            minor_axis=self.minor_axis,
            major_axis=np.arange(self.cap),
            dtype=self.dtype,
        )
        # Fill the delta with the dates we calculated.
        where = slice(self._start_index, self._start_index + delta)
        self.date_buf[where] = missing_dts
    def add_frame(self, tick, frame, minor_axis=None, items=None):
        """
        Append one frame at date `tick`, rolling the buffer first if full.
        """
        if self._pos == self.cap:
            self._roll_data()
        values = frame
        if isinstance(frame, pd.DataFrame):
            values = frame.values
        self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
        self.date_buf[self._pos] = tick
        self._pos += 1
    def get_current(self, item=None, raw=False, start=None, end=None):
        """
        Get a Panel that is the current data in view. It is not safe to persist
        these objects because internal data might change
        """
        item_indexer = slice(None)
        if item:
            item_indexer = self.items.get_loc(item)
        start_index = self._start_index
        end_index = self._pos
        # get inital date window
        where = slice(start_index, end_index)
        current_dates = self.date_buf[where]
        def convert_datelike_to_long(dt):
            # searchsorted needs datetime64-comparable values.
            if isinstance(dt, pd.Timestamp):
                return dt.asm8
            if isinstance(dt, datetime.datetime):
                return np.datetime64(dt)
            return dt
        # constrict further by date
        if start:
            start = convert_datelike_to_long(start)
            start_index += current_dates.searchsorted(start)
        if end:
            end = convert_datelike_to_long(end)
            _end = current_dates.searchsorted(end, 'right')
            end_index -= len(current_dates) - _end
        where = slice(start_index, end_index)
        values = self.buffer.values[item_indexer, where, :]
        current_dates = self.date_buf[where]
        if raw:
            # return copy so we can change it without side effects here
            return values.copy()
        major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
        # 3-D when all items are selected, 2-D when a single item was.
        if values.ndim == 3:
            return pd.Panel(values, self.items, major_axis, self.minor_axis,
                            dtype=self.dtype)
        elif values.ndim == 2:
            return pd.DataFrame(values, major_axis, self.minor_axis,
                                dtype=self.dtype)
    def set_current(self, panel):
        """
        Set the values stored in our current in-view data to be values of the
        passed panel. The passed panel must have the same indices as the panel
        that would be returned by self.get_current.
        """
        where = slice(self._start_index, self._pos)
        self.buffer.values[:, where, :] = panel.values
    def current_dates(self):
        # Dates of the frames currently in view, as a tz-aware index.
        where = slice(self._start_index, self._pos)
        return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
    def _roll_data(self):
        """
        Roll window worth of data up to position zero.
        Save the effort of having to expensively roll at each iteration
        """
        self.buffer.values[:, :self._window, :] = \
            self.buffer.values[:, -self._window:, :]
        self.date_buf[:self._window] = self.date_buf[-self._window:]
        self._pos = self._window
    @property
    def window_length(self):
        return self._window
class MutableIndexRollingPanel(object):
    """
    A version of RollingPanel that exists for backwards compatibility with
    batch_transform. This is a copy to allow behavior of RollingPanel to drift
    away from this without breaking this class.
    This code should be considered frozen, and should not be used in the
    future. Instead, see RollingPanel.

    Unlike RollingPanel, add_frame() here can grow the items/minor axes on
    the fly when a frame introduces new columns or fields.
    """
    def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
        # `_pos` starts at 0 (RollingPanel starts at `window`).
        self._pos = 0
        self._window = window
        self.items = _ensure_index(items)
        self.minor_axis = _ensure_index(sids)
        self.cap_multiple = cap_multiple
        self.cap = cap_multiple * window
        self.dtype = dtype
        self.date_buf = np.empty(self.cap, dtype='M8[ns]')
        self.buffer = self._create_buffer()
    def _oldest_frame_idx(self):
        # Clamped at 0 while the buffer is still filling up.
        return max(self._pos - self._window, 0)
    def oldest_frame(self, raw=False):
        """
        Get the oldest frame in the panel.
        """
        if raw:
            return self.buffer.values[:, self._oldest_frame_idx(), :]
        return self.buffer.iloc[:, self._oldest_frame_idx(), :]
    def set_sids(self, sids):
        self.minor_axis = _ensure_index(sids)
        self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
    def _create_buffer(self):
        # Preallocate the full-capacity Panel; major_axis holds integer
        # slot positions, real dates live in self.date_buf.
        panel = pd.Panel(
            items=self.items,
            minor_axis=self.minor_axis,
            major_axis=range(self.cap),
            dtype=self.dtype,
        )
        return panel
    def get_current(self):
        """
        Get a Panel that is the current data in view. It is not safe to persist
        these objects because internal data might change
        """
        where = slice(self._oldest_frame_idx(), self._pos)
        major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
        return pd.Panel(self.buffer.values[:, where, :], self.items,
                        major_axis, self.minor_axis, dtype=self.dtype)
    def set_current(self, panel):
        """
        Set the values stored in our current in-view data to be values of the
        passed panel. The passed panel must have the same indices as the panel
        that would be returned by self.get_current.
        """
        where = slice(self._oldest_frame_idx(), self._pos)
        self.buffer.values[:, where, :] = panel.values
    def current_dates(self):
        # Dates of the frames currently in view, as a tz-aware index.
        where = slice(self._oldest_frame_idx(), self._pos)
        return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
    def _roll_data(self):
        """
        Roll window worth of data up to position zero.
        Save the effort of having to expensively roll at each iteration
        """
        self.buffer.values[:, :self._window, :] = \
            self.buffer.values[:, -self._window:, :]
        self.date_buf[:self._window] = self.date_buf[-self._window:]
        self._pos = self._window
    def add_frame(self, tick, frame, minor_axis=None, items=None):
        """
        Append one frame at date `tick`, growing the axes if the frame
        introduces new columns/fields, and rolling first when full.
        """
        if self._pos == self.cap:
            self._roll_data()
        if isinstance(frame, pd.DataFrame):
            minor_axis = frame.columns
            items = frame.index
        if set(minor_axis).difference(set(self.minor_axis)) or \
                set(items).difference(set(self.items)):
            self._update_buffer(frame)
        vals = frame.T.astype(self.dtype)
        self.buffer.loc[:, self._pos, :] = vals
        self.date_buf[self._pos] = tick
        self._pos += 1
    def _update_buffer(self, frame):
        # Get current frame as we only need to care about the data that is in
        # the active window
        old_buffer = self.get_current()
        if self._pos >= self._window:
            # Don't count the last major_axis entry if we're past our window,
            # since it's about to roll off the end of the panel.
            old_buffer = old_buffer.iloc[:, 1:, :]
        nans = pd.isnull(old_buffer)
        # Find minor_axes that have only nans
        # Note that minor is axis 2
        non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
        # Determine new columns to be added
        new_cols = set(frame.columns).difference(non_nan_cols)
        # Update internal minor axis
        self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
        # Same for items (fields)
        # Find items axes that have only nans
        # Note that items is axis 0
        non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
        new_items = set(frame.index).difference(non_nan_items)
        self.items = _ensure_index(new_items.union(non_nan_items))
        # :NOTE:
        # There is a simpler and 10x faster way to do this:
        #
        # Reindex buffer to update axes (automatically adds nans)
        # self.buffer = self.buffer.reindex(items=self.items,
        #                                   major_axis=np.arange(self.cap),
        #                                   minor_axis=self.minor_axis)
        #
        # However, pandas==0.12.0, for which we remain backwards compatible,
        # has a bug in .reindex() that this triggers. Using .update() as before
        # seems to work fine.
        new_buffer = self._create_buffer()
        new_buffer.update(
            self.buffer.loc[non_nan_items, :, non_nan_cols])
        self.buffer = new_buffer
class SortedDict(MutableMapping):
    """A mapping of key-value pairs sorted by key according to the sort_key
    function provided to the mapping. Ties from the sort_key are broken by
    comparing the original keys. `iter` traverses the keys in sort order.
    Parameters
    ----------
    key : callable
        Called on keys in the mapping to produce the values by which those keys
        are sorted.
    mapping : mapping, optional
    **kwargs
        The initial mapping.
    >>> d = SortedDict(abs)
    >>> d[-1] = 'negative one'
    >>> d[0] = 'zero'
    >>> d[2] = 'two'
    >>> d  # doctest: +NORMALIZE_WHITESPACE
    SortedDict(<built-in function abs>,
               [(0, 'zero'), (-1, 'negative one'), (2, 'two')])
    >>> d[1] = 'one'  # Mutating the mapping maintains the sort order.
    >>> d  # doctest: +NORMALIZE_WHITESPACE
    SortedDict(<built-in function abs>,
               [(0, 'zero'), (-1, 'negative one'), (1, 'one'), (2, 'two')])
    >>> del d[0]
    >>> d  # doctest: +NORMALIZE_WHITESPACE
    SortedDict(<built-in function abs>,
               [(-1, 'negative one'), (1, 'one'), (2, 'two')])
    >>> del d[2]
    >>> d
    SortedDict(<built-in function abs>, [(-1, 'negative one'), (1, 'one')])
    """
    def __init__(self, key, mapping=None, **kwargs):
        # `_map` holds the actual key -> value pairs; `_sorted_key_names`
        # keeps (sort_key(k), k) pairs in sorted order for iteration.
        self._map = {}
        self._sorted_key_names = []
        self._sort_key = key
        self.update(merge(mapping or {}, kwargs))
    def __getitem__(self, name):
        return self._map[name]
    def __setitem__(self, name, value, _bisect_right=bisect.bisect_right):
        # bisect.bisect_right is bound as a default arg to make it a fast
        # local lookup.
        self._map[name] = value
        # Only insert into the sorted list when the key is new; overwriting
        # an existing key keeps its position (same sort key).
        if len(self._map) > len(self._sorted_key_names):
            key = self._sort_key(name)
            pair = (key, name)
            idx = _bisect_right(self._sorted_key_names, pair)
            self._sorted_key_names.insert(idx, pair)
    def __delitem__(self, name, _bisect_left=bisect.bisect_left):
        del self._map[name]
        # Locate the exact (sort_key, name) pair to remove from the
        # sorted list; bisect_left lands on it because pairs are unique.
        idx = _bisect_left(self._sorted_key_names,
                           (self._sort_key(name), name))
        del self._sorted_key_names[idx]
    def __iter__(self):
        # Yield keys in sort order (tie-broken by the original keys).
        for key, name in self._sorted_key_names:
            yield name
    def __len__(self):
        return len(self._map)
    def __repr__(self, _repr_running={}):
        # Based on OrderedDict/defaultdict: the shared `_repr_running` dict
        # guards against infinite recursion when the mapping contains itself
        # (keyed by (id(self), thread id)).
        call_key = id(self), get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s(%r)' % (self.__class__.__name__, self._sort_key)
            return '%s(%r, %r)' % (self.__class__.__name__, self._sort_key,
                                   list(self.items()))
        finally:
            del _repr_running[call_key]
| apache-2.0 |
rsignell-usgs/notebook | UGRID/NECOFS_wave_levels.py | 1 | 4737 |
# coding: utf-8
# # Extract NECOFS data using NetCDF4-Python and analyze/visualize with Pandas
# In[1]:
# Plot forecast water levels from NECOFS model from list of lon,lat locations
# (uses the nearest point, no interpolation)
import netCDF4
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from StringIO import StringIO
get_ipython().magic(u'matplotlib inline')
# In[2]:
# Dataset selection: the commented alternatives are other NECOFS products;
# only the last uncommented `model`/`url` pair takes effect.
#model='NECOFS Massbay'
#url='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST.nc'
# GOM3 Grid
#model='NECOFS GOM3'
#url='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc'
model = 'NECOFS GOM3 Wave'
# forecast
#url = 'http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_WAVE_FORECAST.nc'
# archive
url = 'http://www.smast.umassd.edu:8080/thredds/dodsC/fvcom/archives/necofs_gom3_wave'
# In[3]:
# Desired time for snapshot
# ....right now (or some number of hours from now) ...
start = dt.datetime.utcnow() + dt.timedelta(hours=-72)
stop = dt.datetime.utcnow() + dt.timedelta(hours=+72)
# ... or specific time (UTC)
# NOTE: each assignment below overwrites the previous one; only the final
# start (2016-01-01) and stop (2016-06-01) are actually used.
start = dt.datetime(1991,1,1,0,0,0) + dt.timedelta(hours=+0)
start = dt.datetime(1992,7,1,0,0,0) + dt.timedelta(hours=+0)
start = dt.datetime(1992,8,1,0,0,0) + dt.timedelta(hours=+0)
start = dt.datetime(2016,1,1,0,0,0) + dt.timedelta(hours=+0)
stop = dt.datetime(2016,6,1,0,0,0) + dt.timedelta(hours=+0)
# In[4]:
def dms2dd(d, m, s):
    """Convert (degrees, minutes, seconds) to decimal degrees."""
    minutes = m + s / 60.
    return d + minutes / 60.
# In[5]:
dms2dd(41,33,15.7)
# In[6]:
-dms2dd(70,30,20.2)
# In[7]:
x = '''
Station, Lat, Lon
Falmouth Harbor, 41.541575, -70.608020
Sage Lot Pond, 41.554361, -70.505611
'''
# In[8]:
x = '''
Station, Lat, Lon
Boston, 42.368186, -71.047984
Carolyn Seep Spot, 39.8083, -69.5917
Falmouth Harbor, 41.541575, -70.608020
'''
# In[9]:
# Enter desired (Station, Lat, Lon) values here:
x = '''
Station, Lat, Lon
Boston, 42.368186, -71.047984
Scituate Harbor, 42.199447, -70.720090
Scituate Beach, 42.209973, -70.724523
Falmouth Harbor, 41.541575, -70.608020
Marion, 41.689008, -70.746576
Marshfield, 42.108480, -70.648691
Provincetown, 42.042745, -70.171180
Sandwich, 41.767990, -70.466219
Hampton Bay, 42.900103, -70.818510
Gloucester, 42.610253, -70.660570
'''
# In[10]:
# Create a Pandas DataFrame
obs=pd.read_csv(StringIO(x.strip()), sep=",\s*",index_col='Station')
# In[11]:
obs
# In[12]:
# find the indices of the points in (x,y) closest to the points in (xi,yi)
def nearxy(x,y,xi,yi):
ind = np.ones(len(xi),dtype=int)
for i in np.arange(len(xi)):
dist = np.sqrt((x-xi[i])**2+(y-yi[i])**2)
ind[i] = dist.argmin()
return ind
# In[13]:
# open NECOFS remote OPeNDAP dataset
nc=netCDF4.Dataset(url).variables
# In[14]:
# find closest NECOFS nodes to station locations
obs['0-Based Index'] = nearxy(nc['lon'][:],nc['lat'][:],obs['Lon'],obs['Lat'])
obs
# In[15]:
# Get desired time step
time_var = nc['time']
istart = netCDF4.date2index(start,time_var,select='nearest')
istop = netCDF4.date2index(stop,time_var,select='nearest')
# In[16]:
# get time values and convert to datetime objects
jd = netCDF4.num2date(time_var[istart:istop],time_var.units)
# In[17]:
# get all time steps of water level from each station
nsta = len(obs)
z = np.ones((len(jd),nsta))
for i in range(nsta):
z[:,i] = nc['hs'][istart:istop,obs['0-Based Index'][i]]
# In[18]:
# make a DataFrame out of the interpolated time series at each location
zvals=pd.DataFrame(z,index=jd,columns=obs.index)
# In[19]:
# list out a few values
zvals.head()
# In[20]:
# model blew up producing very high waves on Jan 21, 2016
# eliminate unrealistically high values
mask = zvals>10.
zvals[mask] = np.NaN
# In[21]:
# plotting at DataFrame is easy!
ax=zvals.plot(figsize=(16,4),grid=True,title=('Wave Height from %s Forecast' % model),legend=False);
# read units from dataset for ylabel
plt.ylabel(nc['hs'].units)
# plotting the legend outside the axis is a bit tricky
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5));
# In[22]:
# what is the maximum over the whole record at a specific location
zvals['Boston'].max()
# In[23]:
# make a new DataFrame of maximum water levels at all stations
b=pd.DataFrame(zvals.idxmax(),columns=['time of max value (UTC)'])
# create heading for new column containing max water level
zmax_heading='zmax (%s)' % nc['hs'].units
# Add new column to DataFrame
b[zmax_heading]=zvals.max()
# In[24]:
b
# In[ ]:
# In[ ]:
# In[ ]:
| mit |
maxhutch/HighHolidayHonorDrafter | assign.py | 1 | 7003 | #!/usr/bin/env python3
import pandas as pd
import numpy as np
from hungarian_algorithm.hungarian import *
from web_io import get_sheet
from conf import members_url, honors_url, mhu_url, categories_url
from conf import override_url
# Pull the five Google-Sheet tables (honors, members, MHUs = member
# household units, category weights, manual overrides).
honors = get_sheet(honors_url)
print("Read honors")
members = get_sheet(members_url)
mhus = get_sheet(mhu_url)
cats_new = get_sheet(categories_url)
override = get_sheet(override_url)
""" Clean up! """
# Fill blanks with defaults and drop placeholder honor rows.
shabbat = False
members['Tribe'] = members['Tribe'].fillna('Israel')
members['Last Honor'] = members['Last Honor'].fillna(2013)
honors['Name'] = honors['Name'].fillna("Delete")
honors = honors[honors.Name != "Delete"]
honors['Weight'] = honors['Weight'].fillna(1.0)
honors['Tribe'] = honors['Tribe'].fillna('Israel')
honors['Hebrew'] = (honors['Type'] == 'Aliyah')
honors['Shabbat'] = honors['Shabbat'].fillna('Any')
mhus['Family service'] = mhus['Family service'].fillna(False)
cats_new = cats_new.fillna(0.0)
# Keep or drop Shabbat-specific honors depending on the service being drafted.
if shabbat:
    honors = honors[honors.Shabbat != 'Exclude']
else:
    honors = honors[honors.Shabbat != 'Only']
cats_new = cats_new.drop("Last honored", 1)
# Rebuild the categories table: row 0 keeps each category's score
# multiplier, following rows list the member names in that category.
cats_d = {}
for cat in cats_new.columns.values:
    if cat == 'Name' or cat == 'Last honored':
        continue
    cats_d[cat] = [cats_new.iloc[0][cat] ,]
for i in range(2, cats_new.shape[0]):
    for cat in cats_new.columns.values:
        if cat == 'Name' or cat == 'Last honored':
            continue
        if cats_new.iloc[i][cat] == 1:
            cats_d[cat].append(cats_new.iloc[i]["Name"])
# Pad every column to the same length so a DataFrame can be built.
max_len = max([len(cats_d[k]) for k in cats_d])
for k in cats_d:
    while len(cats_d[k]) < max_len:
        cats_d[k].append("")
#print(cats_d["New Members"])
#print(cats_d["Does not come for HH"])
cats = pd.DataFrame(cats_d)
#print(cats)
print("Starting with {:d} members".format(members.shape[0]))
""" Remove overrides """
assignments = {}
honors_all = honors.copy()
for k in range(override.shape[0]):
assignments[(override.iloc[k]["Honor"], override.iloc[k]["Service"])] = override.iloc[k]["Name"]
members = members[members["Name"] != override.iloc[k]["Name"]]
foo = honors.shape[0]
honors = honors[
(honors["Name"] != override.iloc[k]["Honor"])
| (honors["Service"] != override.iloc[k]["Service"])]
foo = foo - honors.shape[0]
if foo != 1:
print(override.iloc[k]["Honor"], override.iloc[k]["Service"], override.iloc[k]["Name"], foo)
print("Now {:d} members".format(members.shape[0]))
""" Remove zero-score categories """
to_drop = []
for cat in cats.columns.values:
if float(cats[cat][0]) > 0:
continue
to_drop.append(cat)
for name in list(cats[cat])[2:]:
if name in list(members.Name):
members = members[members["Name"] != name]
for cat in to_drop:
cats = cats.drop(cat, 1)
print("Down to {:d} members".format(members.shape[0]))
name_to_mhu = {}
for i in range(mhus.shape[0]):
mhu = mhus.iloc[i]
for name in list(mhu)[1:]:
name_to_mhu[name] = i
i = mhus.shape[0]
for name in list(members.Name):
if name in name_to_mhu:
continue
name_to_mhu[name] = i
mhus = mhus.append(pd.DataFrame([{"Family service" : False, "M1": name},]),ignore_index=True)
i = i + 1
rank = max(honors.shape[0], mhus.shape[0])
scores_individual = np.zeros((members.shape[0], rank))
scores_mhu = np.zeros((rank, rank))
this_year = 2015
name_to_member = {}
cat_counts = {}
assigned_counts = {}
for cat in cats.columns.values:
cat_counts[cat] = 0
assigned_counts[cat] = 0
cat_counts["Three year"] = 0
cat_counts["Two year"] = 0
assigned_counts["Three year"] = 0
assigned_counts["Two year"] = 0
print("Scoring {:d} members".format(members.shape[0]))
for j in range(members.shape[0]):
i = 0
mem = members.iloc[j]
name_to_member[mem.Name] = j
for i in range(honors.shape[0]):
honor = honors.iloc[i]
if honor['Tribe'] != 'Israel' and mem['Tribe'] != honor['Tribe']:
continue
if honor['Hebrew'] and not mem['Hebrew']:
continue
scores_individual[j, i] = honor["Weight"]
if (this_year - mem['Last Honor'] == 3):
mult = 3
cat_counts["Three year"] += 1
elif (this_year - mem['Last Honor'] == 2):
mult = 2
cat_counts["Two year"] += 1
else:
mult = 1.
for cat in cats.columns.values:
if mem.Name in list(cats[cat]):
cat_counts[cat] = cat_counts[cat] + 1
this_mult = float(cats[cat][0])
if this_mult == 0:
mult = 0
break
mult = max(this_mult, mult)
scores_individual[j,:] *= mult
# NOTE(review): top-level script section.  Builds per-unit ("mhu") score rows
# from individual member scores, solves the assignment problem with the
# Hungarian algorithm, and writes final honor assignments to CSV.
# Variables such as `mhus`, `honors`, `scores_individual`, `scores_mhu`,
# `name_to_member`, `cats`, `assigned_counts`, `cat_counts`, `assignments`,
# `honors_all`, `rank` and the `Hungarian` class are defined earlier in the
# file (outside this view) — indentation below reconstructed; TODO confirm.
ii = 0  # NOTE(review): never used below — presumably a leftover counter
for i in range(mhus.shape[0]):
    mhu = mhus.iloc[i]
    # Columns after the first hold member names; a NaN marks the end of the list.
    for name in list(mhu)[1:]:
        if pd.isnull(name):
            break
        if name in name_to_member:
            # A unit's score for an honor is the best score of any of its members.
            scores_mhu[i,:] = np.maximum(scores_mhu[i,:], scores_individual[name_to_member[name],:])
    if scores_mhu[i,0] < 3:
        print(list(mhu)[1])
    if not mhu["Family service"]:
        continue
    # Units flagged for family service are barred from these specific services.
    for j in range(honors.shape[0]):
        honor = honors.iloc[j]
        if honor.Service == "RH1" or honor.Service == "RH2" or honor.Service == "YK - Torah":
            scores_mhu[i,j] = 0.
    if scores_mhu[i,0] < 3:
        print(list(mhu)[1])
print(scores_mhu[:,0])
print("Solving Hungarian for N={:d}".format(rank))
hung = Hungarian(scores_mhu, is_profit_matrix=True)
hung.calculate()
results = hung.get_results()
# Optimal outcome is the members with the highest maximum score being assigned to the maximal part
opt_potential = np.sum(np.sort(np.max(scores_mhu, axis=1))[-min(honors.shape[0],mhus.shape[0]):])
from random import randint
for res in results:
    # Skip padding rows/columns the solver added to square the matrix.
    if res[1] >= honors.shape[0] or res[0] >= mhus.shape[0]:
        continue
    winner = "No One"; best = 0
    # Within the winning unit, pick the member with the highest individual
    # score for this honor (ties broken by a coin flip).
    for name in list(mhus.iloc[res[0]])[1:]:
        if pd.isnull(name) or not (name in name_to_member):
            continue
        this_score = scores_individual[name_to_member[name], res[1]]
        if this_score > best or (this_score == best and randint(0,1) == 1):
            winner = name
            best = scores_individual[name_to_member[name], res[1]]
    # Tally category and tenure statistics for the winner.
    for cat in cats.columns.values:
        if winner in list(cats[cat]):
            assigned_counts[cat] = assigned_counts[cat] + 1
    if best == 3:
        assigned_counts["Three year"] += 1
    if best == 2:
        assigned_counts["Two year"] += 1
    #print("{:20s} is assigned to {:s} for {:s}".format(winner, honors.iloc[res[1]].Name, honors.iloc[res[1]].Service))
    assignments[(honors.iloc[res[1]].Name, honors.iloc[res[1]].Service)] = winner
print("Total score is {:f} of {:f}".format(hung.get_total_potential(), opt_potential))
for cat in list(cats.columns.values) + ["Three year", "Two year"]:
    print("{:20s}: {:3d} of {:3d} assigned".format(cat, assigned_counts[cat], cat_counts[cat]))
from collections import Counter
counts = Counter(assignments.values())
print(counts)
# Write the assignee for every honor back onto a copy of the full honors table.
final = honors_all.copy(deep=True)
final.loc[:,'Assignee'] = pd.Series("None", index=final.index)
for i in range(final.shape[0]):
    honor = final.iloc[i:i+1]
    label = honor.index[0]
    honor = honor.iloc[0]
    #print(label, assignments[(honor.Name, honor.Service)])
    final.loc[label,'Assignee'] = assignments[(honor.Name, honor.Service)]
final.to_csv("./final_assignments.csv")
| gpl-3.0 |
cybercomgroup/Big_Data | Cloudera/Code/Titanic_Dataset/class_distr_gender_surv.py | 1 | 1704 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Path to the Kaggle Titanic train.csv; adjust for your environment.
titanic_df = pd.read_csv("train.csv")
classes = []
# We know there are First Class (1), Second (2) and Third (3)
for i in range(1,4):
    classes.append( titanic_df[ titanic_df["Pclass"] == i ] )
fem = []
male = []
# Take all females and males from every class and put in separate lists
# (one DataFrame per class, in class order 1..3).
for data in classes:
    fem.append( data[ data['Sex'] == 'female' ])
    male.append( data[ data['Sex'] == 'male' ])
fem_surv = []
male_surv = []
# Take all that survived from every class and every gender and put in
# separate lists (parallel to `fem` and `male`).
for data in fem:
    fem_surv.append( data[ data['Survived'] == 1 ])
for data in male:
    male_surv.append( data[ data['Survived'] == 1 ])
def func(f, args):
    """Apply *f* to every element of *args* and return the results as a tuple.

    Equivalent to ``tuple(map(f, args))`` — the previous hand-rolled
    append loop reimplemented ``map``.  Used below to turn lists of
    DataFrames into bar heights via ``len``.
    """
    return tuple(map(f, args))
# Grouped bar chart: four bars (female total/survived, male total/survived)
# per passenger class.
index = np.arange(3)  # one group per class (1st, 2nd, 3rd)
bar_width = 0.2
opacity = 0.7
plt.bar(index, func(len, fem), bar_width, alpha = opacity, color = 'r', label = 'Female total')
plt.bar(index + bar_width, func(len, fem_surv), bar_width, alpha = opacity, color = 'y', label = 'Female survived')
plt.bar(index + (2 * bar_width), func(len, male), bar_width, alpha = opacity, color = 'b', label = 'Male total')
plt.bar(index + (3 * bar_width), func(len, male_surv), bar_width, alpha = opacity, color = 'g', label = 'Male survived')
# Place the class labels under the middle of each four-bar group.
plt.xticks(index + bar_width + bar_width / 2, ('First class', 'Second class', 'Third class'))
plt.title("Class and Gender survival")
plt.ylabel('Count')
plt.xlabel('PClass')
plt.legend()
plt.tight_layout()
plt.show()
| gpl-3.0 |
arcyfelix/ML-DL-AI | Supervised Learning/GANs/dcgan-tensorflayer/tensorlayer/utils.py | 1 | 21433 | #! /usr/bin/python
# -*- coding: utf8 -*-
import tensorflow as tf
import tensorlayer as tl
from . import iterate
import numpy as np
import time
import math
import random
def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_size=100,
        n_epoch=100, print_freq=5, X_val=None, y_val=None, eval_train=True,
        tensorboard=False, tensorboard_epoch_freq=5, tensorboard_weight_histograms=True, tensorboard_graph_vis=True):
    """Train a given non time-series network by the given cost function, training data, batch_size, n_epoch etc.

    Parameters
    ----------
    sess : TensorFlow session
        sess = tf.InteractiveSession()
    network : a TensorLayer layer
        the network will be trained
    train_op : a TensorFlow optimizer
        like tf.train.AdamOptimizer
    cost : TensorFlow expression
        scalar training cost to minimize and report
    X_train : numpy array
        the input of training data
    y_train : numpy array
        the target of training data
    x : placeholder
        for inputs
    y_ : placeholder
        for targets
    acc : the TensorFlow expression of accuracy (or other metric) or None
        if None, would not display the metric
    batch_size : int
        batch size for training and evaluating
    n_epoch : int
        the number of training epochs
    print_freq : int
        display the training information every ``print_freq`` epochs
    X_val : numpy array or None
        the input of validation data
    y_val : numpy array or None
        the target of validation data
    eval_train : boolean
        if X_val and y_val are not None, it reflects whether to evaluate the training data
    tensorboard : boolean
        if True summary data will be stored to the log/ directory for visualization with tensorboard.
        See also detailed tensorboard_X settings for specific configurations of features. (default False)
        Also runs tl.layers.initialize_global_variables(sess) internally in fit() to setup the summary nodes, see Note:
    tensorboard_epoch_freq : int
        how many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5)
    tensorboard_weight_histograms : boolean
        if True updates tensorboard data in the logs/ directory for visualization
        of the weight histograms every tensorboard_epoch_freq epoch (default True)
    tensorboard_graph_vis : boolean
        if True stores the graph in the tensorboard summaries saved to log/ (default True)

    Examples
    --------
    >>> see tutorial_mnist_simple.py
    >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
    ...            acc=acc, batch_size=500, n_epoch=200, print_freq=5,
    ...            X_val=X_val, y_val=y_val, eval_train=False)
    >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
    ...            acc=acc, batch_size=500, n_epoch=200, print_freq=5,
    ...            X_val=X_val, y_val=y_val, eval_train=False,
    ...            tensorboard=True, tensorboard_weight_histograms=True, tensorboard_graph_vis=True)

    Note
    --------
    If tensorboard=True, the global_variables_initializer will be run inside the fit function
    in order to initialize the automatically generated summary nodes used for tensorboard visualization,
    thus tf.global_variables_initializer().run() before the fit() call will be undefined.
    """
    assert X_train.shape[0] >= batch_size, "Number of training examples should be bigger than the batch size"

    if(tensorboard):
        print("Setting up tensorboard ...")
        #Set up tensorboard summaries and saver
        tl.files.exists_or_mkdir('logs/')

        #Only write summaries for more recent TensorFlow versions
        if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
            if tensorboard_graph_vis:
                train_writer = tf.summary.FileWriter('logs/train',sess.graph)
                val_writer = tf.summary.FileWriter('logs/validation',sess.graph)
            else:
                train_writer = tf.summary.FileWriter('logs/train')
                val_writer = tf.summary.FileWriter('logs/validation')

        #Set up summary nodes
        if(tensorboard_weight_histograms):
            for param in network.all_params:
                if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'):
                    print('Param name ', param.name)
                    tf.summary.histogram(param.name, param)

        if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'):
            tf.summary.scalar('cost', cost)
            merged = tf.summary.merge_all()

        #Initialize all variables and summaries
        tl.layers.initialize_global_variables(sess)
        print("Finished! use $tensorboard --logdir=logs/ to start server")

    print("Start training the network ...")
    start_time_begin = time.time()
    tensorboard_train_index, tensorboard_val_index = 0, 0
    for epoch in range(n_epoch):
        start_time = time.time()
        loss_ep = 0; n_step = 0
        # One optimization pass over the shuffled training set.
        for X_train_a, y_train_a in iterate.minibatches(X_train, y_train,
                                                    batch_size, shuffle=True):
            feed_dict = {x: X_train_a, y_: y_train_a}
            feed_dict.update( network.all_drop ) # enable noise layers
            loss, _ = sess.run([cost, train_op], feed_dict=feed_dict)
            loss_ep += loss
            n_step += 1
        loss_ep = loss_ep/ n_step  # mean training loss over this epoch

        if tensorboard and hasattr(tf, 'summary'):
            if epoch+1 == 1 or (epoch+1) % tensorboard_epoch_freq == 0:
                # Summaries are computed with dropout disabled (evaluation mode).
                for X_train_a, y_train_a in iterate.minibatches(
                                        X_train, y_train, batch_size, shuffle=True):
                    dp_dict = dict_to_one( network.all_drop ) # disable noise layers
                    feed_dict = {x: X_train_a, y_: y_train_a}
                    feed_dict.update(dp_dict)
                    result = sess.run(merged, feed_dict=feed_dict)
                    train_writer.add_summary(result, tensorboard_train_index)
                    tensorboard_train_index += 1
                # NOTE(review): this branch iterates X_val/y_val — it assumes
                # validation data is supplied whenever tensorboard=True;
                # passing None here would raise.  TODO confirm.
                for X_val_a, y_val_a in iterate.minibatches(
                                        X_val, y_val, batch_size, shuffle=True):
                    dp_dict = dict_to_one( network.all_drop ) # disable noise layers
                    feed_dict = {x: X_val_a, y_: y_val_a}
                    feed_dict.update(dp_dict)
                    result = sess.run(merged, feed_dict=feed_dict)
                    val_writer.add_summary(result, tensorboard_val_index)
                    tensorboard_val_index += 1

        if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
            if (X_val is not None) and (y_val is not None):
                print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
                if eval_train is True:
                    # Re-evaluate on the training set with dropout disabled.
                    train_loss, train_acc, n_batch = 0, 0, 0
                    for X_train_a, y_train_a in iterate.minibatches(
                                            X_train, y_train, batch_size, shuffle=True):
                        dp_dict = dict_to_one( network.all_drop ) # disable noise layers
                        feed_dict = {x: X_train_a, y_: y_train_a}
                        feed_dict.update(dp_dict)
                        if acc is not None:
                            err, ac = sess.run([cost, acc], feed_dict=feed_dict)
                            train_acc += ac
                        else:
                            err = sess.run(cost, feed_dict=feed_dict)
                        train_loss += err; n_batch += 1
                    print(" train loss: %f" % (train_loss/ n_batch))
                    if acc is not None:
                        print(" train acc: %f" % (train_acc/ n_batch))
                # Always evaluate on the validation set when one is given.
                val_loss, val_acc, n_batch = 0, 0, 0
                for X_val_a, y_val_a in iterate.minibatches(
                                        X_val, y_val, batch_size, shuffle=True):
                    dp_dict = dict_to_one( network.all_drop ) # disable noise layers
                    feed_dict = {x: X_val_a, y_: y_val_a}
                    feed_dict.update(dp_dict)
                    if acc is not None:
                        err, ac = sess.run([cost, acc], feed_dict=feed_dict)
                        val_acc += ac
                    else:
                        err = sess.run(cost, feed_dict=feed_dict)
                    val_loss += err; n_batch += 1
                print(" val loss: %f" % (val_loss/ n_batch))
                if acc is not None:
                    print(" val acc: %f" % (val_acc/ n_batch))
            else:
                print("Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep))
    print("Total training time: %fs" % (time.time() - start_time_begin))
def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None):
    """
    Test a given non time-series network by the given test data and metric.

    Parameters
    ----------
    sess : TensorFlow session
        sess = tf.InteractiveSession()
    network : a TensorLayer layer
        the network will be trained
    acc : the TensorFlow expression of accuracy (or other metric) or None
        if None, would not display the metric
    X_test : numpy array
        the input of test data
    y_test : numpy array
        the target of test data
    x : placeholder
        for inputs
    y_ : placeholder
        for targets
    batch_size : int or None
        batch size for testing, when dataset is large, we should use minibatches for testing.
        when dataset is small, we can set it to None.
    cost : the TensorFlow expression of cost or None
        if None, would not display the cost

    Examples
    --------
    >>> see tutorial_mnist_simple.py
    >>> tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost)
    """
    print('Start testing the network ...')
    if batch_size is None:
        # Small dataset: evaluate everything in a single pass.
        dp_dict = dict_to_one( network.all_drop )
        feed_dict = {x: X_test, y_: y_test}
        feed_dict.update(dp_dict)
        if cost is not None:
            print(" test loss: %f" % sess.run(cost, feed_dict=feed_dict))
        print(" test acc: %f" % sess.run(acc, feed_dict=feed_dict))
        # print(" test acc: %f" % np.mean(y_test == sess.run(y_op,
        # feed_dict=feed_dict)))
    else:
        # Large dataset: accumulate loss/accuracy over minibatches and
        # report the per-batch average.
        test_loss, test_acc, n_batch = 0, 0, 0
        for X_test_a, y_test_a in iterate.minibatches(
                                    X_test, y_test, batch_size, shuffle=True):
            dp_dict = dict_to_one( network.all_drop ) # disable noise layers
            feed_dict = {x: X_test_a, y_: y_test_a}
            feed_dict.update(dp_dict)
            if cost is not None:
                err, ac = sess.run([cost, acc], feed_dict=feed_dict)
                test_loss += err
            else:
                ac = sess.run(acc, feed_dict=feed_dict)
            test_acc += ac; n_batch += 1
        if cost is not None:
            print(" test loss: %f" % (test_loss/ n_batch))
        print(" test acc: %f" % (test_acc/ n_batch))
def predict(sess, network, X, x, y_op):
    """Run a forward pass of ``network`` and return its predictions for ``X``.

    Parameters
    ----------
    sess : TensorFlow session
        sess = tf.InteractiveSession()
    network : a TensorLayer layer
        the trained network; only its dropout state (``all_drop``) is consulted
    X : numpy array
        the input, fed into placeholder ``x``
    x : placeholder
        for inputs
    y_op : placeholder
        the argmax expression of softmax outputs

    Examples
    --------
    >>> see tutorial_mnist_simple.py
    >>> y = network.outputs
    >>> y_op = tf.argmax(tf.nn.softmax(y), 1)
    >>> print(tl.utils.predict(sess, network, X_test, x, y_op))
    """
    # Build the feed dict, then force every dropout keep-probability to 1 so
    # inference is deterministic (noise layers disabled).
    feed = {x: X}
    feed.update(dict_to_one(network.all_drop))
    return sess.run(y_op, feed_dict=feed)
## Evaluation
def evaluation(y_test=None, y_predict=None, n_classes=None):
    """Score a classification result.

    Given target labels, predicted labels and the number of classes, return
    the confusion matrix, the per-class F1-scores, the accuracy and the
    macro-averaged F1-score (all printed as a side effect).

    Parameters
    ----------
    y_test : numpy.array or list
        target results
    y_predict : numpy.array or list
        predicted results
    n_classes : int
        number of classes

    Examples
    --------
    >>> c_mat, f1, acc, f1_macro = evaluation(y_test, y_predict, n_classes)
    """
    from sklearn.metrics import confusion_matrix, f1_score, accuracy_score
    # Fix the label set explicitly so absent classes still get a row/column.
    label_ids = list(range(n_classes))
    c_mat = confusion_matrix(y_test, y_predict, labels=label_ids)
    f1 = f1_score(y_test, y_predict, average=None, labels=label_ids)
    f1_macro = f1_score(y_test, y_predict, average='macro')
    acc = accuracy_score(y_test, y_predict)
    print('confusion matrix: \n',c_mat)
    print('f1-score:',f1)
    print('f1-score(macro):',f1_macro) # same output with > f1_score(y_true, y_pred, average='macro')
    print('accuracy-score:', acc)
    return c_mat, f1, acc, f1_macro
def dict_to_one(dp_dict=None):
    """Return a copy of ``dp_dict`` with every value set to 1.

    Used to disable dropout/dropconnect layers at evaluation time by forcing
    all keeping probabilities to 1.

    Parameters
    ----------
    dp_dict : dictionary or None
        keeping probabilities (e.g. ``network.all_drop``); ``None`` is
        treated as an empty dict.

    Examples
    --------
    >>> dp_dict = dict_to_one( network.all_drop )
    >>> feed_dict.update(dp_dict)
    """
    # The mutable default argument ({}) was replaced by None to avoid the
    # shared-default pitfall; observable behavior is unchanged.
    if dp_dict is None:
        dp_dict = {}
    return {x: 1 for x in dp_dict}
def flatten_list(list_of_list=None):
    """Flatten one level of nesting: return all items of the sub-lists, in order.

    ``sum(list_of_list, [])`` was replaced by ``itertools.chain`` because the
    ``sum`` idiom rebuilds the accumulator list on every step (quadratic time
    in the total number of items); the mutable default argument was replaced
    by ``None`` as well (same result: an empty list).

    Parameters
    ----------
    list_of_list : a list of lists, or None for an empty result

    Examples
    --------
    >>> tl.utils.flatten_list([[1, 2, 3],[4, 5],[6]])
    ... [1, 2, 3, 4, 5, 6]
    """
    from itertools import chain
    if list_of_list is None:
        return []
    return list(chain.from_iterable(list_of_list))
def class_balancing_oversample(X_train=None, y_train=None, printable=True):
    """Balance classes by oversampling: duplicate minority-class rows until
    every class has as many examples as the most frequent one.

    Parameters
    ----------
    X_train : numpy.array
        Features, each row is an example
    y_train : numpy.array or list
        Labels, one per row of ``X_train``
    printable : boolean
        If True, print progress information.

    Returns
    -------
    (X_train, y_train) : numpy.array, list
        Rows grouped by class, classes ordered from most to least frequent.

    Examples
    --------
    - One X
    >>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True)
    - Two X
    >>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False)
    >>> X1 = X[:, 0:5]
    >>> X2 = X[:, 5:]
    """
    # ======== Classes balancing
    if printable:
        print("Classes balancing for training examples...")
    from collections import Counter
    c = Counter(y_train)
    if printable:
        print('the occurrence number of each stage: %s' % c.most_common())
        print('the least stage is Label %s have %s instances' % c.most_common()[-1])
        print('the most stage is Label %s have %s instances' % c.most_common(1)[0])
    most_num = c.most_common(1)[0][1]
    if printable:
        print('most num is %d, all classes tend to be this num' % most_num)

    # Find the row indices of each label (classes ordered most-common first).
    locations = {}
    number = {}
    for lab, num in c.most_common():
        number[lab] = num
        locations[lab] = np.where(np.array(y_train) == lab)[0]
    if printable:
        print('convert list(np.array) to dict format')
    X = {}  # label -> 2-D array of that class's rows
    for lab in number:
        X[lab] = X_train[locations[lab]]

    # Oversampling: append whole copies of the class block until it reaches
    # at least most_num rows, then truncate to exactly most_num.
    if printable:
        print('start oversampling')
    for key in X:
        temp = X[key]
        while len(X[key]) < most_num:
            X[key] = np.vstack((X[key], temp))
    if printable:
        # BUG FIX: previously indexed X[0], which raised KeyError when no
        # class is labelled 0; report on an arbitrary existing class instead.
        any_key = next(iter(X))
        print('first features of label %s >' % any_key, len(X[any_key][0]))
        print('the occurrence num of each stage after oversampling')
        for key in X:
            print(key, len(X[key]))
    if printable:
        print('make each stage have same num of instances')
    for key in X:
        X[key] = X[key][0:most_num, :]
        if printable:
            # BUG FIX: this progress print previously ran even when
            # printable=False.
            print(key, len(X[key]))

    # Concatenate the balanced class blocks back into one array.
    if printable:
        print('convert from dict to list format')
    # BUG FIX: feature width was taken from X[0] (assumed label 0 exists).
    n_features = next(iter(X.values())).shape[1]
    y_train = []
    X_train = np.empty(shape=(0, n_features))
    for key in X:
        X_train = np.vstack((X_train, X[key]))
        y_train.extend([key for i in range(len(X[key]))])
    c = Counter(y_train)
    if printable:
        print('the occurrence number of each stage after oversampling: %s' % c.most_common())
    # ================ End of Classes balancing
    return X_train, y_train
## Random
def get_random_int(min=0, max=10, number=5, seed=None):
    """Return a list of ``number`` random integers drawn uniformly from [min, max].

    Parameters
    ----------
    min, max : int
        Inclusive bounds of the range.
    number : int
        How many integers to draw.
    seed : int or None
        Seed for a private ``random.Random`` instance; pass any value
        (including 0) for reproducible output.  ``None`` (default) seeds
        from system entropy.

    Examples
    ---------
    >>> r = get_random_int(min=0, max=10, number=5)
    ... [10, 2, 3, 3, 7]
    """
    # BUG FIX: the old check `if seed:` ignored seed=0 (falsy), making that
    # seed non-reproducible.  random.Random(None) already seeds from the OS,
    # so a single constructor call covers both cases.
    rnd = random.Random(seed)
    return [rnd.randint(min, max) for _ in range(number)]
#
# def class_balancing_sequence_4D(X_train, y_train, sequence_length, model='downsampling' ,printable=True):
# ''' 输入、输出都是sequence format
# oversampling or downsampling
# '''
# n_features = X_train.shape[2]
# # ======== Classes balancing for sequence
# if printable:
# print("Classes balancing for 4D sequence training examples...")
# from collections import Counter
# c = Counter(y_train) # Counter({2: 454, 4: 267, 3: 124, 1: 57, 0: 48})
# if printable:
# print('the occurrence number of each stage: %s' % c.most_common())
# print('the least Label %s have %s instances' % c.most_common()[-1])
# print('the most Label %s have %s instances' % c.most_common(1)[0])
# # print(c.most_common()) # [(2, 454), (4, 267), (3, 124), (1, 57), (0, 48)]
# most_num = c.most_common(1)[0][1]
# less_num = c.most_common()[-1][1]
#
# locations = {}
# number = {}
# for lab, num in c.most_common():
# number[lab] = num
# locations[lab] = np.where(np.array(y_train)==lab)[0]
# # print(locations)
# # print(number)
# if printable:
# print(' convert list to dict')
# X = {} # convert list to dict
# ### a sequence
# for lab, _ in number.items():
# X[lab] = np.empty(shape=(0,1,n_features,1)) # 4D
# for lab, _ in number.items():
# #X[lab] = X_train[locations[lab]
# for l in locations[lab]:
# X[lab] = np.vstack((X[lab], X_train[l*sequence_length : (l+1)*(sequence_length)]))
# # X[lab] = X_train[locations[lab]*sequence_length : locations[lab]*(sequence_length+1)] # a sequence
# # print(X)
#
# if model=='oversampling':
# if printable:
# print(' oversampling -- most num is %d, all classes tend to be this num\nshuffle applied' % most_num)
# for key in X:
# temp = X[key]
# while True:
# if len(X[key]) >= most_num * sequence_length: # sequence
# break
# X[key] = np.vstack((X[key], temp))
# # print(key, len(X[key]))
# if printable:
# print(' make each stage have same num of instances')
# for key in X:
# X[key] = X[key][0:most_num*sequence_length,:] # sequence
# if printable:
# print(key, len(X[key]))
# elif model=='downsampling':
# import random
# if printable:
# print(' downsampling -- less num is %d, all classes tend to be this num by randomly choice without replacement\nshuffle applied' % less_num)
# for key in X:
# # print(key, len(X[key]))#, len(X[key])/sequence_length)
# s_idx = [ i for i in range(int(len(X[key])/sequence_length))]
# s_idx = np.asarray(s_idx)*sequence_length # start index of sequnce in X[key]
# # print('s_idx',s_idx)
# r_idx = np.random.choice(s_idx, less_num, replace=False) # random choice less_num of s_idx
# # print('r_idx',r_idx)
# temp = X[key]
# X[key] = np.empty(shape=(0,1,n_features,1)) # 4D
# for idx in r_idx:
# X[key] = np.vstack((X[key], temp[idx:idx+sequence_length]))
# # print(key, X[key])
# # np.random.choice(l, len(l), replace=False)
# else:
# raise Exception(' model should be oversampling or downsampling')
#
# # convert dict to list
# if printable:
# print(' convert dict to list')
# y_train = []
# # X_train = np.empty(shape=(0,len(X[0][0])))
# # X_train = np.empty(shape=(0,len(X[1][0]))) # 2D
# X_train = np.empty(shape=(0,1,n_features,1)) # 4D
# l_key = list(X.keys()) # shuffle
# random.shuffle(l_key) # shuffle
# # for key in X: # no shuffle
# for key in l_key: # shuffle
# X_train = np.vstack( (X_train, X[key] ) )
# # print(len(X[key]))
# y_train.extend([key for i in range(int(len(X[key])/sequence_length))])
# # print(X_train,y_train, type(X_train), type(y_train))
# # ================ End of Classes balancing for sequence
# # print(X_train.shape, len(y_train))
# return X_train, np.asarray(y_train)
| apache-2.0 |
dryadb11781/machine-learning-python | Classification/py_source/plot_lda.py | 70 | 2413 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
    """Generate random blob-ish data with noisy features.

    This returns an array of input data with shape `(n_samples, n_features)`
    and an array of `n_samples` target labels.

    Only one feature contains discriminative information, the other features
    contain only noise.
    """
    # Two Gaussian blobs separated along the single informative dimension.
    X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])

    # add non-discriminative features: pure standard-normal noise columns
    if n_features > 1:
        X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
    return X, y
# For each feature count, average test accuracy of shrinkage-LDA vs plain LDA
# over n_averages random train/test draws.
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
    score_clf1, score_clf2 = 0, 0
    for _ in range(n_averages):
        X, y = generate_data(n_train, n_features)

        clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
        clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)

        # Score on a fresh, larger test draw.
        X, y = generate_data(n_test, n_features)
        score_clf1 += clf1.score(X, y)
        score_clf2 += clf2.score(X, y)
    acc_clf1.append(score_clf1 / n_averages)
    acc_clf2.append(score_clf2 / n_averages)

features_samples_ratio = np.array(n_features_range) / n_train

# Plot accuracy against the features-to-samples ratio.
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
         label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
         label="Linear Discriminant Analysis", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
martinahogg/machinelearning | linear-regression/l1-regularisation.py | 1 | 1108 | import numpy as np
import matplotlib.pyplot as plt
# Create some training samples
# To demonstrate L1 regularisation we fabricate training data
# with 50 dimensions in our X matrix, where only 3 of which
# contribute significantly to the values in our Y vector.

# Construct X: 50 samples x 50 features, uniform in [-5, 5).
X = (np.random.random((50,50)) - 0.5) * 10

# Construct Y: only the first three true weights are non-zero; add noise.
actualW = np.array([1, 0.5, -0.5] + [0]*47)
Y = X.dot(actualW) + np.random.randn(50) * 0.5

# Use gradient descent with L1 regularisation to derive the
# weights from the training samples.
w = np.random.randn(50) / np.sqrt(50)
learning_rate = 0.0001
errors = []
for t in range(1000):
    YHat = X.dot(w)
    delta = YHat - Y
    gradient = 2 * X.T.dot(delta)
    # Subgradient of the L1 penalty lambda*|w| with lambda = 10.
    l1 = 10 * np.sign(w);
    w = w - (learning_rate * (gradient + l1))
    error = delta.dot(delta) / 50  # mean squared error over the 50 samples
    errors.append(error)

# Plot the mean squared error reducing over the 1000 iterations.
plt.plot(errors)
plt.show()

# Plot the predicted and actual values of Y
plt.plot(YHat, label='prediction')
plt.plot(Y, label='targets')
plt.show()

# Note how all but the first three derived weights are very
# close to zero.
print(w) | apache-2.0 |
zorojean/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function

from collections import Counter, defaultdict
import re
import sys
from time import time

import numpy as np

from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Returns the number of non-zero columns in a CSR matrix X."""
    # nonzero() yields (row_indices, col_indices); count distinct columns.
    _, col_indices = X.nonzero()
    return len(np.unique(col_indices))
def tokens(doc):
    """Extract tokens from doc, lazily, lower-cased.

    This uses a simple regex to break strings into tokens. For a more
    principled approach, see CountVectorizer or TfidfVectorizer.
    """
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies.

    Uses ``collections.Counter`` instead of the hand-rolled
    ``defaultdict(int)`` loop.  ``Counter`` is a dict subclass, so the
    downstream DictVectorizer/FeatureHasher consumers are unaffected.
    """
    return Counter(tokens(doc))
# Newsgroup categories used to subset the corpus (keeps the demo fast).
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None

print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()

# Optional CLI argument: number of hash buckets for FeatureHasher.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)

print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()

# Benchmark 1: DictVectorizer on precomputed frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()

# Benchmark 2: FeatureHasher on the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()

# Benchmark 3: FeatureHasher hashing raw token streams directly.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/build/lib.linux-x86_64-2.7/kwiklib/dataio/tests/test_kwikloader.py | 2 | 6909 | """Unit tests for loader module."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from collections import Counter
import numpy as np
import numpy.random as rnd
import pandas as pd
import shutil
from nose.tools import with_setup
from mock_data import setup as setup_klusters
from mock_data import (teardown, TEST_FOLDER, nspikes, nclusters, nsamples,
nchannels, fetdim)
from kwiklib.dataio import (KwikLoader, Experiment, klusters_to_kwik,
check_dtype, check_shape, get_array, select, get_indices)
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
def setup():
    # Build the Klusters-format mock dataset, then convert it to .kwik so
    # each test starts from a fresh KwikLoader-readable fixture.
    setup_klusters()
    klusters_to_kwik(filename='test', dir=TEST_FOLDER)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_kwik_loader_1():
    """Smoke-test KwikLoader getters: shape and dtype of every data set."""
    # Open the mock data.
    dir = TEST_FOLDER
    xmlfile = os.path.join(dir, 'test.xml')
    l = KwikLoader(filename=xmlfile)

    # Get full data sets.
    features = l.get_features()
    # features_some = l.get_some_features()
    masks = l.get_masks()
    waveforms = l.get_waveforms()
    clusters = l.get_clusters()
    spiketimes = l.get_spiketimes()
    # Recomputed from the loaded clusters (intentionally shadows the
    # module-level nclusters imported from mock_data).
    nclusters = len(Counter(clusters))
    # probe = l.get_probe()
    cluster_colors = l.get_cluster_colors()
    cluster_groups = l.get_cluster_groups()
    group_colors = l.get_group_colors()
    group_names = l.get_group_names()
    cluster_sizes = l.get_cluster_sizes()

    # Check the shape of the data sets.
    # ---------------------------------
    assert check_shape(features, (nspikes, nchannels * fetdim + 1))
    # assert features_some.shape[1] == nchannels * fetdim + 1
    assert check_shape(masks, (nspikes, nchannels * fetdim + 1))
    assert check_shape(waveforms, (nspikes, nsamples, nchannels))
    assert check_shape(clusters, (nspikes,))
    assert check_shape(spiketimes, (nspikes,))
    # assert check_shape(probe, (nchannels, 2))
    assert check_shape(cluster_colors, (nclusters,))
    assert check_shape(cluster_groups, (nclusters,))
    assert check_shape(group_colors, (4,))
    assert check_shape(group_names, (4,))
    assert check_shape(cluster_sizes, (nclusters,))

    # Check the data type of the data sets.
    # -------------------------------------
    assert check_dtype(features, np.float32)
    assert check_dtype(masks, np.float32)
    # HACK: Panel has no dtype(s) attribute
    # assert check_dtype(waveforms, np.float32)
    assert check_dtype(clusters, np.int32)
    assert check_dtype(spiketimes, np.float64)
    # assert check_dtype(probe, np.float32)
    assert check_dtype(cluster_colors, np.int32)
    assert check_dtype(cluster_groups, np.int32)
    assert check_dtype(group_colors, np.int32)
    assert check_dtype(group_names, object)
    assert check_dtype(cluster_sizes, np.int32)

    l.close()
def test_kwik_loader_control():
    """Exercise KwikLoader mutators: reassign spikes, recolor/regroup
    clusters, rename groups, and add/remove clusters and groups."""
    # Open the mock data.
    dir = TEST_FOLDER
    xmlfile = os.path.join(dir, 'test.xml')
    l = KwikLoader(filename=xmlfile)

    # Take all spikes in cluster 3.
    spikes = get_indices(l.get_clusters(clusters=3))

    # Put them in cluster 4.
    l.set_cluster(spikes, 4)
    spikes_new = get_indices(l.get_clusters(clusters=4))

    # Ensure all spikes in old cluster 3 are now in cluster 4.
    assert np.all(np.in1d(spikes, spikes_new))

    # Change cluster groups.
    clusters = [2, 3, 4]
    group = 0
    l.set_cluster_groups(clusters, group)
    groups = l.get_cluster_groups(clusters)
    assert np.all(groups == group)

    # Change cluster colors.
    clusters = [2, 3, 4]
    color = 12
    l.set_cluster_colors(clusters, color)
    colors = l.get_cluster_colors(clusters)
    assert np.all(colors == color)

    # Change group name (group 0 is 'Noise' in the mock fixture).
    group = 0
    name = l.get_group_names(group)
    name_new = 'Noise new'
    assert name == 'Noise'
    l.set_group_names(group, name_new)
    assert l.get_group_names(group) == name_new

    # Change group color.
    groups = [1, 2]
    colors = l.get_group_colors(groups)
    color_new = 10
    l.set_group_colors(groups, color_new)
    assert np.all(l.get_group_colors(groups) == color_new)

    # Add cluster and group.
    spikes = get_indices(l.get_clusters(clusters=3))[:10]
    # Create new group 100.
    l.add_group(100, 'New group', 10)
    # Create new cluster 10000 and put it in group 100.
    l.add_cluster(10000, 100, 10)
    # Put some spikes in the new cluster.
    l.set_cluster(spikes, 10000)
    clusters = l.get_clusters(spikes=spikes)
    assert np.all(clusters == 10000)
    groups = l.get_cluster_groups(10000)
    assert groups == 100
    # Move the spikes back out before deleting the cluster.
    l.set_cluster(spikes, 2)

    # Remove the new cluster and group.
    l.remove_cluster(10000)
    l.remove_group(100)
    assert np.all(~np.in1d(10000, l.get_clusters()))
    assert np.all(~np.in1d(100, l.get_cluster_groups()))

    l.close()
@with_setup(setup)
def test_kwik_save():
    """WARNING: this test should occur at the end of the module since it
    changes the mock data sets."""
    # Open the mock data.
    dir = TEST_FOLDER
    xmlfile = os.path.join(dir, 'test.xml')
    l = KwikLoader(filename=xmlfile)

    clusters = l.get_clusters()
    cluster_colors = l.get_cluster_colors()
    cluster_groups = l.get_cluster_groups()
    group_colors = l.get_group_colors()
    group_names = l.get_group_names()

    # Reassign every spike: even indices to cluster 2, odd ones to cluster 3.
    indices = get_indices(clusters)
    l.set_cluster(indices[::2], 2)
    l.set_cluster(indices[1::2], 3)

    # Alternate colors and groups across the unique clusters.
    cluster_indices = l.get_clusters_unique()
    l.set_cluster_colors(cluster_indices[::2], 10)
    l.set_cluster_colors(cluster_indices[1::2], 20)
    l.set_cluster_groups(cluster_indices[::2], 1)
    l.set_cluster_groups(cluster_indices[1::2], 0)

    # Save.
    l.remove_empty_clusters()
    l.save()

    # Re-read everything and check it round-tripped.
    clusters = l.get_clusters()
    cluster_colors = l.get_cluster_colors()
    cluster_groups = l.get_cluster_groups()
    group_colors = l.get_group_colors()
    group_names = l.get_group_names()

    assert np.all(clusters[::2] == 2)
    assert np.all(clusters[1::2] == 3)
    assert np.all(cluster_colors[::2] == 10)
    assert np.all(cluster_colors[1::2] == 20)
    # BUG FIX: was the Python 2 statement `print cluster_groups`, a
    # SyntaxError under Python 3; the function form is equivalent in both.
    print(cluster_groups)
    assert np.all(cluster_groups[::2] == 1)
    assert np.all(cluster_groups[1::2] == 0)

    l.close()
| gpl-3.0 |
zzcclp/spark | python/pyspark/pandas/tests/test_spark_functions.py | 11 | 2127 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import spark_column_equals
from pyspark.sql import functions as F
from pyspark.sql.types import (
ByteType,
FloatType,
IntegerType,
LongType,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class SparkFunctionsTests(PandasOnSparkTestCase):
    def test_lit(self):
        """SF.lit must wrap NumPy scalars as Spark literals of the matching type."""
        typed_cases = [
            (np.int64(1), F.lit(1).astype(LongType())),
            (np.int32(1), F.lit(1).astype(IntegerType())),
            (np.int8(1), F.lit(1).astype(ByteType())),
            (np.byte(1), F.lit(1).astype(ByteType())),
            (np.float32(1), F.lit(float(1)).astype(FloatType())),
        ]
        for value, expected in typed_cases:
            self.assertTrue(spark_column_equals(SF.lit(value), expected))
        # A plain Python int passes through without a cast.
        self.assertTrue(spark_column_equals(SF.lit(1), F.lit(1)))
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_spark_functions import *  # noqa: F401

    # Prefer XML test reports when xmlrunner is available (used by the
    # Spark CI builds); otherwise fall back to the default text runner.
    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
tensorflow/models | research/lfads/plot_lfads.py | 12 | 6564 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
def _plot_item(W, name, full_name, nspaces):
    """Render one parameter array in a fresh figure.

    A 0-d array is printed instead of plotted; row/column vectors get a
    stem plot; 2-D matrices are shown as a |W| heat map with a colorbar.
    ``nspaces`` is accepted for call-site symmetry but not used here.
    """
    plt.figure()
    if W.shape == ():
        # Scalar parameter: nothing to draw, just report the value.
        print(name, ": ", W)
        return
    if W.shape[0] == 1 or W.shape[1] == 1:
        # Row or column vector; rows are transposed before stemming.
        plt.stem(W.T if W.shape[0] == 1 else W)
    else:
        plt.imshow(np.abs(W), interpolation='nearest', cmap='jet')
        plt.colorbar()
    plt.title(full_name)
def all_plot(d, full_name="", exclude="", nspaces=0):
    """Recursively plot all the LFADS model parameters in the nested
    dictionary.

    Parameters
    ----------
    d : dict
        (Possibly nested) mapping from parameter name to array.
    full_name : str
        Slash-separated path accumulated during the recursion.
    exclude : str
        If non-empty, skip any parameter whose path contains this substring.
    nspaces : int
        Indentation accumulator forwarded to ``_plot_item``.
    """
    # dict.items() works on both Python 2 and 3; the original iteritems()
    # was Python-2-only and broke under the py3 interpreters this file
    # otherwise targets (see the __future__ imports at the top).
    for k, v in d.items():
        this_name = full_name + "/" + k
        if isinstance(v, dict):
            all_plot(v, full_name=this_name, exclude=exclude, nspaces=nspaces + 4)
        elif exclude == "" or exclude not in this_name:
            _plot_item(v, name=k, full_name=this_name, nspaces=nspaces + 4)
def plot_time_series(vals_bxtxn, bidx=None, n_to_plot=np.inf, scale=1.0,
                     color='r', title=None):
    """Plot up to ``n_to_plot`` channels of a (batch, time, channel) array.

    If ``bidx`` is None the batch dimension is averaged out, otherwise the
    selected trial is plotted.  Each channel is vertically offset by
    ``scale`` so the traces do not overlap.
    """
    if bidx is None:
        traces_txn = np.mean(vals_bxtxn, axis=0)
    else:
        traces_txn = vals_bxtxn[bidx, :, :]
    num_channels = traces_txn.shape[1]
    num_shown = min(n_to_plot, num_channels)
    offsets = scale * np.array(range(num_shown))
    plt.plot(traces_txn[:, 0:num_shown] + offsets, color=color, lw=1.0)
    plt.axis('tight')
    if title:
        plt.title(title)
def plot_lfads_timeseries(data_bxtxn, model_vals, ext_input_bxtxi=None,
                          truth_bxtxn=None, bidx=None, output_dist="poisson",
                          conversion_factor=1.0, subplot_cidx=0,
                          col_title=None):
    """Plot one column (train or valid) of the LFADS time-series panels.

    Parameters
    ----------
    data_bxtxn : np.ndarray
        Observed data; assumes shape (batch, time, channels) — TODO confirm.
    model_vals : dict
        Posterior values; must contain 'output_dist_params', 'factors' and
        'gen_states', and may contain 'controller_outputs'.
    ext_input_bxtxi : np.ndarray, optional
        External inputs overlaid on the input panel.
    truth_bxtxn : np.ndarray, optional
        Ground-truth rates overlaid in black.
    bidx : int, optional
        Trial index to plot; None averages over the batch.
    output_dist : str
        Either 'poisson' or 'gaussian'.
    conversion_factor : float
        Multiplier applied to Poisson rates.
    subplot_cidx : int
        Column index (0 = train, 1 = valid).
    col_title : str
        Title prefix for this column.

    Raises
    ------
    ValueError
        If ``output_dist`` is neither 'poisson' nor 'gaussian'.
    """
    n_to_plot = 10
    scale = 1.0
    nrows = 7
    plt.subplot(nrows, 2, 1 + subplot_cidx)
    if output_dist == 'poisson':
        rates = means = conversion_factor * model_vals['output_dist_params']
        plot_time_series(rates, bidx, n_to_plot=n_to_plot, scale=scale,
                         title=col_title + " rates (LFADS - red, Truth - black)")
    elif output_dist == 'gaussian':
        means_vars = model_vals['output_dist_params']
        # 'variances' renamed from 'vars', which shadowed the builtin.
        means, variances = np.split(means_vars, 2, axis=2)  # bxtxn
        stds = np.sqrt(variances)
        plot_time_series(means, bidx, n_to_plot=n_to_plot, scale=scale,
                         title=col_title + " means (LFADS - red, Truth - black)")
        plot_time_series(means + stds, bidx, n_to_plot=n_to_plot, scale=scale,
                         color='c')
        plot_time_series(means - stds, bidx, n_to_plot=n_to_plot, scale=scale,
                         color='c')
    else:
        # The original `assert 'NIY'` was a no-op (a non-empty string is
        # always truthy); fail loudly instead.
        raise ValueError("Unknown output_dist: %s" % output_dist)
    if truth_bxtxn is not None:
        plot_time_series(truth_bxtxn, bidx, n_to_plot=n_to_plot, color='k',
                         scale=scale)
    input_title = ""
    if "controller_outputs" in model_vals:
        input_title += " Controller Output"
        plt.subplot(nrows, 2, 3 + subplot_cidx)
        u_t = model_vals['controller_outputs'][0:-1]
        plot_time_series(u_t, bidx, n_to_plot=n_to_plot, color='c', scale=1.0,
                         title=col_title + input_title)
    if ext_input_bxtxi is not None:
        input_title += " External Input"
        plot_time_series(ext_input_bxtxi, n_to_plot=n_to_plot, color='b',
                         scale=scale, title=col_title + input_title)
    plt.subplot(nrows, 2, 5 + subplot_cidx)
    plot_time_series(means, bidx,
                     n_to_plot=n_to_plot, scale=1.0,
                     title=col_title + " Spikes (LFADS - red, Spikes - black)")
    plot_time_series(data_bxtxn, bidx, n_to_plot=n_to_plot, color='k', scale=1.0)
    plt.subplot(nrows, 2, 7 + subplot_cidx)
    plot_time_series(model_vals['factors'], bidx, n_to_plot=n_to_plot, color='b',
                     scale=2.0, title=col_title + " Factors")
    plt.subplot(nrows, 2, 9 + subplot_cidx)
    plot_time_series(model_vals['gen_states'], bidx, n_to_plot=n_to_plot,
                     color='g', scale=1.0, title=col_title + " Generator State")
    # Heat-map panels of the (channels x time) data and inferred means.
    if bidx is not None:
        data_nxt = data_bxtxn[bidx, :, :].T
        params_nxt = model_vals['output_dist_params'][bidx, :, :].T
    else:
        data_nxt = np.mean(data_bxtxn, axis=0).T
        params_nxt = np.mean(model_vals['output_dist_params'], axis=0).T
    if output_dist == 'poisson':
        means_nxt = params_nxt
    else:  # gaussian: params are stacked as (means + vars) x time
        means_nxt = np.vsplit(params_nxt, 2)[0]  # get means
    plt.subplot(nrows, 2, 11 + subplot_cidx)
    plt.imshow(data_nxt, aspect='auto', interpolation='nearest')
    plt.title(col_title + ' Data')
    plt.subplot(nrows, 2, 13 + subplot_cidx)
    plt.imshow(means_nxt, aspect='auto', interpolation='nearest')
    plt.title(col_title + ' Means')
def plot_lfads(train_bxtxd, train_model_vals,
               train_ext_input_bxtxi=None, train_truth_bxtxd=None,
               valid_bxtxd=None, valid_model_vals=None,
               valid_ext_input_bxtxi=None, valid_truth_bxtxd=None,
               bidx=None, cf=1.0, output_dist='poisson'):
    """Render the train/valid LFADS summary figure and return its pixels.

    Returns
    -------
    np.ndarray
        RGB image of the figure, shape (height, width, 3), dtype uint8.
    """
    # Plotting
    f = plt.figure(figsize=(18, 20), tight_layout=True)
    plot_lfads_timeseries(train_bxtxd, train_model_vals,
                          train_ext_input_bxtxi,
                          truth_bxtxn=train_truth_bxtxd,
                          conversion_factor=cf, bidx=bidx,
                          output_dist=output_dist, col_title='Train')
    plot_lfads_timeseries(valid_bxtxd, valid_model_vals,
                          valid_ext_input_bxtxi,
                          truth_bxtxn=valid_truth_bxtxd,
                          conversion_factor=cf, bidx=bidx,
                          output_dist=output_dist,
                          subplot_cidx=1, col_title='Valid')
    # Convert from figure to a numpy array, width x height x 3 (RGB).
    f.canvas.draw()
    # np.frombuffer replaces the deprecated np.fromstring(..., sep='');
    # both yield a flat uint8 view of the canvas RGB bytes.
    data = np.frombuffer(f.canvas.tostring_rgb(), dtype=np.uint8)
    data_wxhx3 = data.reshape(f.canvas.get_width_height()[::-1] + (3,))
    plt.close()
    return data_wxhx3
| apache-2.0 |
MicheleDamian/ConnectopicMapping | scripts/run.py | 1 | 3900 | #!/usr/bin/env python
""" Run the pipeline for Haak connectopic mapping.
Consider changing the parameters contained in config.json to
set input and output folder and experiment with different behaviors
of the algorithm.
"""
import json
import os
import numpy
from connectopic_mapping import haak, utils
from matplotlib import pyplot
with open('config.json') as config_file:
config = json.load(config_file)
#
# Define general parameters
#
subject = config["subject"]
session = config["session"]
scans = config["scans"]
hemisphere = config["hemisphere"]
atlas_name = config["atlas_name"]
roi_name = config["roi_name"]
#
# Define Haak parameters
#
num_lower_dim = config["num_lower_dim"]
num_processes = config["num_processes"]
manifold_learning = config["manifold_learning"]
manifold_components = config["manifold_components"]
out_path = config["out_path"]
verbose = config["verbose"]
#
# Define input/output locations
#
image_path = config["nifti_dir_path"]
image_path_0 = image_path + \
'/rfMRI_{1}_{2}_hp2000_clean.nii.gz' \
.format(subject, session, scans[0])
image_path_1 = image_path + \
'/rfMRI_{1}_{2}_hp2000_clean.nii.gz' \
.format(subject, session, scans[1])
out_path = config["out_path"] + \
'/rfMRI_{0}_{1}_{2}' \
.format(subject, session, hemisphere)
#
# Load ROI and brain masks
#
print("Loading brain and ROI masks from atlas...", end="", flush=True)
brain_mask, roi_mask = utils.load_masks(atlas_name, roi_name, hemisphere)
print("\rLoading brain and ROI masks from atlas... Done!", flush=True)
#
# Load Nifti images, smooth with FWHM=6, compute % temporal change
#
print("Loading Nifti images (1/2)...", end="", flush=True)
data_info_0 = utils.normalize_nifti_image(image_path_0, fwhm=6)
print("\rLoading Nifti images (2/2)...", end="", flush=True)
data_info_1 = utils.normalize_nifti_image(image_path_1, fwhm=6)
print("\rLoading Nifti images... Done!", flush=True)
#
# Concatenate data from the two scans along the temporal axis
#
print("Concatenating Nifti images...", end="", flush=True)
brain_mask, roi_mask, data = utils.concatenate_data(brain_mask, roi_mask,
*data_info_0, *data_info_1)
# Dereference unnecessary data
del data_info_0, data_info_1
print("\rConcatenating Nifti images... Done!", flush=True)
print('Number brain voxels = {0}'.format(numpy.sum(brain_mask)),
flush=True)
print('Number ROI voxels = {0}'.format(numpy.sum(roi_mask)),
flush=True)
os.makedirs(out_path, exist_ok=True)
numpy.save(out_path + '/roi_mask.npy', roi_mask)
numpy.save(out_path + '/brain_mask.npy', brain_mask)
#
# Compute Haak mapping
#
haak_mapping = haak.Haak(num_lower_dim=num_lower_dim,
num_processes=num_processes,
manifold_learning=manifold_learning,
manifold_components=manifold_components,
out_path=out_path,
verbose=1)
eta2_coef, embedding, connectopic_map, connectopic_var = haak_mapping.fit_transform(data, roi_mask)
#
# Visualize connectopic mapping
#
i_plot = 1
for config_figures in config['figures']:
slice_indexes = [config_figures['axis_x'],
config_figures['axis_y'],
config_figures['axis_z']]
if hemisphere == 'RH':
slice_indexes[0] = brain_mask.shape[0] - slice_indexes[0]
#
# Display connectopy
#
fig = pyplot.figure(i_plot, tight_layout=True)
utils.visualize_volume(connectopic_map, brain_mask, roi_mask, slice_indexes,
low_percentile=5, high_percentile=95,
num_fig=fig,
margin=2,
legend_location=config_figures['legend_location'])
i_plot += 1
pyplot.show()
| apache-2.0 |
jseabold/scipy | scipy/special/basic.py | 9 | 62504 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta,
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch,
binom)
from . import specfun
from . import orthogonal
# Names exported by ``from scipy.special import *``; keeping the list
# explicit pins down the public API of this module.
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
           'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
           'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros',
           'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
           'factorialk', 'factorial2', 'fresnel_zeros',
           'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
           'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
           'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
           'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
           'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
           'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
           'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
           'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
           'sinc', 'sph_in', 'sph_inkn',
           'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
           'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
           'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
    """Periodic sinc function, also called the Dirichlet function.

    The Dirichlet function is defined as::

        diric(x) = sin(x * n/2) / (n * sin(x / 2)),

    where n is a positive integer.

    Parameters
    ----------
    x : array_like
        Input data
    n : int
        Integer defining the periodicity.

    Returns
    -------
    diric : ndarray

    Examples
    --------
    >>> from scipy import special
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
    >>> plt.figure(figsize=(8,8))
    >>> for idx, n in enumerate([2,3,4,9]):
    ...     plt.subplot(2, 2, idx+1)
    ...     plt.plot(x, special.diric(x, n))
    ...     plt.title('diric, n={}'.format(n))
    >>> plt.show()
    """
    # Broadcast x and n against each other.
    x, n = asarray(x), asarray(n)
    n = asarray(n + (x-x))
    x = asarray(x + (n-n))
    if issubdtype(x.dtype, inexact):
        ytype = x.dtype
    else:
        ytype = float
    y = zeros(x.shape, ytype)

    # empirical minval for 32, 64 or 128 bit float computations
    # where sin(x/2) < minval, result is fixed at +1 or -1
    if np.finfo(ytype).eps < 1e-18:
        minval = 1e-11
    elif np.finfo(ytype).eps < 1e-15:
        minval = 1e-7
    else:
        minval = 1e-3

    # Invalid n (non-positive or non-integral) yields nan.
    mask1 = (n <= 0) | (n != floor(n))
    place(y, mask1, nan)

    # Near the zeros of the denominator sin(x/2) the ratio tends to +/-1;
    # the sign depends on the parity of n and which period x falls in.
    x = x / 2
    denom = sin(x)
    # Use boolean operators on the masks; the original integer arithmetic
    # on boolean arrays (``1 - mask``) is deprecated in NumPy.
    mask2 = ~mask1 & (abs(denom) < minval)
    xsub = extract(mask2, x)
    nsub = extract(mask2, n)
    zsub = xsub / pi
    place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))

    # Generic case: evaluate the defining ratio directly.
    mask = ~mask1 & ~mask2
    xsub = extract(mask, x)
    nsub = extract(mask, n)
    dsub = extract(mask, denom)
    place(y, mask, sin(nsub*xsub)/(nsub*dsub))
    return y
def jnjnp_zeros(nt):
    """Compute nt zeros of the Bessel functions Jn and Jn'.

    The zeros of Jn(x) and Jn'(x), over all orders n, are merged and
    sorted by magnitude.

    Parameters
    ----------
    nt : int
        Number (<=1200) of zeros to compute

    Returns
    -------
    zo : ndarray
        The lth entry is the lth zero of Jn(x) or Jn'(x).  Length `nt`.
    n : ndarray
        Order of the function associated with each zero.  Length `nt`.
    m : ndarray
        Serial number of each zero within its function.  Length `nt`.
    t : ndarray
        0 if the zero belongs to Jn(x), 1 if to Jn'(x).  Length `nt`.

    See Also
    --------
    jn_zeros, jnp_zeros : to get separated arrays of zeros.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or nt != floor(nt) or nt > 1200:
        raise ValueError("Number must be integer <= 1200.")
    nt = int(nt)
    order, serial, kind, zo = specfun.jdzo(nt)
    return zo[1:nt + 1], order[:nt], serial[:nt], kind[:nt]
def jnyn_zeros(n, nt):
    """Compute nt zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x).

    Parameters
    ----------
    n : int
        Order of the Bessel functions
    nt : int
        Number (<=1200) of zeros to compute

    Returns
    -------
    Four ndarrays of length `nt` holding the first `nt` zeros of Jn(x),
    Jn'(x), Yn(x), and Yn'(x), in that order.  See jn_zeros, jnp_zeros,
    yn_zeros, ynp_zeros to get each array separately.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(nt)):
        raise ValueError("Arguments must be scalars.")
    if floor(n) != n or floor(nt) != nt:
        raise ValueError("Arguments must be integers.")
    if nt <= 0:
        raise ValueError("nt > 0")
    # The Fortran routine only handles non-negative orders.
    return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
    """Compute the first nt zeros of the Bessel function Jn(x).

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    zeros_jn, _, _, _ = jnyn_zeros(n, nt)
    return zeros_jn
def jnp_zeros(n, nt):
    """Compute the first nt zeros of the Bessel derivative Jn'(x).

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    _, zeros_jnp, _, _ = jnyn_zeros(n, nt)
    return zeros_jnp
def yn_zeros(n, nt):
    """Compute the first nt zeros of the Bessel function Yn(x).

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    _, _, zeros_yn, _ = jnyn_zeros(n, nt)
    return zeros_yn
def ynp_zeros(n, nt):
    """Compute the first nt zeros of the Bessel derivative Yn'(x).

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    _, _, _, zeros_ynp = jnyn_zeros(n, nt)
    return zeros_ynp
def y0_zeros(nt, complex=0):
    """Compute nt zeros of Y0(z) and the derivative Y0'(z0) = -Y1(z0) at each.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : int, default 0
        If 0, return only the real zeros; if 1, return only the complex
        zeros with negative real part and positive imaginary part.  Their
        complex conjugates are also zeros but are not returned.

    Returns
    -------
    z0n : ndarray
        Location of nth zero of Y0(z)
    y0pz0n : ndarray
        Value of derivative Y0'(z0) for nth zero

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or floor(nt) != nt or nt <= 0:
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=0 selects Y0; kc=True selects real zeros, False complex ones.
    return specfun.cyzo(nt, 0, complex != 1)
def y1_zeros(nt, complex=0):
    """Compute nt zeros of Y1(z) and the derivative Y1'(z1) = Y0(z1) at each.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : int, default 0
        If 0, return only the real zeros; if 1, return only the complex
        zeros with negative real part and positive imaginary part.  Their
        complex conjugates are also zeros but are not returned.

    Returns
    -------
    z1n : ndarray
        Location of nth zero of Y1(z)
    y1pz1n : ndarray
        Value of derivative Y1'(z1) for nth zero

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or floor(nt) != nt or nt <= 0:
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=1 selects Y1; kc=True selects real zeros, False complex ones.
    return specfun.cyzo(nt, 1, complex != 1)
def y1p_zeros(nt, complex=0):
    """Compute nt zeros of Y1'(z) and the value Y1(z1) at each zero.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : int, default 0
        If 0, return only the real zeros; if 1, return only the complex
        zeros with negative real part and positive imaginary part.  Their
        complex conjugates are also zeros but are not returned.

    Returns
    -------
    z1pn : ndarray
        Location of nth zero of Y1'(z)
    y1z1pn : ndarray
        Value of Y1(z1) at the nth zero

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or floor(nt) != nt or nt <= 0:
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=2 selects Y1'; kc=True selects real zeros, False complex ones.
    return specfun.cyzo(nt, 2, complex != 1)
def _bessel_diff_formula(v, z, n, L, phase):
    # Derivative formula from Abramowitz & Stegun (AMS55):
    #   d^n/dz^n L(v, z) = 2^{-n} sum_{k=0}^{n} phase^k C(n, k) L(v - n + 2k, z)
    # with phase = -1 for J, Y, H1, H2 and phase = +1 for I and
    # exp(v*pi*i)*K (the caller pulls the exp((v-k)*pi*i) factor out for K).
    coef = 1.0
    total = L(v - n, z)
    for k in range(1, n + 1):
        coef = phase * coef * (n - k + 1) / k  # = phase**k * C(n, k)
        total += coef * L(v - n + 2 * k, z)
    return total / (2. ** n)
# Public alias kept for backward compatibility; calling it emits a
# DeprecationWarning directing users away from this private helper.
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
    message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
    """Return the nth derivative of the Bessel function Jv(z) w.r.t. z.

    Parameters
    ----------
    v : float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative; ``n=0`` evaluates the function itself.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return jv(v, z)
    return _bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
    """Return the nth derivative of the Bessel function Yv(z) w.r.t. z.

    Parameters
    ----------
    v : float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative; ``n=0`` evaluates the function itself.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return yv(v, z)
    return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
    """Return the nth derivative of the modified Bessel function Kv(z) w.r.t. z.

    Parameters
    ----------
    v : float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative; ``n=0`` evaluates the function itself.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 6.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return kv(v, z)
    # K picks up an alternating sign relative to the I-type formula.
    return (-1) ** n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
    """Return the nth derivative of the modified Bessel function Iv(z) w.r.t. z.

    Parameters
    ----------
    v : float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative; ``n=0`` evaluates the function itself.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 6.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return iv(v, z)
    return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
    """Return the nth derivative of the Hankel function H1v(z) w.r.t. z.

    Parameters
    ----------
    v : float
        Order of Hankel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative; ``n=0`` evaluates the function itself.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return hankel1(v, z)
    return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
    """Return the nth derivative of the Hankel function H2v(z) w.r.t. z.

    Parameters
    ----------
    v : float
        Order of Hankel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative; ``n=0`` evaluates the function itself.

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 5.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return hankel2(v, z)
    return _bessel_diff_formula(v, z, n, hankel2, -1)
def sph_jn(n, z):
    """Compute the spherical Bessel functions j0(z)..jn(z) and derivatives.

    Parameters
    ----------
    n : int
        Maximum order of jn to compute
    z : complex
        Argument at which to evaluate

    Returns
    -------
    jn : ndarray
        Value of j0(z), ..., jn(z)
    jnp : ndarray
        First derivative j0'(z), ..., jn'(z)

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 8.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if n != floor(n) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines require an order of at least 1.
    order = max(n, 1)
    if iscomplex(z):
        nm, jn, jnp, yn, ynp = specfun.csphjy(order, z)
    else:
        nm, jn, jnp = specfun.sphj(order, z)
    return jn[:(n + 1)], jnp[:(n + 1)]
def sph_yn(n, z):
    """Compute the spherical Bessel functions y0(z)..yn(z) and derivatives.

    Parameters
    ----------
    n : int
        Maximum order of yn to compute
    z : complex
        Argument at which to evaluate

    Returns
    -------
    yn : ndarray
        Value of y0(z), ..., yn(z)
    ynp : ndarray
        First derivative y0'(z), ..., yn'(z)

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 8.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if n != floor(n) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines require an order of at least 1.
    order = max(n, 1)
    # Negative real arguments go through the complex-capable routine.
    if iscomplex(z) or less(z, 0):
        nm, jn, jnp, yn, ynp = specfun.csphjy(order, z)
    else:
        nm, yn, ynp = specfun.sphy(order, z)
    return yn[:(n + 1)], ynp[:(n + 1)]
def sph_jnyn(n, z):
    """Compute spherical Bessel functions jn(z) and yn(z) and derivatives.

    Values and first derivatives are returned for all orders 0..n.

    Parameters
    ----------
    n : int
        Maximum order of jn and yn to compute
    z : complex
        Argument at which to evaluate

    Returns
    -------
    jn : ndarray
        Value of j0(z), ..., jn(z)
    jnp : ndarray
        First derivative j0'(z), ..., jn'(z)
    yn : ndarray
        Value of y0(z), ..., yn(z)
    ynp : ndarray
        First derivative y0'(z), ..., yn'(z)

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 8.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if n != floor(n) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines require an order of at least 1.
    order = max(n, 1)
    if iscomplex(z) or less(z, 0):
        # One complex-capable routine yields both families at once.
        nm, jn, jnp, yn, ynp = specfun.csphjy(order, z)
    else:
        nm, yn, ynp = specfun.sphy(order, z)
        nm, jn, jnp = specfun.sphj(order, z)
    return jn[:(n + 1)], jnp[:(n + 1)], yn[:(n + 1)], ynp[:(n + 1)]
def sph_in(n, z):
    """Compute the modified spherical Bessel functions i0(z)..in(z) and derivatives.

    Parameters
    ----------
    n : int
        Maximum order of in to compute
    z : complex
        Argument at which to evaluate

    Returns
    -------
    in : ndarray
        Value of i0(z), ..., in(z)
    inp : ndarray
        First derivative i0'(z), ..., in'(z)

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 8.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if n != floor(n) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines require an order of at least 1.
    order = max(n, 1)
    if iscomplex(z):
        nm, In, Inp, kn, knp = specfun.csphik(order, z)
    else:
        nm, In, Inp = specfun.sphi(order, z)
    return In[:(n + 1)], Inp[:(n + 1)]
def sph_kn(n, z):
    """Compute the modified spherical Bessel functions k0(z)..kn(z) and derivatives.

    Parameters
    ----------
    n : int
        Maximum order of kn to compute
    z : complex
        Argument at which to evaluate

    Returns
    -------
    kn : ndarray
        Value of k0(z), ..., kn(z)
    knp : ndarray
        First derivative k0'(z), ..., kn'(z)

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 8.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if n != floor(n) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines require an order of at least 1.
    order = max(n, 1)
    # Negative real arguments go through the complex-capable routine.
    if iscomplex(z) or less(z, 0):
        nm, In, Inp, kn, knp = specfun.csphik(order, z)
    else:
        nm, kn, knp = specfun.sphk(order, z)
    return kn[:(n + 1)], knp[:(n + 1)]
def sph_inkn(n, z):
    """Compute modified spherical Bessel functions in(z), kn(z), and derivatives.

    Values and first derivatives are returned for all orders 0..n.

    Parameters
    ----------
    n : int
        Maximum order of in and kn to compute
    z : complex
        Argument at which to evaluate

    Returns
    -------
    in : ndarray
        Value of i0(z), ..., in(z)
    inp : ndarray
        First derivative i0'(z), ..., in'(z)
    kn : ndarray
        Value of k0(z), ..., kn(z)
    knp : ndarray
        First derivative k0'(z), ..., kn'(z)

    References
    ----------
    .. [1] Zhang and Jin, "Computation of Special Functions", Wiley, 1996,
           chapter 8.  http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if n != floor(n) or n < 0:
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines require an order of at least 1.
    order = max(n, 1)
    if iscomplex(z) or less(z, 0):
        # One complex-capable routine yields both families at once.
        nm, In, Inp, kn, knp = specfun.csphik(order, z)
    else:
        nm, In, Inp = specfun.sphi(order, z)
        nm, kn, knp = specfun.sphk(order, z)
    return In[:(n + 1)], Inp[:(n + 1)], kn[:(n + 1)], knp[:(n + 1)]
def riccati_jn(n, x):
    """Compute the Ricatti-Bessel function of the first kind and derivative.

    Evaluates the function and its first derivative for all orders up to
    and including n.

    Parameters
    ----------
    n : int
        Maximum order of function to compute
    x : float
        Argument at which to evaluate

    Returns
    -------
    jn : ndarray
        Value of j0(x), ..., jn(x)
    jnp : ndarray
        First derivative j0'(x), ..., jn'(x)

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n1 = 1 if n == 0 else n  # rctj needs at least one order
    nm, jn, jnp = specfun.rctj(n1, x)
    return jn[:(n + 1)], jnp[:(n + 1)]
def riccati_yn(n, x):
    """Compute the Ricatti-Bessel function of the second kind and derivative.

    Evaluates the function and its first derivative for all orders up to
    and including n.

    Parameters
    ----------
    n : int
        Maximum order of function to compute
    x : float
        Argument at which to evaluate

    Returns
    -------
    yn : ndarray
        Value of y0(x), ..., yn(x)
    ynp : ndarray
        First derivative y0'(x), ..., yn'(x)

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n1 = 1 if n == 0 else n  # rcty needs at least one order
    nm, yn, ynp = specfun.rcty(n1, x)
    return yn[:(n + 1)], ynp[:(n + 1)]
def erfinv(y):
    """Inverse of the error function erf.
    """
    # erf(x) = 2*Phi(x*sqrt(2)) - 1 where Phi is the normal CDF, so the
    # inverse follows from the normal quantile function ndtri.
    return ndtri(0.5 * (y + 1.0)) / sqrt(2)
def erfcinv(y):
    """Inverse of the complementary error function erfc.
    """
    # erfc(x) = 2*Phi(-x*sqrt(2)); invert via the normal quantile, with a
    # sign flip.
    return -ndtri(y / 2.0) / sqrt(2)
def erf_zeros(nt):
    """Compute the first nt complex zeros of the error function erf(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    zeros = specfun.cerzo(nt)
    return zeros
def fresnelc_zeros(nt):
    """Compute the first nt complex zeros of the cosine Fresnel integral C(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    return specfun.fcszo(1, nt)  # 1 selects C(z)
def fresnels_zeros(nt):
    """Compute the first nt complex zeros of the sine Fresnel integral S(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    return specfun.fcszo(2, nt)  # 2 selects S(z)
def fresnel_zeros(nt):
    """Compute nt complex zeros of both Fresnel integrals S(z) and C(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    zeros_s = specfun.fcszo(2, nt)  # zeros of S(z)
    zeros_c = specfun.fcszo(1, nt)  # zeros of C(z)
    return zeros_s, zeros_c
def hyp0f1(v, z):
    r"""Confluent hypergeometric limit function 0F1.

    Parameters
    ----------
    v, z : array_like
        Input values.

    Returns
    -------
    hyp0f1 : ndarray
        The confluent hypergeometric limit function.

    Notes
    -----
    This function is defined as:

    .. math:: _0F_1(v,z) = \sum_{k=0}^{\inf}\frac{z^k}{(v)_k k!}.

    It's also the limit as q -> infinity of ``1F1(q;v;z/q)``, and satisfies
    the differential equation :math:`f''(z) + vf'(z) = f(z)`.
    """
    v = atleast_1d(v)
    z = atleast_1d(z)
    v, z = np.broadcast_arrays(v, z)
    arg = 2 * sqrt(abs(z))
    # Evaluate via the Bessel-function identity
    #   0F1(; v; z) = Gamma(v) * |z|**((1-v)/2) * I_{v-1}(2 sqrt|z|)
    # (with J_{v-1} for z on the negative real side).  The intermediate
    # expressions produce harmless divide/invalid warnings at z = 0, so
    # silence them; np.errstate restores the error state even if an
    # exception is raised, unlike a manual seterr save/restore pair.
    with np.errstate(all='ignore'):
        num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
        den = abs(z)**((v - 1.0) / 2)
        num *= gamma(v)
    # The series value at z = 0 is exactly 1 for any v; patch the 0/0 slots.
    num[z == 0] = 1
    den[z == 0] = 1
    return num / den
def assoc_laguerre(x, n, k=0.0):
    """Compute nth-order generalized (associated) Laguerre polynomial.

    The polynomial :math:`L^(alpha)_n(x)` is orthogonal over ``[0, inf)``,
    with weighting function ``exp(-x) * x**alpha`` with ``alpha > -1``.

    Parameters
    ----------
    x : array_like
        Points at which to evaluate the polynomial.
    n : int
        Degree of the polynomial.
    k : float, optional
        Order parameter alpha of the generalized Laguerre polynomial
        (default 0.0, i.e. the ordinary Laguerre polynomial).

    Notes
    -----
    `assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
    reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
    """
    # Pure delegation; only the argument order differs from the evaluator.
    return orthogonal.eval_genlaguerre(n, k, x)
digamma = psi  # alias: digamma(x) is psi(x), the logarithmic derivative of gamma
def polygamma(n, x):
    """Polygamma function n.

    This is the nth derivative of the digamma (psi) function.

    Parameters
    ----------
    n : array_like of int
        The order of the derivative of `psi`.
    x : array_like
        Where to evaluate the polygamma function.

    Returns
    -------
    polygamma : ndarray
        The result.

    Examples
    --------
    >>> from scipy import special
    >>> x = [2, 3, 25.5]
    >>> special.polygamma(1, x)
    array([ 0.64493407,  0.39493407,  0.03999467])
    >>> special.polygamma(0, x) == special.psi(x)
    array([ True,  True,  True], dtype=bool)
    """
    n, x = asarray(n), asarray(x)
    # For n > 0, psi^(n)(x) = (-1)**(n+1) * n! * zeta(n+1, x); for n == 0
    # the function is psi itself, selected by the where() below.
    higher = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
    return where(n == 0, psi(x), higher)
def mathieu_even_coef(m, q):
    r"""Fourier coefficients for even Mathieu and modified Mathieu functions.

    The Fourier series of the even solutions of the Mathieu differential
    equation are of the form

    .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz

    .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z

    This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
    input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
    m=2n+1.

    Parameters
    ----------
    m : int
        Order of Mathieu functions.  Must be non-negative.
    q : float (>=0)
        Parameter of Mathieu functions.  Must be non-negative.

    Returns
    -------
    Ak : ndarray
        Even or odd Fourier coefficients, corresponding to even or odd m.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    .. [2] NIST Digital Library of Mathematical Functions
           http://dlmf.nist.gov/28.4#i
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m < 0):
        raise ValueError("m must be an integer >=0.")
    # Empirical estimate (from [1]) of how many coefficients are significant.
    if q <= 1:
        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
    else:
        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
    km = int(qm + 0.5*m)
    if km > 251:
        print("Warning, too many predicted coefficients.")
    m = int(floor(m))
    # fcoef expansion selector: 1 -> ce_{2n} (even m), 2 -> ce_{2n+1} (odd m).
    kd = 2 if m % 2 else 1
    a = mathieu_a(m, q)
    fc = specfun.fcoef(kd, m, q, a)
    return fc[:km]
def mathieu_odd_coef(m,q):
    r"""Fourier coefficients for odd Mathieu and modified Mathieu functions.

    The Fourier series of the odd solutions of the Mathieu differential
    equation are of the form

    .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z

    .. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z

    This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
    input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd input
    m=2n+1.

    Parameters
    ----------
    m : int
        Order of Mathieu functions.  Must be positive.
    q : float (>=0)
        Parameter of Mathieu functions.  Must be non-negative.

    Returns
    -------
    Bk : ndarray
        Even or odd Fourier coefficients, corresponding to even or odd m.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m <= 0):
        raise ValueError("m must be an integer > 0")
    # Empirical estimate (from [1]) of how many coefficients are significant.
    if (q <= 1):
        qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
    else:
        qm = 17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
    km = int(qm+0.5*m)
    if km > 251:
        print("Warning, too many predicted coefficients.")
    # fcoef expansion selector: 4 -> se_{2n+2} (even m), 3 -> se_{2n+1} (odd m).
    kd = 4
    m = int(floor(m))
    if m % 2:
        kd = 3
    b = mathieu_b(m,q)
    fc = specfun.fcoef(kd,m,q,b)
    return fc[:km]
def lpmn(m,n,z):
    """Associated Legendre function of the first kind, Pmn(z).

    Computes the associated Legendre function of the first kind of order m and
    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    This function takes a real argument ``z``. For complex arguments ``z``
    use clpmn instead.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : float
        Input value.

    Returns
    -------
    Pmn_z : (m+1, n+1) array
       Values for all orders 0..m and degrees 0..n
    Pmn_d_z : (m+1, n+1) array
       Derivatives for all orders 0..m and degrees 0..n

    See Also
    --------
    clpmn: associated Legendre functions of the first kind for complex z

    Notes
    -----
    In the interval (-1, 1), Ferrer's function of the first kind is
    returned. The phase convention used for the intervals (1, inf)
    and (-inf, -1) is such that the result is always real.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    .. [2] NIST Digital Library of Mathematical Functions
           http://dlmf.nist.gov/14.3
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if iscomplex(z):
        raise ValueError("Argument must be real. Use clpmn instead.")
    if (m < 0):
        # Negative order: compute for |m| and rescale below with the
        # reflection factor P_n^{-m} = factor * P_n^m.
        mp = -m
        mf,nf = mgrid[0:mp+1,0:n+1]
        sv = errprint(0)  # silence gamma-pole warnings for entries mf > nf
        if abs(z) < 1:
            # Ferrer function; DLMF 14.9.3
            fixarr = where(mf > nf,0.0,(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
        else:
            # Match to clpmn; DLMF 14.9.13
            fixarr = where(mf > nf,0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
        sv = errprint(sv)  # restore previous error-print state
    else:
        mp = m
    p,pd = specfun.lpmn(mp,n,z)
    if (m < 0):
        # Apply the reflection factor to both values and derivatives.
        p = p * fixarr
        pd = pd * fixarr
    return p,pd
def clpmn(m, n, z, type=3):
    """Associated Legendre function of the first kind, Pmn(z).

    Computes the associated Legendre function of the first kind of order m and
    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : float or complex
        Input value.
    type : int, optional
       takes values 2 or 3
       2: cut on the real axis ``|x| > 1``
       3: cut on the real axis ``-1 < x < 1`` (default)

    Returns
    -------
    Pmn_z : (m+1, n+1) array
       Values for all orders ``0..m`` and degrees ``0..n``
    Pmn_d_z : (m+1, n+1) array
       Derivatives for all orders ``0..m`` and degrees ``0..n``

    See Also
    --------
    lpmn: associated Legendre functions of the first kind for real z

    Notes
    -----
    By default, i.e. for ``type=3``, phase conventions are chosen according
    to [1]_ such that the function is analytic. The cut lies on the interval
    (-1, 1). Approaching the cut from above or below in general yields a phase
    factor with respect to Ferrer's function of the first kind
    (cf. `lpmn`).

    For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
    on the interval (-1, 1) in the complex plane yields Ferrer's function
    of the first kind.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    .. [2] NIST Digital Library of Mathematical Functions
           http://dlmf.nist.gov/14.21
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if not(type == 2 or type == 3):
        raise ValueError("type must be either 2 or 3.")
    if (m < 0):
        # Negative order: compute for |m| and rescale below with the
        # reflection factor; the phase depends on the branch-cut choice.
        mp = -m
        mf,nf = mgrid[0:mp+1,0:n+1]
        sv = errprint(0)  # silence gamma-pole warnings for entries mf > nf
        if type == 2:
            fixarr = where(mf > nf,0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
        else:
            fixarr = where(mf > nf,0.0,gamma(nf-mf+1) / gamma(nf+mf+1))
        sv = errprint(sv)  # restore previous error-print state
    else:
        mp = m
    # The fortran routine takes real and imaginary parts separately.
    p,pd = specfun.clpmn(mp,n,real(z),imag(z),type)
    if (m < 0):
        # Apply the reflection factor to both values and derivatives.
        p = p * fixarr
        pd = pd * fixarr
    return p,pd
def lqmn(m, n, z):
    """Associated Legendre function of the second kind, Qmn(z).

    Computes the associated Legendre function of the second kind of order m
    and degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative
    ``Qmn'(z)``.  Returns two arrays of size ``(m+1, n+1)`` containing
    ``Qmn(z)`` and ``Qmn'(z)`` for all orders from ``0..m`` and degrees
    from ``0..n``.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : complex
        Input value.

    Returns
    -------
    Qmn_z : (m+1, n+1) array
       Values for all orders 0..m and degrees 0..n
    Qmn_d_z : (m+1, n+1) array
       Derivatives for all orders 0..m and degrees 0..n

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(m) or (m < 0):
        raise ValueError("m must be a non-negative integer.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    m, n = int(m), int(n)
    # The fortran routines need order and degree of at least 1; trim after.
    mm, nn = max(1, m), max(1, n)
    if iscomplex(z):
        q, qd = specfun.clqmn(mm, nn, z)
    else:
        q, qd = specfun.lqmn(mm, nn, z)
    return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
    """Bernoulli numbers B0..Bn (inclusive).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    n1 = max(n, 2)  # bernob requires at least two numbers
    return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
    """Euler numbers E0..En (inclusive).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    n1 = max(n, 2)  # eulerb requires at least two numbers
    return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
    """Legendre functions of the first kind, Pn(z).

    Compute the sequence of Legendre polynomials Pn(z) and their
    derivatives for all degrees from 0 to n (inclusive).

    See also special.legendre for polynomial class.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n1 = max(n, 1)  # fortran routines need degree >= 1
    if iscomplex(z):
        pn, pd = specfun.clpn(n1, z)
    else:
        pn, pd = specfun.lpn(n1, z)
    return pn[:(n + 1)], pd[:(n + 1)]
## lpni
def lqn(n, z):
    """Legendre functions of the second kind, Qn(z).

    Compute the sequence of Legendre functions of the second kind, Qn(z),
    and their derivatives for all degrees from 0 to n (inclusive).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n1 = max(n, 1)  # fortran routines need degree >= 1
    if iscomplex(z):
        qn, qd = specfun.clqn(n1, z)
    else:
        qn, qd = specfun.lqnb(n1, z)
    return qn[:(n + 1)], qd[:(n + 1)]
def ai_zeros(nt):
    """Compute nt zeros of Airy function Ai(x) and derivative, and corresponding values.

    Computes the first nt zeros, a, of the Airy function Ai(x); first nt zeros,
    a', of the derivative of the Airy function Ai'(x); the corresponding values
    Ai(a'); and the corresponding values Ai'(a).

    Parameters
    ----------
    nt : int
        Number of zeros to compute

    Returns
    -------
    a : ndarray
        First nt zeros of Ai(x)
    ap : ndarray
        First nt zeros of Ai'(x)
    ai : ndarray
        Values of Ai(x) evaluated at first nt zeros of Ai'(x)
    aip : ndarray
        Values of Ai'(x) evaluated at first nt zeros of Ai(x)

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    return specfun.airyzo(nt, 1)  # kf=1 selects the Ai branch
def bi_zeros(nt):
    """Compute nt zeros of Airy function Bi(x) and derivative, and corresponding values.

    Computes the first nt zeros, b, of the Airy function Bi(x); first nt zeros,
    b', of the derivative of the Airy function Bi'(x); the corresponding values
    Bi(b'); and the corresponding values Bi'(b).

    Parameters
    ----------
    nt : int
        Number of zeros to compute

    Returns
    -------
    b : ndarray
        First nt zeros of Bi(x)
    bp : ndarray
        First nt zeros of Bi'(x)
    bi : ndarray
        Values of Bi(x) evaluated at first nt zeros of Bi'(x)
    bip : ndarray
        Values of Bi'(x) evaluated at first nt zeros of Bi(x)

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    return specfun.airyzo(nt, 2)  # kf=2 selects the Bi branch
def lmbda(v,x):
    """Jahnke-Emden Lambda function, Lambdav(x).

    Parameters
    ----------
    v : float
        Order of the Lambda function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    vl : ndarray
        Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dl : ndarray
        Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (v < 0):
        raise ValueError("argument must be > 0.")
    n = int(v)   # integer part of the order
    v0 = v - n   # fractional part, 0 <= v0 < 1
    if (n < 1):
        n1 = 1   # fortran routines need at least one order
    else:
        n1 = n
    v1 = n1 + v0  # maximum (possibly fractional) order passed to specfun
    if (v != floor(v)):
        # Non-integer order: general fractional-order routine.
        vm, vl, dl = specfun.lamv(v1,x)
    else:
        # Integer order: dedicated integer-order routine.
        vm, vl, dl = specfun.lamn(v1,x)
    return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
    """Parabolic cylinder functions Dv(x) and derivatives.

    Parameters
    ----------
    v : float
        Order of the parabolic cylinder function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dp : ndarray
        Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = int(v)
    v0 = v - n            # fractional part of the order
    n1 = max(n, 1)        # fortran routine needs at least one order
    v1 = n1 + v0
    dv, dp, pdf, pdd = specfun.pbdv(v1, x)
    return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
    """Parabolic cylinder functions Vv(x) and derivatives.

    Parameters
    ----------
    v : float
        Order of the parabolic cylinder function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dp : ndarray
        Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = int(v)
    v0 = v - n            # fractional part of the order
    n1 = max(n, 1)        # fortran routine needs at least one order
    v1 = n1 + v0
    dv, dp, pdf, pdd = specfun.pbvv(v1, x)
    return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
    """Parabolic cylinder functions Dn(z) and derivatives.

    Parameters
    ----------
    n : int
        Order of the parabolic cylinder function
    z : complex
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of D_i(z), for i=0, ..., i=n.
    dp : ndarray
        Derivatives D_i'(z), for i=0, ..., i=n.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (floor(n) != n):
        raise ValueError("n must be an integer.")
    n1 = 1 if abs(n) <= 1 else n  # cpbdn needs |order| > 1, else use 1
    cpb, cpd = specfun.cpbdn(n1, z)
    return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
    """Compute the first nt zeros of the Kelvin function ber(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 1)  # id 1 selects ber
def bei_zeros(nt):
    """Compute the first nt zeros of the Kelvin function bei(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 2)  # id 2 selects bei
def ker_zeros(nt):
    """Compute the first nt zeros of the Kelvin function ker(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 3)  # id 3 selects ker
def kei_zeros(nt):
    """Compute the first nt zeros of the Kelvin function kei(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 4)  # id 4 selects kei
def berp_zeros(nt):
    """Compute the first nt zeros of the Kelvin function ber'(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 5)  # id 5 selects ber'
def beip_zeros(nt):
    """Compute the first nt zeros of the Kelvin function bei'(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 6)  # id 6 selects bei'
def kerp_zeros(nt):
    """Compute the first nt zeros of the Kelvin function ker'(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 7)  # id 7 selects ker'
def keip_zeros(nt):
    """Compute the first nt zeros of the Kelvin function kei'(x).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    return specfun.klvnzo(nt, 8)  # id 8 selects kei'
def kelvin_zeros(nt):
    """Compute nt zeros of all Kelvin functions.

    Returned in a length-8 tuple of arrays of length nt.  The tuple contains
    the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    # ids 1..8 select ber, bei, ker, kei, ber', bei', ker', kei' in turn.
    return tuple(specfun.klvnzo(nt, kd) for kd in range(1, 9))
def pro_cv_seq(m, n, c):
    """Characteristic values for prolate spheroidal wave functions.

    Compute a sequence of characteristic values for the prolate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if (n != floor(n)) or (m != floor(m)):
        raise ValueError("Modes must be integers.")
    if (n-m > 199):
        raise ValueError("Difference between n and m is too large.")
    maxL = n - m + 1
    # segv(..., 1) -> prolate; its second output holds the values.
    return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
    """Characteristic values for oblate spheroidal wave functions.

    Compute a sequence of characteristic values for the oblate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           http://jin.ece.illinois.edu/specfunc.html
    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if (n != floor(n)) or (m != floor(m)):
        raise ValueError("Modes must be integers.")
    if (n-m > 199):
        raise ValueError("Difference between n and m is too large.")
    maxL = n - m + 1
    # segv(..., -1) -> oblate; its second output holds the values.
    return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
    """Complete elliptic integral of the first kind.

    This function is defined as

    .. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt

    Parameters
    ----------
    m : array_like
        The parameter of the elliptic integral.

    Returns
    -------
    K : array_like
        Value of the elliptic integral.

    Notes
    -----
    For more precision around point m = 1, use `ellipkm1`.

    See Also
    --------
    ellipkm1 : Complete elliptic integral of the first kind around m = 1
    ellipkinc : Incomplete elliptic integral of the first kind
    ellipe : Complete elliptic integral of the second kind
    ellipeinc : Incomplete elliptic integral of the second kind
    """
    # ellipkm1 expects the complementary parameter p = 1 - m.
    p = 1 - asarray(m)
    return ellipkm1(p)
def agm(a, b):
    """Arithmetic, Geometric Mean.

    Start with a_0=a and b_0=b and iteratively compute

    a_{n+1} = (a_n+b_n)/2
    b_{n+1} = sqrt(a_n*b_n)

    until a_n=b_n.  The result is agm(a,b).

    agm(a,b)=agm(b,a)
    agm(a,a) = a
    min(a,b) < agm(a,b) < max(a,b)
    """
    # Closed form via the complete elliptic integral of the first kind:
    # agm(a, b) = pi*(a+b)/4 / K(1 - 4ab/(a+b)**2), expressed through
    # ellipkm1 whose argument is the complementary parameter 4ab/(a+b)**2.
    tot = a + b + 0.0  # force float arithmetic
    return (pi / 4) * tot / ellipkm1(4 * a * b / tot ** 2)
def comb(N, k, exact=False, repetition=False):
    """The number of combinations of N things taken k at a time.

    This is often expressed as "N choose k".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        If `exact` is False, then floating point precision is used, otherwise
        exact long integer is computed.
    repetition : bool, optional
        If `repetition` is True, then the number of combinations with
        repetition is computed.

    Returns
    -------
    val : int, ndarray
        The total number of combinations.

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If k > N, N < 0, or k < 0, then a 0 is returned.

    Examples
    --------
    >>> from scipy.special import comb
    >>> k = np.array([3, 4])
    >>> n = np.array([10, 10])
    >>> comb(n, k, exact=False)
    array([ 120.,  210.])
    >>> comb(10, 3, exact=True)
    120
    >>> comb(10, 3, exact=True, repetition=True)
    220
    """
    if repetition:
        # Multisets of size k from N items: C(N + k - 1, k).
        return comb(N + k - 1, k, exact)
    if exact:
        N = int(N)
        k = int(k)
        if (k > N) or (N < 0) or (k < 0):
            return 0
        val = 1
        # Incremental multiply/divide keeps val integral at each step:
        # val*(N-j) is always divisible by j+1.  Iterate over the smaller
        # of k and N-k for fewer steps.  `range` (not the Python-2-only
        # `xrange`) works on both Python 2 and 3.
        for j in range(min(k, N - k)):
            val = (val * (N - j)) // (j + 1)
        return val
    else:
        k, N = asarray(k), asarray(N)
        cond = (k <= N) & (N >= 0) & (k >= 0)
        vals = binom(N, k)
        # Zero out the out-of-domain entries (k > N, negative N or k).
        if isinstance(vals, np.ndarray):
            vals[~cond] = 0
        elif not cond:
            vals = np.float64(0)
        return vals
def perm(N, k, exact=False):
    """Permutations of N things taken k at a time, i.e., k-permutations of N.

    It's also known as "partial permutations".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        If `exact` is False, then floating point precision is used, otherwise
        exact long integer is computed.

    Returns
    -------
    val : int, ndarray
        The number of k-permutations of N.

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If k > N, N < 0, or k < 0, then a 0 is returned.

    Examples
    --------
    >>> from scipy.special import perm
    >>> k = np.array([3, 4])
    >>> n = np.array([10, 10])
    >>> perm(n, k)
    array([  720.,  5040.])
    >>> perm(10, 3, exact=True)
    720
    """
    if exact:
        if (k > N) or (N < 0) or (k < 0):
            return 0
        val = 1
        # N * (N-1) * ... * (N-k+1).  `range` (not the Python-2-only
        # `xrange`) works on both Python 2 and 3.
        for i in range(N - k + 1, N + 1):
            val *= i
        return val
    else:
        k, N = asarray(k), asarray(N)
        cond = (k <= N) & (N >= 0) & (k >= 0)
        # The Pochhammer symbol poch(N-k+1, k) equals N!/(N-k)!.
        vals = poch(N - k + 1, k)
        # Zero out the out-of-domain entries (k > N, negative N or k).
        if isinstance(vals, np.ndarray):
            vals[~cond] = 0
        elif not cond:
            vals = np.float64(0)
        return vals
def factorial(n, exact=False):
    """The factorial function, n! = special.gamma(n+1).

    If exact is 0, then floating point precision is used, otherwise
    exact long integer is computed.

    - Array argument accepted only for exact=False case.
    - If n<0, the return value is 0.

    Parameters
    ----------
    n : int or array_like of ints
        Calculate ``n!``.  Arrays are only supported with `exact` set
        to False.  If ``n < 0``, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above.  If `exact` is set to True, calculate the
        answer exactly using integer arithmetic.  Default is False.

    Returns
    -------
    nf : float or int
        Factorial of `n`, as an integer or a float depending on `exact`.

    Examples
    --------
    >>> from scipy.special import factorial
    >>> arr = np.array([3,4,5])
    >>> factorial(arr, exact=False)
    array([   6.,   24.,  120.])
    >>> factorial(5, exact=True)
    120
    """
    if exact:
        if n < 0:
            return 0
        val = 1
        # `range` (not the Python-2-only `xrange`) works on Python 2 and 3.
        for k in range(1, n + 1):
            val *= k
        return val
    else:
        n = asarray(n)
        # n! = gamma(n + 1); negative entries are mapped to 0 by convention.
        vals = gamma(n + 1)
        return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
    """Double factorial.

    This is the factorial with every second value skipped.  E.g., ``7!! = 7 * 5
    * 3 * 1``.  It can be approximated numerically as::

      n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi)  n odd
          = 2**(n/2) * (n/2)!                           n even

    Parameters
    ----------
    n : int or array_like
        Calculate ``n!!``.  Arrays are only supported with `exact` set
        to False.  If ``n < 0``, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above (default).  If `exact` is set to True, calculate the
        answer exactly using integer arithmetic.

    Returns
    -------
    nff : float or int
        Double factorial of `n`, as an int or a float depending on
        `exact`.

    Examples
    --------
    >>> from scipy.special import factorial2
    >>> factorial2(7, exact=False)
    array(105.00000000000001)
    >>> factorial2(7, exact=True)
    105
    """
    if exact:
        if n < -1:
            return 0
        if n <= 0:
            return 1  # by convention, 0!! = (-1)!! = 1
        val = 1
        # `range` (not the Python-2-only `xrange`) works on Python 2 and 3.
        for k in range(n, 0, -2):
            val *= k
        return val
    else:
        n = asarray(n)
        vals = zeros(n.shape, 'd')
        # Split into odd and even entries (>= -1); each has its own formula.
        cond1 = (n % 2) & (n >= -1)
        cond2 = (1 - (n % 2)) & (n >= -1)
        oddn = extract(cond1, n)
        evenn = extract(cond2, n)
        nd2o = oddn / 2.0
        nd2e = evenn / 2.0
        # odd n:  n!! = gamma(n/2 + 1) * 2**((n+1)/2) / sqrt(pi)
        place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
        # even n: n!! = 2**(n/2) * (n/2)!
        place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
        return vals
def factorialk(n, k, exact=True):
    """Multifactorial of n of order k, n(!!...!).

    This is the multifactorial of n skipping k values.  For example,

      factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1

    In particular, for any integer ``n``, we have

      factorialk(n, 1) = factorial(n)

      factorialk(n, 2) = factorial2(n)

    Parameters
    ----------
    n : int
        Calculate multifactorial. If `n` < 0, the return value is 0.
    k : int
        Order of multifactorial.
    exact : bool, optional
        If exact is set to True, calculate the answer exactly using
        integer arithmetic.  Note the default is True here, unlike
        `factorial` and `factorial2`: no approximate path exists.

    Returns
    -------
    val : int
        Multifactorial of `n`.

    Raises
    ------
    NotImplementedError
        Raises when exact is False

    Examples
    --------
    >>> from scipy.special import factorialk
    >>> factorialk(5, 1, exact=True)
    120
    >>> factorialk(5, 3, exact=True)
    10

    """
    if exact:
        # Values in (1-k, 0] give the empty product, defined as 1;
        # anything below that range is defined as 0.
        if n < 1 - k:
            return 0
        if n <= 0:
            return 1
        # Exact integer product n * (n-k) * (n-2k) * ... while positive.
        # `range` (not the Python-2-only `xrange`) keeps this working on
        # both Python 2 and Python 3.
        val = 1
        for j in range(n, 0, -k):
            val *= j
        return val
    else:
        raise NotImplementedError
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_console_config.py | 24 | 21691 | # Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of string where the first element is the
# command itself and the remaining elements are the options for the command. Raw image data
# is given as STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporally file and the program is called with the temporally file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of string where the first element is the
# command itself and the remaining elements are the options for the command. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| mit |
LeeKamentsky/CellProfiler | cellprofiler/modules/saveimages.py | 1 | 60223 | '''<b>Save Images </b> saves image or movie files.
<hr>
Because CellProfiler usually performs many image analysis steps on many
groups of images, it does <i>not</i> save any of the resulting images to the
hard drive unless you specifically choose to do so with the <b>SaveImages</b>
module. You can save any of the
processed images created by CellProfiler during the analysis using this module.
<p>You can choose from many different image formats for saving your files. This
allows you to use the module as a file format converter, by loading files
in their original format and then saving them in an alternate format.</p>
<p>Note that saving images in 12-bit format is not supported, and 16-bit format
is supported for TIFF only.</p>
See also <b>NamesAndTypes</b>, <b>ConserveMemory</b>.
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
import matplotlib
import numpy as np
import re
import os
import sys
import scipy.io.matlab.mio
import traceback
logger = logging.getLogger(__name__)
import cellprofiler.cpmodule as cpm
import cellprofiler.measurements as cpmeas
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.preferences as cpp
from cellprofiler.gui.help import USING_METADATA_TAGS_REF, USING_METADATA_HELP_REF
from cellprofiler.preferences import \
standardize_default_folder_names, DEFAULT_INPUT_FOLDER_NAME, \
DEFAULT_OUTPUT_FOLDER_NAME, ABSOLUTE_FOLDER_NAME, \
DEFAULT_INPUT_SUBFOLDER_NAME, DEFAULT_OUTPUT_SUBFOLDER_NAME, \
IO_FOLDER_CHOICE_HELP_TEXT, IO_WITH_METADATA_HELP_TEXT, \
get_default_image_directory
from cellprofiler.utilities.relpath import relpath
from cellprofiler.modules.loadimages import C_FILE_NAME, C_PATH_NAME, C_URL
from cellprofiler.modules.loadimages import \
C_OBJECTS_FILE_NAME, C_OBJECTS_PATH_NAME, C_OBJECTS_URL
from cellprofiler.modules.loadimages import pathname2url
from cellprofiler.cpmath.cpmorphology import distance_color_labels
from cellprofiler.utilities.version import get_version
from bioformats.formatwriter import write_image
import bioformats.omexml as ome
# ---- Module-level constants for SaveImages ----

# Choices for "Select the type of image to save".
IF_IMAGE = "Image"
IF_MASK = "Mask"
IF_CROPPING = "Cropping"
IF_FIGURE = "Module window"
IF_MOVIE = "Movie"
IF_OBJECTS = "Objects"
# The list of choices actually offered in the setting; note that
# IF_FIGURE is not included here (create_settings offers IF_ALL only).
IF_ALL = [IF_IMAGE, IF_MASK, IF_CROPPING, IF_MOVIE, IF_OBJECTS]

# Bit-depth choice strings.  The OLD_* spellings are presumably legacy
# values from earlier module revisions -- TODO confirm against the
# settings-upgrade code.
OLD_BIT_DEPTH_8 = "8"
OLD_BIT_DEPTH_16 = "16"
BIT_DEPTH_8 = "8-bit integer"
BIT_DEPTH_16 = "16-bit integer"
BIT_DEPTH_FLOAT = "32-bit floating point"

# Choices for "Select method for constructing file names".  Only the
# first three are offered by create_settings; the *_WITH_METADATA values
# are presumably retained for loading older pipelines -- verify against
# upgrade_settings.
FN_FROM_IMAGE = "From image filename"
FN_SEQUENTIAL = "Sequential numbers"
FN_SINGLE_NAME = "Single name"
SINGLE_NAME_TEXT = "Enter single file name"
FN_WITH_METADATA = "Name with metadata"
FN_IMAGE_FILENAME_WITH_METADATA = "Image filename with metadata"
METADATA_NAME_TEXT = ("""Enter file name with metadata""")
SEQUENTIAL_NUMBER_TEXT = "Enter file prefix"

# Supported output file-format extensions.
FF_BMP = "bmp"
FF_JPG = "jpg"
FF_JPEG = "jpeg"
FF_PBM = "pbm"
FF_PCX = "pcx"
FF_PGM = "pgm"
FF_PNG = "png"
FF_PNM = "pnm"
FF_PPM = "ppm"
FF_RAS = "ras"
FF_TIF = "tif"
FF_TIFF = "tiff"
FF_XWD = "xwd"
FF_AVI = "avi"
FF_MAT = "mat"
FF_MOV = "mov"
# Per the module docstring, 16-bit output is supported for TIFF only.
FF_SUPPORTING_16_BIT = [FF_TIF, FF_TIFF]

# Choices for the output folder location.
PC_WITH_IMAGE = "Same folder as image"
# Legacy spellings of PC_WITH_IMAGE accepted when loading old pipelines
# -- TODO confirm against the settings-upgrade code.
OLD_PC_WITH_IMAGE_VALUES = ["Same folder as image"]
PC_CUSTOM = "Custom"
PC_WITH_METADATA = "Custom with metadata"

# Choices for "Select how often to save" (which cycles write a file).
WS_EVERY_CYCLE = "Every cycle"
WS_FIRST_CYCLE = "First cycle"
WS_LAST_CYCLE = "Last cycle"

# Colormap / color-mode choice strings.
CM_GRAY = "gray"
GC_GRAYSCALE = "Grayscale"
GC_COLOR = "Color"

'''Offset to the directory path setting'''
OFFSET_DIRECTORY_PATH = 11

'''Offset to the bit depth setting in version 11'''
OFFSET_BIT_DEPTH_V11 = 12
class SaveImages(cpm.CPModule):
module_name = "SaveImages"
variable_revision_number = 11
category = "File Processing"
def create_settings(self):
self.save_image_or_figure = cps.Choice(
"Select the type of image to save",
IF_ALL,
IF_IMAGE,doc="""
The following types of images can be saved as a file on the hard drive:
<ul>
<li><i>%(IF_IMAGE)s:</i> Any of the images produced upstream of <b>SaveImages</b> can be selected for saving.
Outlines created by <b>Identify</b> modules can also be saved with this option, but you must
select "Retain outlines..." of identified objects within the <b>Identify</b> module. You might
also want to use the <b>OverlayOutlines</b> module prior to saving images.</li>
<li><i>%(IF_MASK)s:</i> Relevant only if the <b>Crop</b> module is used. The <b>Crop</b> module
creates a mask of the pixels of interest in the image. Saving the mask will produce a
binary image in which the pixels of interest are set to 1; all other pixels are
set to 0.</li>
<li><i>%(IF_CROPPING)s:</i> Relevant only if the <b>Crop</b> module is used. The <b>Crop</b>
module also creates a cropping image which is typically the same size as the original
image. However, since the <b>Crop</b> permits removal of the rows and columns that are left
blank, the cropping can be of a different size than the mask.</li>
<li><i>%(IF_MOVIE)s:</i> A sequence of images can be saved as a movie file. Currently only AVIs can be written.
Each image becomes a frame of the movie.</li>
<li><i>%(IF_OBJECTS)s:</i> Objects can be saved as an image. The image
is saved as grayscale unless you select a color map other than
gray. Background pixels appear as black and
each object is assigned an intensity level corresponding to
its object number. The resulting image can be loaded as objects
by the <b>NamesAndTypes</b> module. Objects are best saved as TIF
files. <b>SaveImages</b> will use an 8-bit TIF file if there
are fewer than 256 objects and will use a 16-bit TIF otherwise.
Results may be unpredictable if you save using PNG and there
are more than 255 objects or if you save using one of the other
file formats.</li>
</ul>"""%globals())
self.image_name = cps.ImageNameSubscriber(
"Select the image to save",cps.NONE, doc = """
<i>(Used only if "%(IF_IMAGE)s", "%(IF_MASK)s" or "%(IF_CROPPING)s" are selected to save)</i><br>
Select the image you want to save."""%globals())
self.objects_name = cps.ObjectNameSubscriber(
"Select the objects to save", cps.NONE,doc = """
<i>(Used only if saving "%(IF_OBJECTS)s")</i><br>
Select the objects that you want to save."""%globals())
self.figure_name = cps.FigureSubscriber(
"Select the module display window to save",cps.NONE,doc="""
<i>(Used only if saving "%(IF_FIGURE)s")</i><br>
Enter the module number/name for which you want to
save the module display window."""%globals())
self.file_name_method = cps.Choice(
"Select method for constructing file names",
[FN_FROM_IMAGE, FN_SEQUENTIAL,
FN_SINGLE_NAME],
FN_FROM_IMAGE,doc="""
<i>(Used only if saving non-movie files)</i><br>
Several choices are available for constructing the image file name:
<ul>
<li><i>%(FN_FROM_IMAGE)s:</i> The filename will be constructed based
on the original filename of an input image specified in <b>NamesAndTypes</b>.
You will have the opportunity to prefix or append
additional text.
<p>If you have metadata associated with your images, you can append an text
to the image filename using a metadata tag. This is especially useful if you
want your output given a unique label according to the metadata corresponding
to an image group. The name of the metadata to substitute can be provided for
each image for each cycle using the <b>Metadata</b> module.
%(USING_METADATA_TAGS_REF)s%(USING_METADATA_HELP_REF)s.</p></li>
<li><i>%(FN_SEQUENTIAL)s:</i> Same as above, but in addition, each filename
will have a number appended to the end that corresponds to
the image cycle number (starting at 1).</li>
<li><i>%(FN_SINGLE_NAME)s:</i> A single name will be given to the
file. Since the filename is fixed, this file will be overwritten with each cycle.
In this case, you would probably want to save the image on the last cycle
(see the <i>Select how often to save</i> setting). The exception to this is to
use a metadata tag to provide a unique label, as mentioned
in the <i>%(FN_FROM_IMAGE)s</i> option.</li>
</ul>"""%globals())
self.file_image_name = cps.FileImageNameSubscriber(
"Select image name for file prefix",
cps.NONE,doc="""
<i>(Used only when "%(FN_FROM_IMAGE)s" is selected for contructing the filename)</i><br>
Select an image loaded using <b>NamesAndTypes</b>. The original filename will be
used as the prefix for the output filename."""%globals())
self.single_file_name = cps.Text(
SINGLE_NAME_TEXT, "OrigBlue",
metadata = True, doc="""
<i>(Used only when "%(FN_SEQUENTIAL)s" or "%(FN_SINGLE_NAME)s" are selected for contructing the filename)</i><br>
Specify the filename text here. If you have metadata
associated with your images, enter the filename text with the metadata tags. %(USING_METADATA_TAGS_REF)s<br>
Do not enter the file extension in this setting; it will be appended automatically."""%globals())
self.number_of_digits = cps.Integer(
"Number of digits", 4, doc="""
<i>(Used only when "%(FN_SEQUENTIAL)s" is selected for contructing the filename)</i><br>
Specify the number of digits to be used for the sequential numbering. Zeros will be
used to left-pad the digits. If the number specified here is less than that needed to
contain the number of image sets, the latter will override the value entered."""%globals())
self.wants_file_name_suffix = cps.Binary(
"Append a suffix to the image file name?", False, doc = """
Select <i>%(YES)s</i> to add a suffix to the image's file name.
Select <i>%(NO)s</i> to use the image name as-is."""%globals())
self.file_name_suffix = cps.Text(
"Text to append to the image name",
"", metadata = True, doc="""
<i>(Used only when constructing the filename from the image filename)</i><br>
Enter the text that should be appended to the filename specified above.""")
self.file_format = cps.Choice(
"Saved file format",
[FF_BMP, FF_JPG, FF_JPEG, FF_PNG, FF_TIF, FF_TIFF, FF_MAT],
value = FF_TIF, doc="""
<i>(Used only when saving non-movie files)</i><br>
Select the image or movie format to save the image(s). Most common
image formats are available; MAT-files are readable by MATLAB.""")
self.movie_format = cps.Choice(
"Saved movie format",
[FF_AVI, FF_TIF, FF_MOV],
value = FF_AVI, doc="""
<i>(Used only when saving movie files)</i><br>
Select the movie format to use when saving movies. AVI and MOV
store images from successive image sets as movie frames. TIF
stores each image as an image plane in a TIF stack.
""")
self.pathname = SaveImagesDirectoryPath(
"Output file location", self.file_image_name,doc = """
<i>(Used only when saving non-movie files)</i><br>
This setting lets you choose the folder for the output
files. %(IO_FOLDER_CHOICE_HELP_TEXT)s
<p>An additional option is the following:
<ul>
<li><i>Same folder as image</i>: Place the output file in the same folder
that the source image is located.</li>
</ul></p>
<p>%(IO_WITH_METADATA_HELP_TEXT)s %(USING_METADATA_TAGS_REF)s.
For instance, if you have a metadata tag named
"Plate", you can create a per-plate folder by selecting one the subfolder options
and then specifying the subfolder name as "\g<Plate>". The module will
substitute the metadata values for the current image set for any metadata tags in the
folder name.%(USING_METADATA_HELP_REF)s.</p>
<p>If the subfolder does not exist when the pipeline is run, CellProfiler will
create it.</p>
<p>If you are creating nested subfolders using the sub-folder options, you can
specify the additional folders separated with slashes. For example, "Outlines/Plate1" will create
a "Plate1" folder in the "Outlines" folder, which in turn is under the Default
Input/Output Folder. The use of a forward slash ("/") as a folder separator will
avoid ambiguity between the various operating systems.</p>"""%globals())
# TODO:
self.bit_depth = cps.Choice(
"Image bit depth",
[BIT_DEPTH_8, BIT_DEPTH_16, BIT_DEPTH_FLOAT],doc="""
<i>(Used only when saving files in a non-MAT format)</i><br>
Select the bit-depth at which you want to save the images.
<i>%(BIT_DEPTH_FLOAT)s</i> saves the image as floating-point decimals
with 32-bit precision in its raw form, typically scaled between
0 and 1.
<b>%(BIT_DEPTH_16)s and %(BIT_DEPTH_FLOAT)s images are supported only
for TIF formats. Currently, saving images in 12-bit is not supported.</b>""" %
globals())
self.overwrite = cps.Binary(
"Overwrite existing files without warning?",False,doc="""
Select <i>%(YES)s</i> to automatically overwrite a file if it already exists.
Select <i>%(NO)s</i> to be prompted for confirmation first.
<p>If you are running the pipeline on a computing cluster,
select <i>%(YES)s</i> since you will not be able to intervene and answer the confirmation prompt.</p>"""%globals())
self.when_to_save = cps.Choice(
"When to save",
[WS_EVERY_CYCLE,WS_FIRST_CYCLE,WS_LAST_CYCLE],
WS_EVERY_CYCLE, doc="""<a name='when_to_save'>
<i>(Used only when saving non-movie files)</i><br>
Specify at what point during pipeline execution to save file(s). </a>
<ul>
<li><i>%(WS_EVERY_CYCLE)s:</i> Useful for when the image of interest is created every cycle and is
not dependent on results from a prior cycle.</li>
<li><i>%(WS_FIRST_CYCLE)s:</i> Useful for when you are saving an aggregate image created
on the first cycle, e.g., <b>CorrectIlluminationCalculate</b> with the <i>All</i>
setting used on images obtained directly from <b>NamesAndTypes</b>.</li>
<li><i>%(WS_LAST_CYCLE)s</i> Useful for when you are saving an aggregate image completed
on the last cycle, e.g., <b>CorrectIlluminationCalculate</b> with the <i>All</i>
setting used on intermediate images generated during each cycle.</li>
</ul> """%globals())
self.rescale = cps.Binary(
"Rescale the images? ",False,doc="""
<i>(Used only when saving non-MAT file images)</i><br>
Select <i>%(YES)s</i> if you want the image to occupy the full dynamic range of the bit
depth you have chosen. For example, if you save an image to an 8-bit file, the
smallest grayscale value will be mapped to 0 and the largest value will be mapped
to 2<sup>8</sup>-1 = 255.
<p>This will increase the contrast of the output image but will also effectively
stretch the image data, which may not be desirable in some
circumstances. See <b>RescaleIntensity</b> for other rescaling options.</p>"""%globals())
self.gray_or_color = cps.Choice(
"Save as grayscale or color image?",
[GC_GRAYSCALE, GC_COLOR],doc = """
<i>(Used only when saving "%(IF_OBJECTS)s")</i><br>
You can save objects as a grayscale image or as a color image.
<ul>
<li><i>%(GC_GRAYSCALE)s: </i> Use the pixel's object number
(label) for the grayscale intensity. Background pixels are
colored black. Grayscale images are more
suitable if you are going to load the image as objects using
<b>NamesAndTypes</b> or some other program that will be used to
relate object measurements to the pixels in the image.
You should save grayscale images using the .TIF or .MAT formats
if possible; otherwise you may have problems saving files
with more than 255 objects.</li>
<li><i>%(GC_COLOR)s:</i> Assigns different colors to different
objects.</li>
</ul>"""%globals())
self.colormap = cps.Colormap(
'Select colormap',
value = CM_GRAY,doc= """
<i>(Used only when saving non-MAT file images)</i><br>
This affects how images color intensities are displayed. All available colormaps can be seen
<a href="http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps">here</a>.""")
self.update_file_names = cps.Binary(
"Record the file and path information to the saved image?",False,doc="""
Select <i>%(YES)s</i>to store filename and pathname data for each of the new files created
via this module as a per-image measurement.
<p>Instances in which this information may be useful include:
<ul>
<li>Exporting measurements to a database, allowing
access to the saved image. If you are using the machine-learning tools or image
viewer in CellProfiler Analyst, for example, you will want to enable this setting if you want
the saved images to be displayed along with the original images.</li>
<li>Allowing downstream modules (e.g., <b>CreateWebPage</b>) to access
the newly saved files.</li>
</ul></p>"""%globals())
self.create_subdirectories = cps.Binary(
"Create subfolders in the output folder?",False,doc = """
Select <i>%(YES)s</i> to create subfolders to match the input image folder structure."""%globals())
self.root_dir = cps.DirectoryPath(
"Base image folder", doc = """
<i>Used only if creating subfolders in the output folder</i>
In subfolder mode, <b>SaveImages</b> determines the folder for
an image file by examining the path of the matching input file.
The path that SaveImages uses is relative to the image folder
chosen using this setting. As an example, input images might be stored
in a folder structure of "images%(sep)s<i>experiment-name</i>%(sep)s
<i>date</i>%(sep)s<i>plate-name</i>". If the image folder is
"images", <b>SaveImages</b> will store images in the subfolder,
"<i>experiment-name</i>%(sep)s<i>date</i>%(sep)s<i>plate-name</i>".
If the image folder is "images%(sep)s<i>experiment-name</i>",
<b>SaveImages</b> will store images in the subfolder,
<i>date</i>%(sep)s<i>plate-name</i>".
""" % dict(sep=os.path.sep))
def settings(self):
"""Return the settings in the order to use when saving"""
return [self.save_image_or_figure, self.image_name,
self.objects_name, self.figure_name,
self.file_name_method, self.file_image_name,
self.single_file_name, self.number_of_digits,
self.wants_file_name_suffix,
self.file_name_suffix, self.file_format,
self.pathname, self.bit_depth,
self.overwrite, self.when_to_save,
self.rescale, self.gray_or_color, self.colormap,
self.update_file_names, self.create_subdirectories,
self.root_dir, self.movie_format]
    def visible_settings(self):
        """Return only the settings that should be shown

        The list is order-sensitive: it determines the top-to-bottom
        layout of the module's UI. Note the side effect of updating
        self.single_file_name.text depending on the file name method.
        """
        result = [self.save_image_or_figure]
        # Pick the source selector matching what is being saved
        if self.save_image_or_figure == IF_FIGURE:
            result.append(self.figure_name)
        elif self.save_image_or_figure == IF_OBJECTS:
            result.append(self.objects_name)
        else:
            result.append(self.image_name)

        result.append(self.file_name_method)
        if self.file_name_method == FN_FROM_IMAGE:
            result += [self.file_image_name, self.wants_file_name_suffix]
            if self.wants_file_name_suffix:
                result.append(self.file_name_suffix)
        elif self.file_name_method == FN_SEQUENTIAL:
            # Relabel the shared single_file_name setting for this mode
            self.single_file_name.text = SEQUENTIAL_NUMBER_TEXT
            # XXX - Change doc, as well!
            result.append(self.single_file_name)
            result.append(self.number_of_digits)
        elif self.file_name_method == FN_SINGLE_NAME:
            self.single_file_name.text = SINGLE_NAME_TEXT
            result.append(self.single_file_name)
        else:
            raise NotImplementedError("Unhandled file name method: %s"%(self.file_name_method))
        # Movie vs. still-image format choice
        if self.save_image_or_figure == IF_MOVIE:
            result.append(self.movie_format)
        else:
            result.append(self.file_format)
        supports_16_bit = (self.file_format in FF_SUPPORTING_16_BIT and
                           self.save_image_or_figure == IF_IMAGE)
        if supports_16_bit:
            # TIFF supports 8 & 16-bit, all others are written 8-bit
            result.append(self.bit_depth)
        result.append(self.pathname)
        result.append(self.overwrite)
        if self.save_image_or_figure != IF_MOVIE:
            result.append(self.when_to_save)
        # Rescale / colormap options depend on the save mode and format
        if (self.save_image_or_figure == IF_IMAGE and
            self.file_format != FF_MAT):
            result.append(self.rescale)
            if self.get_bit_depth() == "8":
                result.append(self.colormap)
        elif self.save_image_or_figure == IF_OBJECTS:
            result.append(self.gray_or_color)
            if self.gray_or_color == GC_COLOR:
                result.append(self.colormap)
        result.append(self.update_file_names)
        if self.file_name_method == FN_FROM_IMAGE:
            result.append(self.create_subdirectories)
            if self.create_subdirectories:
                result.append(self.root_dir)
        return result
@property
def module_key(self):
return "%s_%d"%(self.module_name, self.module_num)
def prepare_group(self, workspace, grouping, image_numbers):
d = self.get_dictionary(workspace.image_set_list)
if self.save_image_or_figure == IF_MOVIE:
d['N_FRAMES'] = len(image_numbers)
d['CURRENT_FRAME'] = 0
return True
def prepare_to_create_batch(self, workspace, fn_alter_path):
self.pathname.alter_for_create_batch_files(fn_alter_path)
if self.create_subdirectories:
self.root_dir.alter_for_create_batch_files(fn_alter_path)
def run(self,workspace):
"""Run the module
pipeline - instance of CellProfiler.Pipeline for this run
workspace - the workspace contains:
image_set - the images in the image set being processed
object_set - the objects (labeled masks) in this image set
measurements - the measurements for this run
frame - display within this frame (or None to not display)
"""
if self.save_image_or_figure.value in (IF_IMAGE, IF_MASK, IF_CROPPING):
should_save = self.run_image(workspace)
elif self.save_image_or_figure == IF_MOVIE:
should_save = self.run_movie(workspace)
elif self.save_image_or_figure == IF_OBJECTS:
should_save = self.run_objects(workspace)
else:
raise NotImplementedError(("Saving a %s is not yet supported"%
(self.save_image_or_figure)))
workspace.display_data.filename = self.get_filename(
workspace, make_dirs = False, check_overwrite = False)
def is_aggregation_module(self):
'''SaveImages is an aggregation module when it writes movies'''
return self.save_image_or_figure == IF_MOVIE or \
self.when_to_save == WS_LAST_CYCLE
def display(self, workspace, figure):
if self.show_window:
if self.save_image_or_figure == IF_MOVIE:
return
figure.set_subplots((1, 1))
outcome = ("Wrote %s" if workspace.display_data.wrote_image
else "Did not write %s")
figure.subplot_table(0, 0, [[outcome %
(workspace.display_data.filename)]])
    def run_image(self,workspace):
        """Handle saving an image

        Honors the "when to save" setting: on WS_FIRST_CYCLE only the
        group's first image set is written; on WS_LAST_CYCLE the save is
        deferred to post_group. Returns True when an image was saved.
        """
        #
        # First, check to see if we should save this image
        #
        if self.when_to_save == WS_FIRST_CYCLE:
            d = self.get_dictionary(workspace.image_set_list)
            # GROUP_INDEX > 1 means we are past the group's first cycle
            if workspace.measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX] > 1:
                workspace.display_data.wrote_image = False
                self.save_filename_measurements(workspace)
                return
            # NOTE(review): "FIRST_IMAGE" is written here but the skip
            # decision above is based on GROUP_INDEX — confirm whether
            # this flag is still read anywhere.
            d["FIRST_IMAGE"] = False
        elif self.when_to_save == WS_LAST_CYCLE:
            # Actual write happens in post_group
            workspace.display_data.wrote_image = False
            self.save_filename_measurements( workspace)
            return
        self.save_image(workspace)
        return True
    def run_movie(self, workspace):
        '''Append the current cycle's image as one frame of the output movie.

        Per-group state lives in the module dictionary:
          N_FRAMES      - total number of frames (set in prepare_group)
          CURRENT_FRAME - index of the next frame to write, or the string
                          "Ignore" if the user declined to overwrite
        '''
        out_file = self.get_filename(workspace, check_overwrite=False)
        # overwrite checks are made only for first frame.
        d = self.get_dictionary(workspace.image_set_list)
        if d["CURRENT_FRAME"] == 0 and os.path.exists(out_file):
            if not self.check_overwrite(out_file, workspace):
                # Mark the whole group as skipped
                d["CURRENT_FRAME"] = "Ignore"
                return
            else:
                # Have to delete the old movie before making the new one
                os.remove(out_file)
        elif d["CURRENT_FRAME"] == "Ignore":
            return
        image = workspace.image_set.get_image(self.image_name.value)
        pixels = image.pixel_data
        # Scale to 8-bit range; assumes pixel data is in [0, 1] — TODO confirm
        pixels = pixels * 255
        frames = d['N_FRAMES']
        current_frame = d["CURRENT_FRAME"]
        d["CURRENT_FRAME"] += 1
        self.do_save_image(workspace, out_file, pixels, ome.PT_UINT8,
                           t = current_frame, size_t = frames)
def run_objects(self, workspace):
#
# First, check to see if we should save this image
#
if self.when_to_save == WS_FIRST_CYCLE:
if workspace.measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX] > 1:
workspace.display_data.wrote_image = False
self.save_filename_measurements(workspace)
return
elif self.when_to_save == WS_LAST_CYCLE:
workspace.display_data.wrote_image = False
self.save_filename_measurements( workspace)
return
self.save_objects(workspace)
    def save_objects(self, workspace):
        '''Write the selected objects to disk as MAT, grayscale or color image'''
        objects_name = self.objects_name.value
        objects = workspace.object_set.get_objects(objects_name)
        filename = self.get_filename(workspace)
        if filename is None: # failed overwrite check
            return
        # get_labels() yields (label matrix, indices) pairs; keep matrices
        labels = [l for l, c in objects.get_labels()]
        if self.get_file_format() == FF_MAT:
            pixels = objects.segmented
            scipy.io.matlab.mio.savemat(filename,{"Image":pixels},format='5')
        elif self.gray_or_color == GC_GRAYSCALE:
            # Object number becomes the gray level; use 16 bits when there
            # are more than 255 objects
            if objects.count > 255:
                pixel_type = ome.PT_UINT16
            else:
                pixel_type = ome.PT_UINT8
            # One plane per label matrix (overlapping objects span planes)
            for i, l in enumerate(labels):
                self.do_save_image(
                    workspace, filename, l, pixel_type, t=i, size_t=len(labels))
        else:
            # Color: map label numbers through a colormap
            if self.colormap == cps.DEFAULT:
                colormap = cpp.get_default_colormap()
            else:
                colormap = self.colormap.value
            cm = matplotlib.cm.get_cmap(colormap)
            cpixels = np.zeros((labels[0].shape[0], labels[0].shape[1], 3))
            counts = np.zeros(labels[0].shape, int)
            mapper = matplotlib.cm.ScalarMappable(cmap=cm)
            # Color each plane, then average RGB where planes overlap
            for pixels in labels:
                cpixels[pixels != 0, :] += \
                    mapper.to_rgba(distance_color_labels(pixels),
                                   bytes=True)[pixels != 0, :3]
                counts[pixels != 0] += 1
            counts[counts == 0] = 1
            cpixels = cpixels / counts[:, :, np.newaxis]
            self.do_save_image(workspace, filename, cpixels, ome.PT_UINT8)
        self.save_filename_measurements(workspace)
        if self.show_window:
            workspace.display_data.wrote_image = True
def post_group(self, workspace, *args):
if (self.when_to_save == WS_LAST_CYCLE and
self.save_image_or_figure != IF_MOVIE):
if self.save_image_or_figure == IF_OBJECTS:
self.save_objects(workspace)
else:
self.save_image(workspace)
def do_save_image(self, workspace, filename, pixels, pixel_type,
c = 0, z = 0, t = 0,
size_c = 1, size_z = 1, size_t = 1,
channel_names = None):
'''Save image using bioformats
workspace - the current workspace
filename - save to this filename
pixels - the image to save
pixel_type - save using this pixel type
c - the image's channel index
z - the image's z index
t - the image's t index
sizeC - # of channels in the stack
sizeZ - # of z stacks
sizeT - # of timepoints in the stack
channel_names - names of the channels (make up names if not present
'''
write_image(filename, pixels, pixel_type,
c = c, z = z, t = t,
size_c = size_c, size_z = size_z, size_t = size_t,
channel_names = channel_names)
    def save_image(self, workspace):
        """Fetch the image/mask/cropping, convert to the requested bit
        depth and write it to disk.

        Handles optional per-channel rescaling, clipping to [0, 1],
        colormapping of grayscale 8-bit images, and the MAT / BMP /
        bioformats output paths.
        """
        if self.show_window:
            workspace.display_data.wrote_image = False
        image = workspace.image_set.get_image(self.image_name.value)
        if self.save_image_or_figure == IF_IMAGE:
            pixels = image.pixel_data
            # Integer data bound for 16-bit output can skip scaling
            u16hack = (self.get_bit_depth() == BIT_DEPTH_16 and
                       pixels.dtype.kind in ('u', 'i'))
            if self.file_format != FF_MAT:
                if self.rescale.value:
                    pixels = pixels.copy()
                    # Normalize intensities for each channel
                    if pixels.ndim == 3:
                        # RGB
                        for i in range(3):
                            img_min = np.min(pixels[:,:,i])
                            img_max = np.max(pixels[:,:,i])
                            if img_max > img_min:
                                pixels[:,:,i] = (pixels[:,:,i] - img_min) / (img_max - img_min)
                    else:
                        # Grayscale
                        img_min = np.min(pixels)
                        img_max = np.max(pixels)
                        if img_max > img_min:
                            pixels = (pixels - img_min) / (img_max - img_min)
                elif not (u16hack or self.get_bit_depth() == BIT_DEPTH_FLOAT):
                    # Clip at 0 and 1
                    if np.max(pixels) > 1 or np.min(pixels) < 0:
                        sys.stderr.write(
                            "Warning, clipping image %s before output. Some intensities are outside of range 0-1" %
                            self.image_name.value)
                        pixels = pixels.copy()
                        pixels[pixels < 0] = 0
                        pixels[pixels > 1] = 1
            # Choose the output pixel type and scale accordingly
            if pixels.ndim == 2 and self.colormap != CM_GRAY and\
               self.get_bit_depth() == BIT_DEPTH_8:
                # Convert grayscale image to rgb for writing
                if self.colormap == cps.DEFAULT:
                    colormap = cpp.get_default_colormap()
                else:
                    colormap = self.colormap.value
                cm = matplotlib.cm.get_cmap(colormap)
                mapper = matplotlib.cm.ScalarMappable(cmap=cm)
                pixels = mapper.to_rgba(pixels, bytes=True)
                pixel_type = ome.PT_UINT8
            elif self.get_bit_depth() == BIT_DEPTH_8:
                pixels = (pixels*255).astype(np.uint8)
                pixel_type = ome.PT_UINT8
            elif self.get_bit_depth() == BIT_DEPTH_FLOAT:
                pixel_type = ome.PT_FLOAT
            else:
                if not u16hack:
                    # NOTE(review): no astype(np.uint16) here — presumably
                    # the writer converts using pixel_type; confirm.
                    pixels = (pixels*65535)
                pixel_type = ome.PT_UINT16
        elif self.save_image_or_figure == IF_MASK:
            pixels = image.mask.astype(np.uint8) * 255
            pixel_type = ome.PT_UINT8
        elif self.save_image_or_figure == IF_CROPPING:
            pixels = image.crop_mask.astype(np.uint8) * 255
            pixel_type = ome.PT_UINT8
        filename = self.get_filename(workspace)
        if filename is None: # failed overwrite check
            return
        if self.get_file_format() == FF_MAT:
            scipy.io.matlab.mio.savemat(filename,{"Image":pixels},format='5')
        elif self.get_file_format() == FF_BMP:
            save_bmp(filename, pixels)
        else:
            self.do_save_image(workspace, filename, pixels, pixel_type)
        if self.show_window:
            workspace.display_data.wrote_image = True
        if self.when_to_save != WS_LAST_CYCLE:
            # Last-cycle saves record their measurements in run_image
            self.save_filename_measurements(workspace)
def check_overwrite(self, filename, workspace):
'''Check to see if it's legal to overwrite a file
Throws an exception if can't overwrite and no interaction available.
Returns False if can't overwrite, otherwise True.
'''
if not self.overwrite.value and os.path.isfile(filename):
try:
return (workspace.interaction_request(self, workspace.measurements.image_set_number, filename) == "Yes")
except workspace.NoInteractionException:
raise ValueError('SaveImages: trying to overwrite %s in headless mode, but Overwrite files is set to "No"' % (filename))
return True
def handle_interaction(self, image_set_number, filename):
'''handle an interaction request from check_overwrite()'''
import wx
dlg = wx.MessageDialog(wx.GetApp().TopWindow,
"%s #%d, set #%d - Do you want to overwrite %s?" % \
(self.module_name, self.module_num, image_set_number, filename),
"Warning: overwriting file", wx.YES_NO | wx.ICON_QUESTION)
result = dlg.ShowModal() == wx.ID_YES
return "Yes" if result else "No"
def save_filename_measurements(self, workspace):
if self.update_file_names.value:
filename = self.get_filename(workspace, make_dirs = False,
check_overwrite = False)
pn, fn = os.path.split(filename)
url = pathname2url(filename)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.file_name_feature,
fn,
can_overwrite=True)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.path_name_feature,
pn,
can_overwrite=True)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.url_feature,
url,
can_overwrite=True)
@property
def file_name_feature(self):
'''The file name measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_FILE_NAME, self.objects_name.value))
return '_'.join((C_FILE_NAME, self.image_name.value))
@property
def path_name_feature(self):
'''The path name measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_PATH_NAME, self.objects_name.value))
return '_'.join((C_PATH_NAME, self.image_name.value))
@property
def url_feature(self):
'''The URL measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_URL, self.objects_name.value))
return '_'.join((C_URL, self.image_name.value))
@property
def source_file_name_feature(self):
'''The file name measurement for the exemplar disk image'''
return '_'.join((C_FILE_NAME, self.file_image_name.value))
def source_path(self, workspace):
'''The path for the image data, or its first parent with a path'''
if self.file_name_method.value == FN_FROM_IMAGE:
path_feature = '%s_%s' % (C_PATH_NAME, self.file_image_name.value)
assert workspace.measurements.has_feature(cpmeas.IMAGE, path_feature),\
"Image %s does not have a path!" % (self.file_image_name.value)
return workspace.measurements.get_current_image_measurement(path_feature)
# ... otherwise, chase the cpimage hierarchy looking for an image with a path
cur_image = workspace.image_set.get_image(self.image_name.value)
while cur_image.path_name is None:
cur_image = cur_image.parent_image
assert cur_image is not None, "Could not determine source path for image %s' % (self.image_name.value)"
return cur_image.path_name
    def get_measurement_columns(self, pipeline):
        '''Return database column definitions for the measurements this
        module records (file name and path name, when enabled).

        NOTE(review): save_filename_measurements also records
        self.url_feature, which is not declared here — confirm whether a
        URL column should be added.
        '''
        if self.update_file_names.value:
            return [(cpmeas.IMAGE,
                     self.file_name_feature,
                     cpmeas.COLTYPE_VARCHAR_FILE_NAME),
                    (cpmeas.IMAGE,
                     self.path_name_feature,
                     cpmeas.COLTYPE_VARCHAR_PATH_NAME)]
        else:
            return []
    def get_filename(self, workspace, make_dirs=True, check_overwrite=True):
        """Concoct a filename for the current image based on the user settings

        workspace - the current workspace
        make_dirs - when True, create missing output directories
        check_overwrite - when True, ask/abort before clobbering a file

        Returns the full path, or None if the user refused the overwrite.
        """
        measurements=workspace.measurements
        if self.file_name_method == FN_SINGLE_NAME:
            # Fixed name; metadata tags may make it unique per image set
            filename = self.single_file_name.value
            filename = workspace.measurements.apply_metadata(filename)
        elif self.file_name_method == FN_SEQUENTIAL:
            filename = self.single_file_name.value
            filename = workspace.measurements.apply_metadata(filename)
            # Zero-pad the cycle number; use at least enough digits to
            # represent every image set in the run
            n_image_sets = workspace.measurements.image_set_count
            ndigits = int(np.ceil(np.log10(n_image_sets+1)))
            ndigits = max((ndigits,self.number_of_digits.value))
            padded_num_string = str(measurements.image_set_number).zfill(ndigits)
            filename = '%s%s'%(filename, padded_num_string)
        else:
            # FN_FROM_IMAGE: reuse the source image's file name (without
            # its extension), optionally with a suffix
            file_name_feature = self.source_file_name_feature
            filename = measurements.get_current_measurement('Image',
                                                            file_name_feature)
            filename = os.path.splitext(filename)[0]
            if self.wants_file_name_suffix:
                suffix = self.file_name_suffix.value
                suffix = workspace.measurements.apply_metadata(suffix)
                filename += suffix
        filename = "%s.%s"%(filename,self.get_file_format())
        pathname = self.pathname.get_absolute_path(measurements)
        if self.create_subdirectories:
            # Mirror the input folder structure below the output folder
            image_path = self.source_path(workspace)
            subdir = relpath(image_path, self.root_dir.get_absolute_path())
            pathname = os.path.join(pathname, subdir)
        if len(pathname) and not os.path.isdir(pathname) and make_dirs:
            try:
                os.makedirs(pathname)
            except:
                #
                # On cluster, this can fail if the path was created by
                # another process after this process found it did not exist.
                #
                if not os.path.isdir(pathname):
                    raise
        result = os.path.join(pathname, filename)
        if check_overwrite and not self.check_overwrite(result, workspace):
            # User declined to overwrite: signal the caller with None
            return
        if check_overwrite and os.path.isfile(result):
            try:
                os.remove(result)
            except:
                # The file may be held open by the bioformats reader cache;
                # clear it and retry once
                import bioformats
                bioformats.clear_image_reader_cache()
                os.remove(result)
        return result
def get_file_format(self):
"""Return the file format associated with the extension in self.file_format
"""
if self.save_image_or_figure == IF_MOVIE:
return self.movie_format.value
return self.file_format.value
def get_bit_depth(self):
if (self.save_image_or_figure == IF_IMAGE and
self.get_file_format() in FF_SUPPORTING_16_BIT):
return self.bit_depth.value
else:
return BIT_DEPTH_8
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
"""Adjust the setting values to be backwards-compatible with old versions
"""
PC_DEFAULT = "Default output folder"
#################################
#
# Matlab legacy
#
#################################
if from_matlab and variable_revision_number == 12:
# self.create_subdirectories.value is already False by default.
variable_revision_number = 13
if from_matlab and variable_revision_number == 13:
new_setting_values = list(setting_values)
for i in [3, 12]:
if setting_values[i] == '\\':
new_setting_values[i] == cps.DO_NOT_USE
variable_revision_number = 14
if from_matlab and variable_revision_number == 14:
new_setting_values = []
if setting_values[0].isdigit():
new_setting_values.extend([IF_FIGURE,setting_values[1]])
elif setting_values[3] == 'avi':
new_setting_values.extend([IF_MOVIE, setting_values[0]])
elif setting_values[0].startswith("Cropping"):
new_setting_values.extend([IF_CROPPING,
setting_values[0][len("Cropping"):]])
elif setting_values[0].startswith("CropMask"):
new_setting_values.extend([IF_MASK,
setting_values[0][len("CropMask"):]])
else:
new_setting_values.extend([IF_IMAGE, setting_values[0]])
new_setting_values.append(new_setting_values[1])
if setting_values[1] == 'N':
new_setting_values.extend([FN_SEQUENTIAL,"None","None"])
elif setting_values[1][0] == '=':
new_setting_values.extend([FN_SINGLE_NAME,setting_values[1][1:],
setting_values[1][1:]])
else:
if len(cpmeas.find_metadata_tokens(setting_values[1])):
new_setting_values.extend([FN_WITH_METADATA, setting_values[1],
setting_values[1]])
else:
new_setting_values.extend([FN_FROM_IMAGE, setting_values[1],
setting_values[1]])
new_setting_values.extend(setting_values[2:4])
if setting_values[4] == '.':
new_setting_values.extend([PC_DEFAULT, "None"])
elif setting_values[4] == '&':
new_setting_values.extend([PC_WITH_IMAGE, "None"])
else:
if len(cpmeas.find_metadata_tokens(setting_values[1])):
new_setting_values.extend([PC_WITH_METADATA,
setting_values[4]])
else:
new_setting_values.extend([PC_CUSTOM, setting_values[4]])
new_setting_values.extend(setting_values[5:11])
#
# Last value is there just to display some text in Matlab
#
new_setting_values.extend(setting_values[12:-1])
setting_values = new_setting_values
from_matlab = False
variable_revision_number = 1
##########################
#
# Version 2
#
##########################
if not from_matlab and variable_revision_number == 1:
# The logic of the question about overwriting was reversed.
if setting_values[11] == cps.YES:
setting_values[11] = cps.NO
else:
setting_values[11] = cps.YES
variable_revision_number = 2
#########################
#
# Version 3
#
#########################
if (not from_matlab) and variable_revision_number == 2:
# Default image/output directory -> Default Image Folder
if setting_values[8].startswith("Default output"):
setting_values = (setting_values[:8] +
[PC_DEFAULT]+ setting_values[9:])
elif setting_values[8].startswith("Same"):
setting_values = (setting_values[:8] +
[PC_WITH_IMAGE] + setting_values[9:])
variable_revision_number = 3
#########################
#
# Version 4
#
#########################
if (not from_matlab) and variable_revision_number == 3:
# Changed save type from "Figure" to "Module window"
if setting_values[0] == "Figure":
setting_values[0] = IF_FIGURE
setting_values = standardize_default_folder_names(setting_values,8)
variable_revision_number = 4
#########################
#
# Version 5
#
#########################
if (not from_matlab) and variable_revision_number == 4:
save_image_or_figure, image_name, figure_name,\
file_name_method, file_image_name, \
single_file_name, file_name_suffix, file_format, \
pathname_choice, pathname, bit_depth, \
overwrite, when_to_save, \
when_to_save_movie, rescale, colormap, \
update_file_names, create_subdirectories = setting_values
pathname = SaveImagesDirectoryPath.static_join_string(
pathname_choice, pathname)
setting_values = [
save_image_or_figure, image_name, figure_name,
file_name_method, file_image_name, single_file_name,
file_name_suffix != cps.DO_NOT_USE,
file_name_suffix, file_format,
pathname, bit_depth, overwrite, when_to_save,
rescale, colormap, update_file_names, create_subdirectories]
variable_revision_number = 5
#######################
#
# Version 6
#
#######################
if (not from_matlab) and variable_revision_number == 5:
setting_values = list(setting_values)
file_name_method = setting_values[3]
single_file_name = setting_values[5]
wants_file_suffix = setting_values[6]
file_name_suffix = setting_values[7]
if file_name_method == FN_IMAGE_FILENAME_WITH_METADATA:
file_name_suffix = single_file_name
wants_file_suffix = cps.YES
file_name_method = FN_FROM_IMAGE
elif file_name_method == FN_WITH_METADATA:
file_name_method = FN_SINGLE_NAME
setting_values[3] = file_name_method
setting_values[6] = wants_file_suffix
setting_values[7] = file_name_suffix
variable_revision_number = 6
######################
#
# Version 7 - added objects
#
######################
if (not from_matlab) and (variable_revision_number == 6):
setting_values = (
setting_values[:2] + ["None"] + setting_values[2:14] +
[ GC_GRAYSCALE ] + setting_values[14:])
variable_revision_number = 7
######################
#
# Version 8 - added root_dir
#
######################
if (not from_matlab) and (variable_revision_number == 7):
setting_values = setting_values + [DEFAULT_INPUT_FOLDER_NAME]
variable_revision_number = 8
######################
#
# Version 9 - FF_TIF now outputs .tif files (go figure), so
# change FF_TIF in settings to FF_TIFF to maintain ultimate
# backwards compatibiliy.
#
######################
if (not from_matlab) and (variable_revision_number == 8):
if setting_values[9] == FF_TIF:
setting_values = setting_values[:9] + [FF_TIFF] + \
setting_values[10:]
variable_revision_number = 9
######################
#
# Version 10 - Add number of digits for sequential numbering
#
######################
if (not from_matlab) and (variable_revision_number == 9):
setting_values = setting_values[:7] + ["4"] + \
setting_values[7:]
variable_revision_number = 10
######################
#
# Version 11 - Allow selection of movie format
#
######################
if (not from_matlab) and (variable_revision_number == 10):
setting_values = setting_values + [ FF_AVI ]
variable_revision_number = 11
######################
#
# Version 11.5 - name of bit depth changed
# (can fix w/o version change)
#
######################
if variable_revision_number == 11:
bit_depth = setting_values[OFFSET_BIT_DEPTH_V11]
bit_depth = {
OLD_BIT_DEPTH_8:BIT_DEPTH_8,
OLD_BIT_DEPTH_16:BIT_DEPTH_16 }.get(bit_depth, bit_depth)
setting_values = setting_values[:OFFSET_BIT_DEPTH_V11] + \
[bit_depth] + setting_values[OFFSET_BIT_DEPTH_V11+1:]
setting_values[OFFSET_DIRECTORY_PATH] = \
SaveImagesDirectoryPath.upgrade_setting(setting_values[OFFSET_DIRECTORY_PATH])
return setting_values, variable_revision_number, from_matlab
def validate_module(self, pipeline):
    """Raise a ValidationError for setting combinations that cannot run.

    Two checks are performed:
    1. When saving an image/mask/cropping on the first or every cycle, the
       source image must be available on every cycle -- a provider flagged
       as available only after the last cycle of a group cannot be used.
    2. Any metadata tags referenced in a custom file name (or suffix) must
       be defined by an upstream module in the pipeline.
    """
    if (self.save_image_or_figure in (IF_IMAGE, IF_MASK, IF_CROPPING) and
            self.when_to_save in (WS_FIRST_CYCLE, WS_EVERY_CYCLE)):
        #
        # Make sure that the image name is available on every cycle
        #
        for setting in cps.get_name_providers(pipeline,
                                              self.image_name):
            if setting.provided_attributes.get(cps.AVAILABLE_ON_LAST_ATTRIBUTE):
                #
                # If we fell through, then you can only save on the last cycle
                #
                raise cps.ValidationError("%s is only available after processing all images in an image group" %
                                          self.image_name.value,
                                          self.when_to_save)

    # XXX - should check that if file_name_method is
    # FN_FROM_IMAGE, that the named image actually has the
    # required path measurement

    # Make sure metadata tags exist
    if self.file_name_method == FN_SINGLE_NAME or \
            (self.file_name_method == FN_FROM_IMAGE and self.wants_file_name_suffix.value):
        text_str = self.single_file_name.value if self.file_name_method == FN_SINGLE_NAME else self.file_name_suffix.value
        undefined_tags = pipeline.get_undefined_metadata_tags(text_str)
        if len(undefined_tags) > 0:
            raise cps.ValidationError("%s is not a defined metadata tag. Check the metadata specifications in your load modules" %
                                      undefined_tags[0],
                                      self.single_file_name if self.file_name_method == FN_SINGLE_NAME else self.file_name_suffix)
class SaveImagesDirectoryPath(cps.DirectoryPath):
    '''A specialized version of DirectoryPath to handle saving in the image dir

    Adds the PC_WITH_IMAGE choice, which resolves the output directory to the
    directory of the source image (looked up from the per-cycle PathName
    measurement), and knows how to upgrade legacy setting encodings.
    '''

    def __init__(self, text, file_image_name, doc):
        '''Constructor

        text - explanatory text to display
        file_image_name - the file_image_name setting so we can save in same dir
        doc - documentation for user
        '''
        super(SaveImagesDirectoryPath, self).__init__(
            text, dir_choices=[
                cps.DEFAULT_OUTPUT_FOLDER_NAME, cps.DEFAULT_INPUT_FOLDER_NAME,
                PC_WITH_IMAGE, cps.ABSOLUTE_FOLDER_NAME,
                cps.DEFAULT_OUTPUT_SUBFOLDER_NAME,
                cps.DEFAULT_INPUT_SUBFOLDER_NAME], doc=doc)
        self.file_image_name = file_image_name

    def get_absolute_path(self, measurements=None, image_set_index=None):
        '''Return the resolved output directory for the current cycle.

        For PC_WITH_IMAGE the directory comes from the image's PathName
        measurement; all other choices defer to the base class.
        '''
        if self.dir_choice == PC_WITH_IMAGE:
            path_name_feature = "PathName_%s" % self.file_image_name.value
            return measurements.get_current_image_measurement(path_name_feature)
        return super(SaveImagesDirectoryPath, self).get_absolute_path(
            measurements, image_set_index)

    def test_valid(self, pipeline):
        '''Raise ValidationError if the stored choice is not one we offer.'''
        if self.dir_choice not in self.dir_choices:
            raise cps.ValidationError("%s is not a valid directory option" %
                                      self.dir_choice, self)

    @staticmethod
    def upgrade_setting(value):
        '''Upgrade setting from previous version

        Maps legacy "with image" spellings onto PC_WITH_IMAGE and converts
        old custom-path encodings ('.'-prefixed = output subfolder,
        '&'-prefixed = input subfolder, otherwise absolute) into the modern
        DirectoryPath encoding.
        '''
        dir_choice, custom_path = cps.DirectoryPath.split_string(value)
        if dir_choice in OLD_PC_WITH_IMAGE_VALUES:
            dir_choice = PC_WITH_IMAGE
        elif dir_choice in (PC_CUSTOM, PC_WITH_METADATA):
            if custom_path.startswith('.'):
                dir_choice = cps.DEFAULT_OUTPUT_SUBFOLDER_NAME
            elif custom_path.startswith('&'):
                dir_choice = cps.DEFAULT_INPUT_SUBFOLDER_NAME
                # '&' marker is replaced by '.' so the remainder is a
                # normal relative path
                custom_path = '.' + custom_path[1:]
            else:
                dir_choice = cps.ABSOLUTE_FOLDER_NAME
        else:
            return cps.DirectoryPath.upgrade_setting(value)
        return cps.DirectoryPath.static_join_string(dir_choice, custom_path)
def save_bmp(path, img):
'''Save an image as a Microsoft .bmp file
path - path to file to save
img - either a 2d, uint8 image or a 2d + 3 plane uint8 RGB color image
Saves file as an uncompressed 8-bit or 24-bit .bmp image
'''
#
# Details from
# http://en.wikipedia.org/wiki/BMP_file_format#cite_note-DIBHeaderTypes-3
#
# BITMAPFILEHEADER
# http://msdn.microsoft.com/en-us/library/dd183374(v=vs.85).aspx
#
# BITMAPINFOHEADER
# http://msdn.microsoft.com/en-us/library/dd183376(v=vs.85).aspx
#
BITMAPINFOHEADER_SIZE = 40
img = img.astype(np.uint8)
w = img.shape[1]
h = img.shape[0]
#
# Convert RGB to interleaved
#
if img.ndim == 3:
rgb = True
#
# Compute padded raster length
#
raster_length = (w * 3 + 3) & ~ 3
tmp = np.zeros((h, raster_length), np.uint8)
#
# Do not understand why but RGB is BGR
#
tmp[:, 2:(w*3):3] = img[:, :, 0]
tmp[:, 1:(w*3):3] = img[:, :, 1]
tmp[:, 0:(w*3):3] = img[:, :, 2]
img = tmp
else:
rgb = False
if w % 4 != 0:
raster_length = (w + 3) & ~ 3
tmp = np.zeros((h, raster_length), np.uint8)
tmp[:, :w] = img
img = tmp
#
# The image is upside-down in .BMP
#
bmp = np.ascontiguousarray(np.flipud(img)).data
with open(path, "wb") as fd:
def write2(value):
'''write a two-byte little-endian value to the file'''
fd.write(np.array([value], "<u2").data[:2])
def write4(value):
'''write a four-byte little-endian value to the file'''
fd.write(np.array([value], "<u4").data[:4])
#
# Bitmap file header (1st pass)
# byte
# 0-1 = "BM"
# 2-5 = length of file
# 6-9 = 0
# 10-13 = offset from beginning of file to bitmap bits
fd.write("BM")
length = 14 # BITMAPFILEHEADER
length += BITMAPINFOHEADER_SIZE
if not rgb:
length += 4 * 256 # 256 color table entries
hdr_length = length
length += len(bmp)
write4(length)
write4(0)
write4(hdr_length)
#
# BITMAPINFOHEADER
#
write4(BITMAPINFOHEADER_SIZE) # biSize
write4(w) # biWidth
write4(h) # biHeight
write2(1) # biPlanes = 1
write2(24 if rgb else 8) # biBitCount
write4(0) # biCompression = BI_RGB
write4(len(bmp)) # biSizeImage
write4(7200) # biXPelsPerMeter
write4(7200) # biYPelsPerMeter
write4(0 if rgb else 256) # biClrUsed (no palette)
write4(0) # biClrImportant
if not rgb:
# The color table
color_table = np.column_stack(
[np.arange(256)]* 3 +
[np.zeros(256, np.uint32)]).astype(np.uint8)
fd.write(np.ascontiguousarray(color_table, np.uint8).data)
fd.write(bmp)
| gpl-2.0 |
jseabold/statsmodels | statsmodels/tsa/statespace/tests/test_dynamic_factor.py | 4 | 38900 | """
Tests for dynamic factor models
Author: Chad Fulton
License: Simplified-BSD
"""
import os
import re
import warnings
import numpy as np
from numpy.testing import assert_equal, assert_raises, assert_allclose
import pandas as pd
import pytest
from statsmodels.tsa.statespace import dynamic_factor
from .results import results_varmax, results_dynamic_factor
from statsmodels.iolib.summary import forg
# Load the Stata-generated reference output (predictions etc.) once at import
# time; the test classes below compare model results against these columns.
current_path = os.path.dirname(os.path.abspath(__file__))
output_path = os.path.join('results', 'results_dynamic_factor_stata.csv')
output_results = pd.read_csv(os.path.join(current_path, output_path))
class CheckDynamicFactor(object):
    """Shared test harness for DynamicFactor models.

    Subclasses call ``setup_class`` with a dict of reference ("true") values
    produced by Stata and the model specification; the check methods then
    compare the statsmodels results against those reference values.
    """

    @classmethod
    def setup_class(cls, true, k_factors, factor_order, cov_type='approx',
                    included_vars=['dln_inv', 'dln_inc', 'dln_consump'],
                    demean=False, filter=True, **kwargs):
        # NOTE(review): `included_vars` is a mutable default and `filter`
        # shadows the builtin; both are only read here, so behavior is safe.
        cls.true = true
        # 1960:Q1 - 1982:Q4
        dta = pd.DataFrame(
            results_varmax.lutkepohl_data, columns=['inv', 'inc', 'consump'],
            index=pd.date_range('1960-01-01', '1982-10-01', freq='QS'))

        # Log-difference the levels data
        dta['dln_inv'] = np.log(dta['inv']).diff()
        dta['dln_inc'] = np.log(dta['inc']).diff()
        dta['dln_consump'] = np.log(dta['consump']).diff()

        endog = dta.loc['1960-04-01':'1978-10-01', included_vars]

        if demean:
            endog -= dta.iloc[1:][included_vars].mean()

        cls.model = dynamic_factor.DynamicFactor(endog, k_factors=k_factors,
                                                 factor_order=factor_order,
                                                 **kwargs)

        if filter:
            cls.results = cls.model.smooth(true['params'], cov_type=cov_type)

    def test_params(self):
        # Smoke test to make sure the start_params are well-defined and
        # lead to a well-defined model
        self.model.filter(self.model.start_params)
        # Similarly a smoke test for param_names
        assert_equal(len(self.model.start_params), len(self.model.param_names))
        # Finally make sure the transform and untransform do their job
        actual = self.model.transform_params(
            self.model.untransform_params(self.model.start_params))
        assert_allclose(actual, self.model.start_params)
        # Also in the case of enforce stationarity = False
        self.model.enforce_stationarity = False
        actual = self.model.transform_params(
            self.model.untransform_params(self.model.start_params))
        self.model.enforce_stationarity = True
        assert_allclose(actual, self.model.start_params)

    def test_results(self, close_figures):
        # Smoke test for creating the summary
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.results.summary()

        # Test coefficient matrix creation
        # (via a different, more direct, method)
        if self.model.factor_order > 0:
            model = self.model
            k_factors = model.k_factors
            pft_params = self.results.params[model._params_factor_transition]
            coefficients = np.array(pft_params).reshape(
                k_factors, k_factors * model.factor_order)
            coefficient_matrices = np.array([
                coefficients[:self.model.k_factors,
                             i*self.model.k_factors:(i+1)*self.model.k_factors]
                for i in range(self.model.factor_order)
            ])
            assert_equal(
                self.results.coefficient_matrices_var,
                coefficient_matrices)
        else:
            assert_equal(self.results.coefficient_matrices_var, None)

    @pytest.mark.matplotlib
    def test_plot_coefficients_of_determination(self, close_figures):
        # Smoke test for plot_coefficients_of_determination
        self.results.plot_coefficients_of_determination()

    def test_no_enforce(self):
        # NOTE(review): deliberately disabled by the early return below;
        # the remaining body is kept as documentation of the intended test.
        return
        # Test that nothing goes wrong when we do not enforce stationarity
        params = self.model.untransform_params(self.true['params'])
        params[self.model._params_transition] = (
            self.true['params'][self.model._params_transition])
        self.model.enforce_stationarity = False
        results = self.model.filter(params, transformed=False)
        self.model.enforce_stationarity = True
        assert_allclose(results.llf, self.results.llf, rtol=1e-5)

    def test_mle(self, init_powell=True):
        # Fit by MLE (optionally initialized by a Powell pass) and require
        # the achieved log-likelihood to match the reference llf.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            start_params = self.model.start_params
            if init_powell:
                results = self.model.fit(method='powell',
                                         maxiter=100, disp=False)
                start_params = results.params
            results = self.model.fit(start_params, maxiter=1000, disp=False)
            results = self.model.fit(results.params, method='nm', maxiter=1000,
                                     disp=False)
            if not results.llf > self.results.llf:
                assert_allclose(results.llf, self.results.llf, rtol=1e-5)

    def test_loglike(self):
        assert_allclose(self.results.llf, self.true['loglike'], rtol=1e-6)

    def test_aic(self):
        # We only get 3 digits from Stata
        assert_allclose(self.results.aic, self.true['aic'], atol=3)

    def test_bic(self):
        # We only get 3 digits from Stata
        assert_allclose(self.results.bic, self.true['bic'], atol=3)

    def test_predict(self, **kwargs):
        # Tests predict + forecast
        self.results.predict(end='1982-10-01', **kwargs)
        assert_allclose(
            self.results.predict(end='1982-10-01', **kwargs),
            self.true['predict'],
            atol=1e-6)

    def test_dynamic_predict(self, **kwargs):
        # Tests predict + dynamic predict + forecast
        assert_allclose(
            self.results.predict(end='1982-10-01', dynamic='1961-01-01',
                                 **kwargs),
            self.true['dynamic_predict'],
            atol=1e-6)
class TestDynamicFactor(CheckDynamicFactor):
    """
    Test for a dynamic factor model with 1 AR(2) factor
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm_1', 'predict_dfm_2', 'predict_dfm_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm_1', 'dyn_predict_dfm_2', 'dyn_predict_dfm_3']]
        super(TestDynamicFactor, cls).setup_class(
            true, k_factors=1, factor_order=2)

    def test_bse_approx(self):
        # Standard errors from the numerically approximated covariance
        # should match Stata's OIM standard errors.
        bse = self.results._cov_params_approx().diagonal()**0.5
        assert_allclose(bse, self.true['bse_oim'], atol=1e-5)
class TestDynamicFactor2(CheckDynamicFactor):
    """
    Test for a dynamic factor model with two VAR(1) factors
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm2.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm2_1', 'predict_dfm2_2', 'predict_dfm2_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm2_1', 'dyn_predict_dfm2_2', 'dyn_predict_dfm2_3']]
        super(TestDynamicFactor2, cls).setup_class(
            true, k_factors=2, factor_order=1)

    def test_mle(self):
        # Stata's MLE on this model does not converge, so no reason to check
        pass

    def test_bse(self):
        # Stata's MLE on this model does not converge, and four of their
        # params do not even have bse (possibly they are still at starting
        # values?), so no reason to check this
        pass

    def test_aic(self):
        # Stata uses 9 df (i.e. 9 params) here instead of 13, because since
        # the model did not converge, 4 of the parameters are not fully
        # estimated (possibly they are still at starting values?) so the
        # AIC is off
        pass

    def test_bic(self):
        # Stata uses 9 df (i.e. 9 params) here instead of 13, because since
        # the model did not converge, 4 of the parameters are not fully
        # estimated (possibly they are still at starting values?) so the
        # BIC is off
        pass

    def test_summary(self):
        # Check the structure and coefficients of every summary table.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            summary = self.results.summary()
        tables = [str(table) for table in summary.tables]
        params = self.true['params']

        # Make sure we have the right number of tables
        assert_equal(
            len(tables),
            2 + self.model.k_endog + self.model.k_factors + 1)

        # Check the model overview table
        assert re.search(
            r'Model:.*DynamicFactor\(factors=2, order=1\)',
            tables[0])

        # For each endogenous variable, check the output
        for i in range(self.model.k_endog):
            offset_loading = self.model.k_factors * i
            table = tables[i + 2]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search('Results for equation %s' % name, table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 7)

            # -> Check that we have the right coefficients
            assert re.search(
                'loading.f1 +' + forg(params[offset_loading + 0], prec=4),
                table)
            assert re.search(
                'loading.f2 +' + forg(params[offset_loading + 1], prec=4),
                table)

        # For each factor, check the output
        for i in range(self.model.k_factors):
            offset = (self.model.k_endog * (self.model.k_factors + 1) +
                      i * self.model.k_factors)
            table = tables[self.model.k_endog + i + 2]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search('Results for factor equation f%d' % (i+1), table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 7)

            # -> Check that we have the right coefficients
            assert re.search('L1.f1 +' + forg(params[offset + 0], prec=4),
                             table)
            assert re.search('L1.f2 +' + forg(params[offset + 1], prec=4),
                             table)

        # Check the Error covariance matrix output
        table = tables[2 + self.model.k_endog + self.model.k_factors]

        # -> Make sure we have the right table / table name
        name = self.model.endog_names[i]
        assert re.search('Error covariance matrix', table)

        # -> Make sure it's the right size
        assert_equal(len(table.split('\n')), 8)

        # -> Check that we have the right coefficients
        offset = self.model.k_endog * self.model.k_factors
        for i in range(self.model.k_endog):
            iname = self.model.endog_names[i]
            iparam = forg(params[offset + i], prec=4)
            assert re.search('sigma2.%s +%s' % (iname, iparam), table)
class TestDynamicFactor_exog1(CheckDynamicFactor):
    """
    Test for a dynamic factor model with 1 exogenous regressor: a constant
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm_exog1.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm_exog1_1',
            'predict_dfm_exog1_2',
            'predict_dfm_exog1_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm_exog1_1',
            'dyn_predict_dfm_exog1_2',
            'dyn_predict_dfm_exog1_3']]
        # Constant-only exog for the 75 in-sample observations
        exog = np.ones((75, 1))
        super(TestDynamicFactor_exog1, cls).setup_class(
            true, k_factors=1, factor_order=1, exog=exog)

    def test_predict(self):
        # Out-of-sample prediction needs exog for the 16 forecast periods
        exog = np.ones((16, 1))
        super(TestDynamicFactor_exog1, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.ones((16, 1))
        super(TestDynamicFactor_exog1, self).test_dynamic_predict(exog=exog)

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()**0.5
        assert_allclose(bse**2, self.true['var_oim'], atol=1e-5)
class TestDynamicFactor_exog2(CheckDynamicFactor):
    """
    Test for a dynamic factor model with 2 exogenous regressors: a constant
    and a time-trend
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm_exog2.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm_exog2_1',
            'predict_dfm_exog2_2',
            'predict_dfm_exog2_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm_exog2_1',
            'dyn_predict_dfm_exog2_2',
            'dyn_predict_dfm_exog2_3']]
        # Constant plus linear trend (starting at 2, matching Stata)
        exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
        super(TestDynamicFactor_exog2, cls).setup_class(
            true, k_factors=1, factor_order=1, exog=exog)

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()**0.5
        assert_allclose(bse**2, self.true['var_oim'], atol=1e-5)

    def test_predict(self):
        # Continue the trend over the 16 forecast periods
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestDynamicFactor_exog2, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestDynamicFactor_exog2, self).test_dynamic_predict(exog=exog)

    def test_summary(self):
        # Check the structure and coefficients of every summary table.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            summary = self.results.summary()
        tables = [str(table) for table in summary.tables]
        params = self.true['params']

        # Make sure we have the right number of tables
        assert_equal(
            len(tables),
            2 + self.model.k_endog + self.model.k_factors + 1)

        # Check the model overview table
        assert re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)',
                         tables[0])
        assert_equal(re.search(r'.*2 regressors', tables[0]) is None, False)

        # For each endogenous variable, check the output
        for i in range(self.model.k_endog):
            offset_loading = self.model.k_factors * i
            offset_exog = self.model.k_factors * self.model.k_endog
            table = tables[i + 2]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search('Results for equation %s' % name, table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 8)

            # -> Check that we have the right coefficients
            assert re.search(
                'loading.f1 +' + forg(params[offset_loading + 0], prec=4),
                table)
            assert re.search(
                'beta.const +' + forg(params[offset_exog + i*2 + 0], prec=4),
                table)
            assert re.search(
                'beta.x1 +' + forg(params[offset_exog + i*2 + 1], prec=4),
                table)

        # For each factor, check the output
        for i in range(self.model.k_factors):
            offset = (self.model.k_endog * (self.model.k_factors + 3) +
                      i * self.model.k_factors)
            table = tables[self.model.k_endog + i + 2]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search('Results for factor equation f%d' % (i+1), table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 6)

            # -> Check that we have the right coefficients
            assert re.search('L1.f1 +' + forg(params[offset + 0], prec=4),
                             table)

        # Check the Error covariance matrix output
        table = tables[2 + self.model.k_endog + self.model.k_factors]

        # -> Make sure we have the right table / table name
        name = self.model.endog_names[i]
        assert re.search('Error covariance matrix', table)

        # -> Make sure it's the right size
        assert_equal(len(table.split('\n')), 8)

        # -> Check that we have the right coefficients
        offset = self.model.k_endog * (self.model.k_factors + 2)
        for i in range(self.model.k_endog):
            iname = self.model.endog_names[i]
            iparam = forg(params[offset + i], prec=4)
            assert re.search('sigma2.%s +%s' % (iname, iparam), table)
class TestDynamicFactor_general_errors(CheckDynamicFactor):
    """
    Test for a dynamic factor model where errors are as general as possible,
    meaning:

    - Errors are vector autocorrelated, VAR(1)
    - Innovations are correlated
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm_gen.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm_gen_1', 'predict_dfm_gen_2', 'predict_dfm_gen_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm_gen_1',
            'dyn_predict_dfm_gen_2',
            'dyn_predict_dfm_gen_3']]
        super(TestDynamicFactor_general_errors, cls).setup_class(
            true, k_factors=1, factor_order=1, error_var=True,
            error_order=1, error_cov_type='unstructured')

    def test_bse_approx(self):
        # Only the first three and last ten variances are checked; a looser
        # tolerance is used for the error-covariance block.
        bse = self.results._cov_params_approx().diagonal()
        assert_allclose(bse[:3], self.true['var_oim'][:3], atol=1e-5)
        assert_allclose(bse[-10:], self.true['var_oim'][-10:], atol=3e-4)

    @pytest.mark.skip("Known failure, no sequence of optimizers has been "
                      "found which can achieve the maximum.")
    def test_mle(self):
        # The following gets us to llf=546.53, which is still not good enough
        # llf = 300.842477412
        # res = mod.fit(method='lbfgs', maxiter=10000)
        # llf = 460.26576722
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 542.245718508
        # res = mod.fit(res.params, method='lbfgs', maxiter=10000)
        # llf = 544.035160955
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 557.442240083
        # res = mod.fit(res.params, method='lbfgs', maxiter=10000)
        # llf = 558.199513262
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 559.049076604
        # res = mod.fit(res.params, method='nm', maxiter=10000, maxfev=10000)
        # llf = 559.049076604
        # ...
        pass

    def test_summary(self):
        # Check the structure and coefficients of every summary table,
        # including the per-equation error (VAR) tables.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            summary = self.results.summary()
        tables = [str(table) for table in summary.tables]
        params = self.true['params']

        # Make sure we have the right number of tables
        assert_equal(
            len(tables),
            2 + self.model.k_endog + self.model.k_factors +
            self.model.k_endog + 1)

        # Check the model overview table
        assert re.search(r'Model:.*DynamicFactor\(factors=1, order=1\)',
                         tables[0])
        assert re.search(r'.*VAR\(1\) errors', tables[0])

        # For each endogenous variable, check the output
        for i in range(self.model.k_endog):
            offset_loading = self.model.k_factors * i
            table = tables[i + 2]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search('Results for equation %s' % name, table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 6)

            # -> Check that we have the right coefficients
            pattern = 'loading.f1 +' + forg(params[offset_loading + 0], prec=4)
            assert re.search(pattern, table)

        # For each factor, check the output
        for i in range(self.model.k_factors):
            offset = (self.model.k_endog * self.model.k_factors +
                      6 + i * self.model.k_factors)
            table = tables[2 + self.model.k_endog + i]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search('Results for factor equation f%d' % (i+1), table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 6)

            # -> Check that we have the right coefficients
            assert re.search('L1.f1 +' + forg(params[offset + 0], prec=4),
                             table)

        # For each error equation, check the output
        for i in range(self.model.k_endog):
            offset = (self.model.k_endog * (self.model.k_factors + i) +
                      6 + self.model.k_factors)
            table = tables[2 + self.model.k_endog + self.model.k_factors + i]

            # -> Make sure we have the right table / table name
            name = self.model.endog_names[i]
            assert re.search(r'Results for error equation e\(%s\)' % name,
                             table)

            # -> Make sure it's the right size
            assert_equal(len(table.split('\n')), 8)

            # -> Check that we have the right coefficients
            for j in range(self.model.k_endog):
                name = self.model.endog_names[j]
                pattern = r'L1.e\(%s\) +%s' % (name, forg(params[offset + j],
                                                          prec=4))
                assert re.search(pattern, table)

        # Check the Error covariance matrix output
        table = tables[2 + self.model.k_endog +
                       self.model.k_factors + self.model.k_endog]

        # -> Make sure we have the right table / table name
        name = self.model.endog_names[i]
        assert re.search('Error covariance matrix', table)

        # -> Make sure it's the right size
        assert_equal(len(table.split('\n')), 11)

        # -> Check that we have the right coefficients
        offset = self.model.k_endog * self.model.k_factors
        assert re.search(
            r'cov.chol\[1,1\] +' + forg(params[offset + 0], prec=4),
            table)
        assert re.search(
            r'cov.chol\[2,1\] +' + forg(params[offset + 1], prec=4),
            table)
        assert re.search(
            r'cov.chol\[2,2\] +' + forg(params[offset + 2], prec=4),
            table)
        assert re.search(
            r'cov.chol\[3,1\] +' + forg(params[offset+3], prec=4),
            table)
        assert re.search(
            r'cov.chol\[3,2\] +' + forg(params[offset+4], prec=4),
            table)
        assert re.search(
            r'cov.chol\[3,3\] +' + forg(params[offset + 5], prec=4),
            table)
class TestDynamicFactor_ar2_errors(CheckDynamicFactor):
    """
    Test for a dynamic factor model with AR(2) errors (error_order=2);
    innovations are uncorrelated.
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm_ar2.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm_ar2_1', 'predict_dfm_ar2_2', 'predict_dfm_ar2_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm_ar2_1',
            'dyn_predict_dfm_ar2_2',
            'dyn_predict_dfm_ar2_3']]
        super(TestDynamicFactor_ar2_errors, cls).setup_class(
            true, k_factors=1, factor_order=1, error_order=2)

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()
        assert_allclose(bse, self.true['var_oim'], atol=1e-5)

    def test_mle(self):
        with warnings.catch_warnings(record=True):
            # Depending on the system, this test can reach a greater
            # precision, but for cross-platform results keep it at 1e-2
            mod = self.model
            res1 = mod.fit(maxiter=100, optim_score='approx', disp=False)
            res = mod.fit(
                res1.params, method='nm', maxiter=10000,
                optim_score='approx', disp=False)
            # Added rtol to catch spurious failures on some platforms
            assert_allclose(res.llf, self.results.llf, atol=1e-2, rtol=1e-4)
class TestDynamicFactor_scalar_error(CheckDynamicFactor):
    """
    Test for a dynamic factor model where innovations are uncorrelated and
    are forced to have the same variance.
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_dfm_scalar.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_dfm_scalar_1', 'predict_dfm_scalar_2',
            'predict_dfm_scalar_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_dfm_scalar_1', 'dyn_predict_dfm_scalar_2',
            'dyn_predict_dfm_scalar_3']]
        # Constant-only exog; scalar (common variance) error covariance
        exog = np.ones((75, 1))
        super(TestDynamicFactor_scalar_error, cls).setup_class(
            true, k_factors=1, factor_order=1,
            exog=exog, error_cov_type='scalar')

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()
        assert_allclose(bse, self.true['var_oim'], atol=1e-5)

    def test_predict(self):
        exog = np.ones((16, 1))
        super(TestDynamicFactor_scalar_error, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.ones((16, 1))
        super(TestDynamicFactor_scalar_error,
              self).test_dynamic_predict(exog=exog)
class TestStaticFactor(CheckDynamicFactor):
    """
    Test for a static factor model (i.e. factors are not autocorrelated).
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_sfm.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_sfm_1', 'predict_sfm_2', 'predict_sfm_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_sfm_1', 'dyn_predict_sfm_2', 'dyn_predict_sfm_3']]
        super(TestStaticFactor, cls).setup_class(
            true, k_factors=1, factor_order=0)

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()
        assert_allclose(bse, self.true['var_oim'], atol=1e-5)

    def test_bic(self):
        # Stata uses 5 df (i.e. 5 params) here instead of 6, because one
        # param is basically zero.
        pass
class TestSUR(CheckDynamicFactor):
    """
    Test for a seemingly unrelated regression model (i.e. no factors) with
    errors cross-sectionally, but not auto-, correlated
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_sur.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_sur_1', 'predict_sur_2', 'predict_sur_3']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_sur_1', 'dyn_predict_sur_2', 'dyn_predict_sur_3']]
        # Constant plus linear trend regressors; unstructured covariance
        exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
        super(TestSUR, cls).setup_class(
            true, k_factors=0, factor_order=0,
            exog=exog, error_cov_type='unstructured')

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()
        assert_allclose(bse[:6], self.true['var_oim'][:6], atol=1e-5)

    def test_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestSUR, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestSUR, self).test_dynamic_predict(exog=exog)
class TestSUR_autocorrelated_errors(CheckDynamicFactor):
    """
    Test for a seemingly unrelated regression model (i.e. no factors) where
    the errors are vector autocorrelated, but innovations are uncorrelated.
    """
    @classmethod
    def setup_class(cls):
        true = results_dynamic_factor.lutkepohl_sur_auto.copy()
        true['predict'] = output_results.iloc[1:][[
            'predict_sur_auto_1', 'predict_sur_auto_2']]
        true['dynamic_predict'] = output_results.iloc[1:][[
            'dyn_predict_sur_auto_1', 'dyn_predict_sur_auto_2']]
        # Note: only two endogenous series are used here
        exog = np.c_[np.ones((75, 1)), (np.arange(75) + 2)[:, np.newaxis]]
        super(TestSUR_autocorrelated_errors, cls).setup_class(
            true, k_factors=0, factor_order=0, exog=exog,
            error_order=1, error_var=True,
            error_cov_type='diagonal',
            included_vars=['dln_inv', 'dln_inc'])

    def test_bse_approx(self):
        bse = self.results._cov_params_approx().diagonal()
        assert_allclose(bse, self.true['var_oim'], atol=1e-5)

    def test_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestSUR_autocorrelated_errors, self).test_predict(exog=exog)

    def test_dynamic_predict(self):
        exog = np.c_[np.ones((16, 1)),
                     (np.arange(75, 75+16) + 2)[:, np.newaxis]]
        super(TestSUR_autocorrelated_errors,
              self).test_dynamic_predict(exog=exog)

    def test_mle(self):
        # Skip the initial Powell pass for this model
        super(TestSUR_autocorrelated_errors, self).test_mle(init_powell=False)
def test_misspecification():
    # Tests for model specification and misspecification exceptions
    endog = np.arange(20).reshape(10, 2)

    # Too few endog
    assert_raises(
        ValueError,
        dynamic_factor.DynamicFactor, endog[:, 0], k_factors=0, factor_order=0)

    # Too many factors
    assert_raises(
        ValueError,
        dynamic_factor.DynamicFactor, endog, k_factors=2, factor_order=1)

    # Bad error_cov_type specification
    # NOTE(review): `order=(1, 0)` is passed here as an extra keyword; the
    # ValueError being tested is for the empty error_cov_type -- confirm.
    assert_raises(
        ValueError,
        dynamic_factor.DynamicFactor,
        endog,
        k_factors=1, factor_order=1, order=(1, 0), error_cov_type='')
def test_miscellaneous():
    # Smoke tests for model construction with non-2d exog inputs.
    # Initialization with 1-dimensional exog array
    exog = np.arange(75)
    mod = CheckDynamicFactor()
    mod.setup_class(true=None, k_factors=1, factor_order=1,
                    exog=exog, filter=False)
    # Initialization with a pandas Series as exog
    exog = pd.Series(np.arange(75),
                     index=pd.date_range(start='1960-04-01',
                                         end='1978-10-01', freq='QS'))
    mod = CheckDynamicFactor()
    mod.setup_class(
        true=None, k_factors=1, factor_order=1, exog=exog, filter=False)
def test_predict_custom_index():
    # A user-supplied `index` should be carried through to the prediction.
    np.random.seed(328423)
    endog = pd.DataFrame(np.random.normal(size=(50, 2)))
    mod = dynamic_factor.DynamicFactor(endog, k_factors=1, factor_order=1)
    res = mod.smooth(mod.start_params)
    out = res.predict(start=1, end=1, index=['a'])
    assert_equal(out.index.equals(pd.Index(['a'])), True)
def test_forecast_exog():
    # Test forecasting with various shapes of `exog`
    nobs = 100
    endog = np.ones((nobs, 2)) * 2.0
    exog = np.ones(nobs)
    mod = dynamic_factor.DynamicFactor(endog, exog=exog, k_factors=1,
                                       factor_order=1)
    # Parameters chosen so the forecast of the constant series is exactly 2.
    res = mod.smooth(np.r_[[0] * 2, 2.0, 2.0, 1, 1., 0.])

    # 1-step-ahead, valid: scalar, 1-dim and 2-dim exog are all accepted
    exog_fcast_scalar = 1.
    exog_fcast_1dim = np.ones(1)
    exog_fcast_2dim = np.ones((1, 1))

    assert_allclose(res.forecast(1, exog=exog_fcast_scalar), 2.)
    assert_allclose(res.forecast(1, exog=exog_fcast_1dim), 2.)
    assert_allclose(res.forecast(1, exog=exog_fcast_2dim), 2.)

    # h-steps-ahead, valid
    h = 10
    exog_fcast_1dim = np.ones(h)
    exog_fcast_2dim = np.ones((h, 1))

    assert_allclose(res.forecast(h, exog=exog_fcast_1dim), 2.)
    assert_allclose(res.forecast(h, exog=exog_fcast_2dim), 2.)

    # h-steps-ahead, invalid: wrong length or wrong number of columns
    assert_raises(ValueError, res.forecast, h, exog=1.)
    assert_raises(ValueError, res.forecast, h, exog=[1, 2])
    assert_raises(ValueError, res.forecast, h, exog=np.ones((h, 2)))
def check_equivalent_models(mod, mod2):
    """Assert that two DynamicFactor models are specified identically.

    Compares the model-level specification attributes, the underlying
    state-space representation matrices, and the init keyword dicts.
    Raises an AssertionError (via assert_equal) on any mismatch.
    """
    model_attrs = (
        'k_factors', 'factor_order', 'error_order', 'error_var',
        'error_cov_type', 'enforce_stationarity', 'mle_regression',
        'k_params')
    ssm_attrs = (
        'nobs', 'k_endog', 'k_states', 'k_posdef', 'obs_intercept',
        'design', 'obs_cov', 'state_intercept', 'transition', 'selection',
        'state_cov')
    for name in model_attrs:
        assert_equal(getattr(mod2, name), getattr(mod, name))
    for name in ssm_attrs:
        assert_equal(getattr(mod2.ssm, name), getattr(mod.ssm, name))
    assert_equal(mod2._get_init_kwds(), mod._get_init_kwds())
def test_recreate_model():
    # Rebuilding a model from its own `_get_init_kwds()` must yield an
    # equivalent model for every combination of constructor options.
    import itertools
    nobs = 100
    endog = np.ones((nobs, 3)) * 2.0
    exog = np.ones(nobs)
    option_grid = {
        'k_factors': [0, 1, 2],
        'factor_order': [0, 1, 2],
        'error_order': [0, 1],
        'error_var': [False, True],
        'error_cov_type': ['diagonal', 'scalar'],
    }
    for values in itertools.product(*option_grid.values()):
        kwargs = dict(zip(option_grid.keys(), values))
        mod = dynamic_factor.DynamicFactor(endog, exog=exog, **kwargs)
        mod2 = dynamic_factor.DynamicFactor(endog, exog=exog,
                                            **mod._get_init_kwds())
        check_equivalent_models(mod, mod2)
def test_append_results():
    # Fitting on the full sample must agree with fitting on the first half
    # and then appending the second half.
    endog = np.arange(200).reshape(100, 2)
    exog = np.ones(100)
    params = [0.1, -0.2, 1., 2., 1., 1., 0.5, 0.1]

    mod_full = dynamic_factor.DynamicFactor(
        endog, k_factors=1, factor_order=2, exog=exog)
    res_full = mod_full.smooth(params)

    mod_half = dynamic_factor.DynamicFactor(
        endog[:50], k_factors=1, factor_order=2, exog=exog[:50])
    res_half = mod_half.smooth(params)
    res_appended = res_half.append(endog[50:], exog=exog[50:])

    assert_equal(res_full.specification, res_appended.specification)
    # cov_params is carried over from the model that was actually fit
    assert_allclose(res_appended.cov_params_default,
                    res_half.cov_params_default)

    scalar_attrs = ('nobs', 'llf', 'llf_obs', 'loglikelihood_burn')
    output_attrs = (
        'filtered_state', 'filtered_state_cov', 'predicted_state',
        'predicted_state_cov', 'forecasts', 'forecasts_error',
        'forecasts_error_cov', 'standardized_forecasts_error',
        'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',
        'scaled_smoothed_estimator', 'scaled_smoothed_estimator_cov',
        'smoothing_error', 'smoothed_state', 'smoothed_state_cov',
        'smoothed_state_autocov', 'smoothed_measurement_disturbance',
        'smoothed_state_disturbance', 'smoothed_measurement_disturbance_cov',
        'smoothed_state_disturbance_cov')
    for name in scalar_attrs + output_attrs:
        assert_equal(getattr(res_appended, name), getattr(res_full, name))

    assert_allclose(res_appended.forecast(10, exog=np.ones(10)),
                    res_full.forecast(10, exog=np.ones(10)))
def test_extend_results():
    # Extending a half-sample fit with the remaining data must reproduce the
    # second half of the full-sample results.
    endog = np.arange(200).reshape(100, 2)
    exog = np.ones(100)
    params = [0.1, -0.2, 1., 2., 1., 1., 0.5, 0.1]

    mod_full = dynamic_factor.DynamicFactor(
        endog, k_factors=1, factor_order=2, exog=exog)
    res_full = mod_full.smooth(params)

    mod_half = dynamic_factor.DynamicFactor(
        endog[:50], k_factors=1, factor_order=2, exog=exog[:50])
    res_half = mod_half.smooth(params)
    res_ext = res_half.extend(endog[50:], exog=exog[50:])

    assert_allclose(res_ext.llf_obs, res_full.llf_obs[50:])

    output_attrs = (
        'filtered_state', 'filtered_state_cov', 'predicted_state',
        'predicted_state_cov', 'forecasts', 'forecasts_error',
        'forecasts_error_cov', 'standardized_forecasts_error',
        'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',
        'scaled_smoothed_estimator', 'scaled_smoothed_estimator_cov',
        'smoothing_error', 'smoothed_state', 'smoothed_state_cov',
        'smoothed_state_autocov', 'smoothed_measurement_disturbance',
        'smoothed_state_disturbance', 'smoothed_measurement_disturbance_cov',
        'smoothed_state_disturbance_cov')
    for name in output_attrs:
        expected = getattr(res_full, name)
        if expected is not None:
            # Extended results only cover the second half of the sample
            expected = expected[..., 50:]
        assert_equal(getattr(res_ext, name), expected)

    assert_allclose(res_ext.forecast(10, exog=np.ones(10)),
                    res_full.forecast(10, exog=np.ones(10)))
def test_apply_results():
    # Applying a fit (from the second half) to the first half of the data
    # must reproduce a direct fit on the first half.
    endog = np.arange(200).reshape(100, 2)
    exog = np.ones(100)
    params = [0.1, -0.2, 1., 2., 1., 1., 0.5, 0.1]

    mod_first = dynamic_factor.DynamicFactor(
        endog[:50], k_factors=1, factor_order=2, exog=exog[:50])
    res_first = mod_first.smooth(params)

    mod_second = dynamic_factor.DynamicFactor(
        endog[50:], k_factors=1, factor_order=2, exog=exog[50:])
    res_second = mod_second.smooth(params)
    res_applied = res_second.apply(endog[:50], exog=exog[:50])

    assert_equal(res_first.specification, res_applied.specification)
    # cov_params comes from the originating results object
    assert_allclose(res_applied.cov_params_default,
                    res_second.cov_params_default)

    scalar_attrs = ('nobs', 'llf', 'llf_obs', 'loglikelihood_burn')
    output_attrs = (
        'filtered_state', 'filtered_state_cov', 'predicted_state',
        'predicted_state_cov', 'forecasts', 'forecasts_error',
        'forecasts_error_cov', 'standardized_forecasts_error',
        'forecasts_error_diffuse_cov', 'predicted_diffuse_state_cov',
        'scaled_smoothed_estimator', 'scaled_smoothed_estimator_cov',
        'smoothing_error', 'smoothed_state', 'smoothed_state_cov',
        'smoothed_state_autocov', 'smoothed_measurement_disturbance',
        'smoothed_state_disturbance', 'smoothed_measurement_disturbance_cov',
        'smoothed_state_disturbance_cov')
    for name in scalar_attrs + output_attrs:
        assert_equal(getattr(res_applied, name), getattr(res_first, name))

    assert_allclose(res_applied.forecast(10, exog=np.ones(10)),
                    res_first.forecast(10, exog=np.ones(10)))
def test_start_params_nans():
    # Trailing NaN observations must not change the starting parameters:
    # they should be treated the same as simply dropping those rows.
    ix = pd.date_range('1960-01-01', '1982-10-01', freq='QS')
    dta = np.log(pd.DataFrame(
        results_varmax.lutkepohl_data, columns=['inv', 'inc', 'consump'],
        index=ix)).diff().iloc[1:]

    mod_trimmed = dynamic_factor.DynamicFactor(
        dta.iloc[:-1], k_factors=1, factor_order=1)

    endog_nan = dta.copy()
    endog_nan.iloc[-1:] = np.nan
    mod_nan = dynamic_factor.DynamicFactor(
        endog_nan, k_factors=1, factor_order=1)

    assert_allclose(mod_nan.start_params, mod_trimmed.start_params)
| bsd-3-clause |
JKarathiya/Lean | Algorithm.Framework/Portfolio/BlackLittermanOptimizationPortfolioConstructionModel.py | 1 | 14679 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Logging")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Algorithm import *
from QuantConnect.Logging import Log
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import InsightCollection, InsightDirection
from QuantConnect.Algorithm.Framework.Portfolio import PortfolioConstructionModel, PortfolioTarget, PortfolioBias
from Portfolio.MaximumSharpeRatioPortfolioOptimizer import MaximumSharpeRatioPortfolioOptimizer
from datetime import datetime, timedelta
from itertools import groupby
import pandas as pd
import numpy as np
from numpy import dot, transpose
from numpy.linalg import inv
### <summary>
### Provides an implementation of Black-Litterman portfolio optimization. The model adjusts equilibrium market
### returns by incorporating views from multiple alpha models and therefore to get the optimal risky portfolio
### reflecting those views. If insights of all alpha models have None magnitude or there are linearly dependent
### vectors in link matrix of views, the expected return would be the implied excess equilibrium return.
### The interval of weights in optimization method can be changed based on the long-short algorithm.
### The default model uses the 0.0025 as weight-on-views scalar parameter tau and
### MaximumSharpeRatioPortfolioOptimizer that accepts a 63-row matrix of 1-day returns.
### </summary>
class BlackLittermanOptimizationPortfolioConstructionModel(PortfolioConstructionModel):
    """Black-Litterman portfolio construction: blends the implied equilibrium
    market returns with the views expressed by the algorithm's alpha models,
    then feeds the posterior mean/covariance into a portfolio optimizer.

    NOTE(review): the class summary above states a default tau of 0.0025 and a
    MaximumSharpeRatioPortfolioOptimizer on 63 rows of 1-day returns, but the
    constructor below defaults tau to 0.05 — confirm which default is intended.
    """

    def __init__(self,
                 rebalance = Resolution.Daily,
                 portfolioBias = PortfolioBias.LongShort,
                 lookback = 1,
                 period = 63,
                 resolution = Resolution.Daily,
                 risk_free_rate = 0,
                 delta = 2.5,
                 tau = 0.05,
                 optimizer = None):
        """Initialize the model
        Args:
            rebalance: Rebalancing parameter. If it is a timedelta, date rules or Resolution, it will be converted into a function.
                       If None will be ignored.
                       The function returns the next expected rebalance time for a given algorithm UTC DateTime.
                       The function returns null if unknown, in which case the function will be called again in the
                       next loop. Returning current time will trigger rebalance.
            portfolioBias: Specifies the bias of the portfolio (Short, Long/Short, Long)
            lookback(int): Historical return lookback period
            period(int): The time interval of history price to calculate the weight
            resolution: The resolution of the history price
            risk_free_rate(float): The risk free rate
            delta(float): The risk aversion coeffficient of the market portfolio
            tau(float): The model parameter indicating the uncertainty of the CAPM prior"""
        self.lookback = lookback
        self.period = period
        self.resolution = resolution
        self.risk_free_rate = risk_free_rate
        self.delta = delta
        self.tau = tau
        self.portfolioBias = portfolioBias

        # Restrict the optimizer's weight interval to match the requested
        # portfolio bias: long-only => [0, 1], short-only => [-1, 0].
        lower = 0 if portfolioBias == PortfolioBias.Long else -1
        upper = 0 if portfolioBias == PortfolioBias.Short else 1
        self.optimizer = MaximumSharpeRatioPortfolioOptimizer(lower, upper, risk_free_rate) if optimizer is None else optimizer

        # sign(x) helper used to compare a weight's direction with the bias
        self.sign = lambda x: -1 if x < 0 else (1 if x > 0 else 0)
        # Symbol -> BlackLittermanSymbolData (rolling return history per symbol)
        self.symbolDataBySymbol = {}

        # If the argument is an instance of Resolution or Timedelta
        # Redefine rebalancingFunc
        rebalancingFunc = rebalance
        if isinstance(rebalance, int):
            rebalance = Extensions.ToTimeSpan(rebalance)
        if isinstance(rebalance, timedelta):
            rebalancingFunc = lambda dt: dt + rebalance
        if rebalancingFunc:
            self.SetRebalancingFunc(rebalancingFunc)

    def ShouldCreateTargetForInsight(self, insight):
        # Only build a target when the insight's magnitude passes validation.
        return len(PortfolioConstructionModel.FilterInvalidInsightMagnitude(self.Algorithm, [ insight ])) != 0

    def DetermineTargetPercent(self, lastActiveInsights):
        """Map the last active insights to target portfolio weights using the
        Black-Litterman posterior and the configured optimizer.
        Returns a dict of insight -> weight (empty on failure)."""
        targets = {}

        # Get view vectors
        P, Q = self.get_views(lastActiveInsights)
        if P is not None:
            returns = dict()

            # Updates the BlackLittermanSymbolData with insights
            # Create a dictionary keyed by the symbols in the insights with an pandas.Series as value to create a data frame
            for insight in lastActiveInsights:
                symbol = insight.Symbol
                symbolData = self.symbolDataBySymbol.get(symbol, self.BlackLittermanSymbolData(symbol, self.lookback, self.period))
                if insight.Magnitude is None:
                    # A None magnitude is a hard error: the BL views require it.
                    self.Algorithm.SetRunTimeError(ArgumentNullException('BlackLittermanOptimizationPortfolioConstructionModel does not accept \'None\' as Insight.Magnitude. Please make sure your Alpha Model is generating Insights with the Magnitude property set.'))
                    return targets
                symbolData.Add(insight.GeneratedTimeUtc, insight.Magnitude)
                returns[symbol] = symbolData.Return

            returns = pd.DataFrame(returns)

            # Calculate prior estimate of the mean and covariance
            Pi, Sigma = self.get_equilibrium_return(returns)

            # Calculate posterior estimate of the mean and covariance
            Pi, Sigma = self.apply_blacklitterman_master_formula(Pi, Sigma, P, Q)

            # Create portfolio targets from the specified insights
            weights = self.optimizer.Optimize(returns, Pi, Sigma)
            weights = pd.Series(weights, index = Sigma.columns)

            for symbol, weight in weights.items():
                for insight in lastActiveInsights:
                    if str(insight.Symbol) == str(symbol):
                        # don't trust the optimizer: zero out weights whose
                        # direction conflicts with the configured bias
                        if self.portfolioBias != PortfolioBias.LongShort and self.sign(weight) != self.portfolioBias:
                            weight = 0
                        targets[insight] = weight
                        break;

        return targets

    def GetTargetInsights(self):
        # Get insight that haven't expired of each symbol that is still in the universe
        activeInsights = self.InsightCollection.GetActiveInsights(self.Algorithm.UtcTime)

        # Get the last generated active insight for each symbol, grouped first
        # by source alpha model and then by symbol.
        lastActiveInsights = []
        for sourceModel, f in groupby(sorted(activeInsights, key = lambda ff: ff.SourceModel), lambda fff: fff.SourceModel):
            for symbol, g in groupby(sorted(list(f), key = lambda gg: gg.Symbol), lambda ggg: ggg.Symbol):
                lastActiveInsights.append(sorted(g, key = lambda x: x.GeneratedTimeUtc)[-1])
        return lastActiveInsights

    def OnSecuritiesChanged(self, algorithm, changes):
        '''Event fired each time the we add/remove securities from the data feed
        Args:
            algorithm: The algorithm instance that experienced the change in securities
            changes: The security additions and removals from the algorithm'''
        # Get removed symbol and invalidate them in the insight collection
        super().OnSecuritiesChanged(algorithm, changes)
        for security in changes.RemovedSecurities:
            symbol = security.Symbol
            symbolData = self.symbolDataBySymbol.pop(symbol, None)
            if symbolData is not None:
                # Detach the indicator event handler so it can be collected
                symbolData.Reset()

        # initialize data for added securities
        addedSymbols = { x.Symbol: x.Exchange.TimeZone for x in changes.AddedSecurities }
        history = algorithm.History(list(addedSymbols.keys()), self.lookback * self.period, self.resolution)
        if history.empty:
            return

        # One close-price column per symbol
        history = history.close.unstack(0)
        symbols = history.columns

        for symbol, timezone in addedSymbols.items():
            if str(symbol) not in symbols:
                continue
            symbolData = self.symbolDataBySymbol.get(symbol, self.BlackLittermanSymbolData(symbol, self.lookback, self.period))
            # Warm up the rolling return window from history
            # (note: `time` here shadows nothing imported, it is the bar time)
            for time, close in history[symbol].items():
                utcTime = Extensions.ConvertToUtc(time, timezone)
                symbolData.Update(utcTime, close)
            self.symbolDataBySymbol[symbol] = symbolData

    def apply_blacklitterman_master_formula(self, Pi, Sigma, P, Q):
        '''Apply Black-Litterman master formula
        http://www.blacklitterman.org/cookbook.html
        Args:
            Pi: Prior/Posterior mean array
            Sigma: Prior/Posterior covariance matrix
            P: A matrix that identifies the assets involved in the views (size: K x N)
            Q: A view vector (size: K x 1)'''
        ts = self.tau * Sigma

        # Create the diagonal Sigma matrix of error terms from the expressed views
        omega = np.dot(np.dot(P, ts), P.T) * np.eye(Q.shape[0])
        if np.linalg.det(omega) == 0:
            # Degenerate views: fall back to the prior unchanged
            return Pi, Sigma

        A = np.dot(np.dot(ts, P.T), inv(np.dot(np.dot(P, ts), P.T) + omega))

        Pi = np.squeeze(np.asarray((
            np.expand_dims(Pi, axis=0).T +
            np.dot(A, (Q - np.expand_dims(np.dot(P, Pi.T), axis=1))))
            ))

        M = ts - np.dot(np.dot(A, P), ts)
        Sigma = (Sigma + M) * self.delta

        return Pi, Sigma

    def get_equilibrium_return(self, returns):
        '''Calculate equilibrium returns and covariance
        Args:
            returns: Matrix of returns where each column represents a security and each row returns for the given date/time (size: K x N)
        Returns:
            equilibrium_return: Array of double of equilibrium returns
            cov: Multi-dimensional array of double with the portfolio covariance of returns (size: K x K)'''

        size = len(returns.columns)
        # equal weighting scheme
        W = np.array([1/size]*size)
        # the covariance matrix of excess returns (N x N matrix)
        cov = returns.cov()*252
        # annualized return
        annual_return = np.sum(((1 + returns.mean())**252 -1) * W)
        # annualized variance of return
        annual_variance = dot(W.T, dot(cov, W))
        # the risk aversion coefficient
        risk_aversion = (annual_return - self.risk_free_rate ) / annual_variance
        # the implied excess equilibrium return Vector (N x 1 column vector)
        equilibrium_return = dot(dot(risk_aversion, cov), W)

        return equilibrium_return, cov

    def get_views(self, insights):
        '''Generate views from multiple alpha models
        Args
            insights: Array of insight that represent the investors' views
        Returns
            P: A matrix that identifies the assets involved in the views (size: K x N)
            Q: A view vector (size: K x 1)'''
        try:
            P = {}
            Q = {}
            # One view (row of P, entry of Q) per alpha model
            for model, group in groupby(insights, lambda x: x.SourceModel):
                group = list(group)

                up_insights_sum = 0.0
                dn_insights_sum = 0.0
                for insight in group:
                    if insight.Direction == InsightDirection.Up:
                        up_insights_sum = up_insights_sum + np.abs(insight.Magnitude)
                    if insight.Direction == InsightDirection.Down:
                        dn_insights_sum = dn_insights_sum + np.abs(insight.Magnitude)

                # View strength is the dominant directional magnitude
                q = up_insights_sum if up_insights_sum > dn_insights_sum else dn_insights_sum
                if q == 0:
                    continue

                Q[model] = q

                # generate the link matrix of views: P
                P[model] = dict()
                for insight in group:
                    value = insight.Direction * np.abs(insight.Magnitude)
                    P[model][insight.Symbol] = value / q
                # Add zero for other symbols that are listed but active insight
                for symbol in self.symbolDataBySymbol.keys():
                    if symbol not in P[model]:
                        P[model][symbol] = 0

            Q = np.array([[x] for x in Q.values()])
            if len(Q) > 0:
                P = np.array([list(x.values()) for x in P.values()])
                return P, Q
        except:
            # NOTE(review): bare except silently maps any failure to
            # "no views" — consider narrowing/logging.
            pass
        return None, None

    class BlackLittermanSymbolData:
        '''Contains data specific to a symbol required by this model'''
        def __init__(self, symbol, lookback, period):
            self.symbol = symbol
            # Rate-of-change indicator over `lookback` bars feeds the window
            self.roc = RateOfChange(f'{symbol}.ROC({lookback})', lookback)
            self.roc.Updated += self.OnRateOfChangeUpdated
            self.window = RollingWindow[IndicatorDataPoint](period)

        def Reset(self):
            # Unsubscribe from the indicator and clear accumulated state
            self.roc.Updated -= self.OnRateOfChangeUpdated
            self.roc.Reset()
            self.window.Reset()

        def Update(self, utcTime, close):
            # Feeding the indicator triggers OnRateOfChangeUpdated when ready
            self.roc.Update(utcTime, close)

        def OnRateOfChangeUpdated(self, roc, value):
            if roc.IsReady:
                self.window.Add(value)

        def Add(self, time, value):
            # Avoid duplicate samples for the same timestamp
            if self.window.Samples > 0 and self.window[0].EndTime == time:
                return;

            item = IndicatorDataPoint(self.symbol, time, value)
            self.window.Add(item)

        @property
        def Return(self):
            # Rolling returns as a pandas Series indexed by bar end time
            return pd.Series(
                data = [x.Value for x in self.window],
                index = [x.EndTime for x in self.window])

        @property
        def IsReady(self):
            return self.window.IsReady

        def __str__(self, **kwargs):
            # Annualized return of the most recent sample, e.g. "SPY.ROC(1): 8.32%"
            return f'{self.roc.Name}: {(1 + self.window[0])**252 - 1:.2%}'
sarunya-w/CS402-PROJECT | Project/feature_extraction/feature_extraction_local/hog_local.py | 1 | 4739 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 14:19:50 2015
@author: Sarunya
"""
import os
import sys
import numpy as np
from PIL import Image
import scipy.ndimage
import pickle
from skimage.feature import hog
from skimage import data, color, exposure
import scipy.ndimage
import time
from cv2 import HOGDescriptor
import random
from matplotlib import pyplot as plt
# Allow deep recursion (e.g. when pickling large nested structures below).
sys.setrecursionlimit(10000)

# --- module-level configuration ---
bs = 200        # side length in pixels of the square patch cropped per sample
wd = 8          # theta_range=wd*wd*2
clmax = 11      # clmax is amount of class (label 255 is remapped to clmax - 1)
theta_dim = 1
dset = 1        # number of dataset pickle files to generate
nofiles = 1
def timestamp(tt=None):
    """Print the elapsed time since *tt* and return the current time.

    Args:
        tt: reference time in seconds since the epoch.  Defaults to "now".
    Returns:
        The current ``time.time()`` so callers can chain measurements
        (``ts = timestamp(ts)``).
    """
    # Bug fix: the original default `tt=time.time()` was evaluated once at
    # import time, so every no-arg call measured elapsed time since module
    # load.  Use the standard None-sentinel instead.
    if tt is None:
        tt = time.time()
    st = time.time()
    print(" took: %.2f sec" % (st - tt))
    return st
def normHOG(images_file):
    """Compute the normalised HOG descriptor of a greyscale PIL image.

    Args:
        images_file: a PIL image (mode 'L'); converted to a 2-D numpy array.
    Returns:
        The flat HOG feature array produced by skimage.feature.hog.
    """
    img = np.array(images_file)
    # NOTE(review): numpy's shape is (rows, cols) == (height, width), so these
    # names appear swapped; `height//4` below actually divides the row count.
    # Harmless for square patches — confirm for non-square input.
    width, height = img.shape
    # SKIMAGE
    #fd , f = hog(img, orientations=8, pixels_per_cell=(height//8, width//8), cells_per_block=(16, 16), visualise=True)
    f = hog(img, normalise=True,pixels_per_cell=(height//4, width//4))
    print f.shape
    #print len(f)
    #scaling
    #s = (100./f.shape[0],100./f.shape[1])
    #normalized histogram of gradient
    return f#scipy.ndimage.zoom(f,s,order = 2)
def getValue(images):
    """Return the HOG descriptor of *images* flattened to a 1-D vector."""
    descriptor = normHOG(images)
    return descriptor.reshape(-1)
def getVector(images_files,class_files,samples,isTrain):
    """Sample random patches from each image and compute their HOG vectors.

    Args:
        images_files: list of image file paths to sample from.
        class_files: list of per-pixel label image paths (parallel to
            images_files); only read when isTrain is True.
        samples: number of random patches to draw per image.
        isTrain: when True, also collect the label of each patch centre.
    Returns:
        (sub_img, sub_cs): feature matrix and label array.  For training,
        both are numpy arrays (labels 255 remapped to clmax - 1); otherwise
        sub_img is a plain list and sub_cs is None.
    """
    ts=time.time()
    sub_img = []
    sub_cs = []
    # Half the patch size; keeps sampled centres at least bb from the border.
    bb = bs//2
    for f in xrange(len(images_files)):
        img = Image.open(images_files[f]).convert('L')
        w , h = img.size
        pixels=[]
        #print '%02d %s'%(f,images_files[f])
        for i in xrange(samples):
            # Random patch centre (col, row), away from the image border
            r = np.random.randint(bb, h-bb)
            c = np.random.randint(bb, w-bb)
            pixels.append((c,r))
            box = (c-bb, r-bb, c + bb, r + bb)
            output_img = img.crop(box)
            sub_img.append(getValue(output_img))
        if isTrain:
            # Label of each patch is the label-image pixel at its centre
            cimg = Image.open(class_files[f]).convert('L')
            for p in pixels:
                sub_cs.append(cimg.getpixel(p))
    if isTrain == True:
        sub_img=np.array(sub_img,dtype=np.float32)
        sub_cs=np.array(sub_cs,dtype=np.uint32)
        # Remap the "unlabelled"/background value 255 to the last class id
        sub_cs[sub_cs==255]= clmax - 1
    else:
        # NOTE(review): in the non-training path sub_img stays a plain list
        # (not converted to np.float32) — confirm downstream expects that.
        sub_cs=None
    ts=timestamp(ts)
    return (sub_img ,sub_cs)
if __name__ == '__main__':
    # Walk ./dataset, pair each image with its per-pixel label bitmap in a
    # sibling bmp/ directory, then sample patches and pickle the HOG dataset.
    isTrain = True #train (test: False)
    dsetname = './dataset'
    ddesname = 'hog_dataset'
    images_files = []
    class_files = []
    for root, dirs, files in os.walk(dsetname):
        for f in files:
            if f.endswith('jpg') or f.endswith('JPG') or f.endswith('png') or f.endswith('PNG'):
                # read image to array (PIL)
                images_files.append(os.path.join(root,f))
                img_name = os.path.basename(os.path.join(root,f))
                file_name = img_name.split(".")
                # check image don't have file type 'bmp'
                if isTrain is True:
                    # Training requires a matching label bitmap; abort if missing.
                    if os.path.isfile(os.path.join(root , 'bmp/' + file_name[0] + '.bmp')) == False:
                        print "plese label" , img_name
                        sys.exit()#break
                    else:
                        class_files.append(os.path.join(root , 'bmp/' + file_name[0] + '.bmp'))
    #if isTrain is True:
    #    xarray = random.sample(zip( images_files,class_files), nofiles)
    #    images_files = [a[0] for a in xarray]
    #    class_files = [a[1] for a in xarray]
    # Keep only the directory name component for building output paths below
    dsetname = dsetname.split("/")
    for i in xrange(dset):
        # 1000 random patches per image, vectorised with HOG
        vs ,cs = getVector(images_files,class_files,1000,isTrain)
        vs = np.array(vs,dtype=np.float32)
        #cs=np.array(cs)500
        #if cs[0] is None:
        #    cs = None
        if not os.path.exists(ddesname):
            os.makedirs(ddesname)
        if not os.path.exists(ddesname+'/'+dsetname[1]):
            os.makedirs(ddesname+'/'+dsetname[1])
        #if not os.path.exists(ddesname+'/'+dsetname[1]+'/'+'100'):
        #    os.makedirs(ddesname+'/'+dsetname[1]+'/'+'100')
        #rfile = ddesname+'/' +dsetname[1] + '/'+ '100' +'/'+ 'dataset%02d.pic'%(i)
        rfile = ddesname+'/' +dsetname[1] + '/'+ 'dataset%02d.pic'%(i)
        pickleFile = open(rfile, 'wb')
        theta_range = vs.shape[1]
        size = vs.shape[0]
        samples = cs
        I = vs
        # Tuple layout consumed by the training code:
        # (n_classes, theta_dim, feature_len, n_samples, labels, features)
        pickle.dump((clmax,theta_dim,theta_range,size,samples,I), pickleFile, pickle.HIGHEST_PROTOCOL)
        pickleFile.close()
        # NOTE(review): this increment is overwritten by the for-loop's next
        # iteration value, so it has no effect — confirm it can be removed.
        i = i+1
abhishekkrthakur/scikit-learn | sklearn/linear_model/ridge.py | 3 | 38867 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight, compute_class_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    """Solve the ridge problem with conjugate gradients, one target at a time.

    Depending on the problem shape, CG is run either on the kernel (Gram)
    system ``(X X^T + alpha I) u = y`` with ``w = X^T u`` (n_features >
    n_samples) or directly on the normal equations
    ``(X^T X + alpha I) w = X^T y``.

    Parameters mirror ``ridge_regression``; `y` is 2-D (one column per
    target) and `alpha` holds one penalty per target.
    Returns the coefficient matrix of shape (n_targets, n_features).
    """
    n_samples, n_features = X.shape
    # LinearOperator lets CG use only matvec products, so X may be sparse
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features))

    if n_features > n_samples:
        def create_mv(curr_alpha):
            # Factory so each closure captures its own alpha (avoids the
            # late-binding pitfall when looping over targets below).
            def _mv(x):
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        def create_mv(curr_alpha):
            def _mv(x):
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv

    for i in range(y.shape[1]):
        y_column = y[:, i]
        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            coef, info = sp_linalg.cg(C, y_column, tol=tol)
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                          tol=tol)

        # info < 0: illegal input/breakdown; info > 0: no convergence
        if info < 0:
            raise ValueError("Failed with error code %d" % info)

        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." %
                          info)

    return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
    """Solve the ridge normal equations with a (symmetric) direct solver.

    Computes ``w = inv(X^T X + alpha * I) X^T y``.  With a single shared
    penalty all targets are solved in one call; otherwise the diagonal of
    the Gram matrix is shifted per target (and restored afterwards so `A`
    can be reused).
    Returns the coefficient matrix of shape (n_targets, n_features).

    NOTE(review): `sym_pos=True` was deprecated and later removed from
    scipy.linalg.solve (use ``assume_a='pos'`` on modern scipy) — confirm
    the supported scipy range before upgrading.
    """
    # w = inv(X^t X + alpha*Id) * X.T y
    n_samples, n_features = X.shape
    n_targets = y.shape[1]

    A = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])

    if one_alpha:
        # `A.flat[::n_features + 1]` addresses the diagonal of A
        A.flat[::n_features + 1] += alpha[0]
        return linalg.solve(A, Xy, sym_pos=True,
                            overwrite_a=True).T
    else:
        coefs = np.empty([n_targets, n_features])
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, sym_pos=True,
                                   overwrite_a=False).ravel()
            # Undo the diagonal shift so A is pristine for the next target
            A.flat[::n_features + 1] -= current_alpha
        return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve the *dual* (kernel) ridge problem with a direct solver.

    Computes ``dual_coef = inv(K + alpha * I) y`` where `K` is a
    (possibly pre-computed) kernel/Gram matrix.  Sample weights are folded
    in by rescaling both `y` and `K` with ``sqrt(sample_weight)``.
    Returns the dual coefficients of shape (n_samples, n_targets).

    NOTE(review): as in ``_solve_cholesky``, ``sym_pos=True`` was removed
    from modern scipy.linalg.solve — confirm the supported scipy range.
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]

    if copy:
        K = K.copy()

    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]

    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)

    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]

        try:
            # Note: we must use overwrite_a=False in order to be able to
            # use the fall-back solution below in case a LinAlgError
            # is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]

        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]

        if has_sw:
            # Undo the sqrt(sample_weight) scaling on the solution
            dual_coef *= sw[:, np.newaxis]

        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples])

        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            K.flat[::n_samples + 1] += current_alpha

            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()

            # Restore K's diagonal before the next target / for the caller
            K.flat[::n_samples + 1] -= current_alpha

        if has_sw:
            dual_coefs *= sw[np.newaxis, :]

        return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _deprecate_dense_cholesky(solver):
if solver == 'dense_cholesky':
warnings.warn(DeprecationWarning(
"The name 'dense_cholesky' is deprecated and will "
"be removed in 0.17. Use 'cholesky' instead. "))
solver = 'cholesky'
return solver
def _rescale_data(X, y, sample_weight):
    """Rescale X and y by sqrt(sample_weight) so that the ordinary ridge
    objective on the rescaled data equals the weighted objective on the
    original data.  Works for dense and sparse X.
    """
    n_samples = X.shape[0]
    # Broadcast a scalar weight to a per-sample vector, then take sqrt
    weights = np.sqrt(sample_weight * np.ones(n_samples))
    # Left-multiplying by a diagonal matrix keeps sparse inputs sparse
    scaling = sparse.dia_matrix((weights, 0),
                                shape=(n_samples, n_samples))
    return safe_sparse_dot(scaling, X), safe_sparse_dot(scaling, y)
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0):
    """Solve the ridge equation by the method of normal equations.

    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data

    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values

    alpha : {float, array-like},
        shape = [n_targets] if array-like
        The l_2 penalty to be used. If an array is passed, penalties are
        assumed to be specific to targets

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is set, then
        the solver will automatically be set to 'cholesky'

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fatest but may not be available
          in old scipy versions. It also uses an iterative procedure.

        All three solvers support both dense and sparse data.

    tol : float
        Precision of the solution.

    verbose : int
        Verbosity level. Setting verbose > 0 will display additional information
        depending on the solver used.

    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    Notes
    -----
    This function won't compute the intercept.
    """
    n_samples, n_features = X.shape

    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))

    # Accept 1-D targets; remember to flatten the result on the way out
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True

    n_samples_, n_targets = y.shape

    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))

    has_sw = sample_weight is not None

    # Map the deprecated 'dense_cholesky' name onto 'cholesky'
    solver = _deprecate_dense_cholesky(solver)

    if solver == 'auto':
        # cholesky if it's a dense array and cg in
        # any other case
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'

    elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
        warnings.warn("""lsqr not available on this machine, falling back
                      to sparse_cg.""")
        solver = 'sparse_cg'

    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")

        # Sample weight can be implemented via a simple rescaling.
        X, y = _rescale_data(X, y, sample_weight)

    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))

    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)

    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
        raise ValueError('Solver %s not understood' % solver)

    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)

    elif solver == "lsqr":
        coef = _solve_lsqr(X, y, alpha, max_iter, tol)

    elif solver == 'cholesky':
        if n_features > n_samples:
            # Wide problem: solve the dual (kernel) system instead
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)

                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'

        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'

    # Deliberately a separate `if` (not elif): this also catches the
    # cholesky -> svd fall-back triggered just above.
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)

    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()

    return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
    """Abstract base class shared by ridge regressor and classifier.

    Stores the hyper-parameters and implements the common ``fit`` logic:
    input validation, centering/normalization, delegation to
    ``ridge_regression`` and intercept reconstruction.
    """

    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
        # Hyper-parameters are stored as-is; validation happens in fit
        # (scikit-learn estimator convention).
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver

    def fit(self, X, y, sample_weight=None):
        """Fit the ridge model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
        sample_weight : float or 1-D array, optional

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True)

        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")

        # Center (and optionally normalize) so the solver never needs to
        # fit an intercept explicitly; it is restored below.
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        solver = _deprecate_dense_cholesky(self.solver)

        self.coef_ = ridge_regression(X, y,
                                      alpha=self.alpha,
                                      sample_weight=sample_weight,
                                      max_iter=self.max_iter,
                                      tol=self.tol,
                                      solver=solver)
        self._set_intercept(X_mean, y_mean, X_std)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.

    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).

    Parameters
    ----------
    alpha : {float, array-like}
        shape = [n_targets]
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``(2*C)^-1`` in other linear models such as LogisticRegression or
        LinearSVC. If an array is passed, penalties are assumed to be specific
        to the targets. Hence they must correspond in number.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.

        All three solvers support both dense and sparse data.

    tol : float
        Precision of the solution.

    Attributes
    ----------
    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    See also
    --------
    RidgeClassifier, RidgeCV, KernelRidge

    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, solver='auto', tol=0.001)
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
        # All parameters are handled by the shared _BaseRidge constructor.
        super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
                                    normalize=normalize, copy_X=copy_X,
                                    max_iter=max_iter, tol=tol, solver=solver)

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample

        Returns
        -------
        self : returns an instance of self.
        """
        return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using Ridge regression.

    Parameters
    ----------
    alpha : float
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``(2*C)^-1`` in other linear models such as LogisticRegression or
        LinearSVC.

    class_weight : dict, optional
        Weights associated with classes in the form
        ``{class_label : weight}``. If not given, all classes are
        supposed to have weight one.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false, no
        intercept will be used in calculations (e.g. data is expected to be
        already centered).

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
        Solver to use in the computational
        routines. 'svd' will use a Singular value decomposition to obtain
        the solution, 'cholesky' will use the standard
        scipy.linalg.solve function, 'sparse_cg' will use the
        conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will chose the most
        appropriate depending on the matrix X. 'lsqr' uses
        a direct regularized least-squares routine provided by scipy.

    tol : float
        Precision of the solution.

    Attributes
    ----------
    coef_ : array, shape = [n_features] or [n_classes, n_features]
        Weight vector(s).

    See also
    --------
    Ridge, RidgeClassifierCV

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto"):
        super(RidgeClassifier, self).__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
        self.class_weight = class_weight

    def fit(self, X, y):
        """Fit Ridge regression model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples,n_features]
            Training data

        y : array-like, shape = [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        # Encode labels as a {-1, +1} indicator matrix so the multi-output
        # ridge regression implements one-versus-all classification.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)

        if self.class_weight:
            # get the class weight corresponding to each sample
            sample_weight = compute_sample_weight(self.class_weight, y)
        else:
            sample_weight = None

        # Regress on the indicator matrix; prediction uses the sign of the
        # decision function (via LinearClassifierMixin).
        super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Classes in the order learned by the label binarizer.
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with built-in Generalized Cross-Validation

    It allows efficient Leave-One-Out cross-validation.

    This class is not intended to be used directly. Use RidgeCV instead.

    Notes
    -----

    We want to solve (K + alpha*Id)c = y,
    where K = X X^T is the kernel matrix.

    Let G = (K + alpha*Id)^-1.

    Dual solution: c = Gy
    Primal solution: w = X^T c

    Compute eigendecomposition K = Q V Q^T.
    Then G = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to inverse for many alphas.

    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.

    loov = (KGY - diag(KG)Y) / diag(I-KG)

    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.

    looe = y - loov = c / diag(G)

    References
    ----------
    http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
    http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
    """

    def __init__(self, alphas=[0.1, 1.0, 10.0],
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def _pre_compute(self, X, y):
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        # Eigendecomposition of the (symmetric) kernel matrix; reused for
        # every candidate alpha.
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y

    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)

    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B

    def _errors(self, alpha, y, v, Q, QT_y):
        # don't construct matrix G, instead compute action on y & diagonal
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        # Squared leave-one-out errors: looe = c / diag(G) (see class notes).
        return (c / G_diag) ** 2, c

    def _values(self, alpha, y, v, Q, QT_y):
        # don't construct matrix G, instead compute action on y & diagonal
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        # Leave-one-out predictions: loov = y - c / diag(G).
        return y - (c / G_diag), c

    def _pre_compute_svd(self, X, y):
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        # Thin SVD of X; the squared singular values play the role of the
        # kernel eigenvalues in the eigen path.
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y

    def _errors_svd(self, alpha, y, v, U, UT_y):
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return (c / G_diag) ** 2, c

    def _values_svd(self, alpha, y, v, U, UT_y):
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case when y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return y - (c / G_diag), c

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : Returns self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True)

        n_samples, n_features = X.shape

        X, y, X_mean, y_mean, X_std = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        gcv_mode = self.gcv_mode
        # Truthy when sample weights were provided (0 for None/scalar-free).
        with_sw = len(np.shape(sample_weight))

        if gcv_mode is None or gcv_mode == 'auto':
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'

        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)

        # Expensive decomposition done once; each alpha below is cheap.
        v, Q, QT_y = _pre_compute(X, y)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []

        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # Without a scorer, alphas are ranked by mean squared LOO error.
        error = scorer is None

        for i, alpha in enumerate(self.alphas):
            # NOTE(review): sample weights are folded into alpha here
            # (per-sample regularization) — confirm against the reference
            # in the class docstring.
            weighted_alpha = (sample_weight * alpha
                              if sample_weight is not None
                              else alpha)
            if error:
                out, c = _errors(weighted_alpha, y, v, Q, QT_y)
            else:
                out, c = _values(weighted_alpha, y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)

        if error:
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict

            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)

        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # Primal coefficients from the dual solution: w = X^T c.
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)

        self._set_intercept(X_mean, y_mean, X_std)

        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)

        return self
class _BaseRidgeCV(LinearModel):
    """Base class for ridge estimators with built-in alpha selection.

    When ``cv is None`` the efficient leave-one-out scheme of
    :class:`_RidgeGCV` is used; otherwise a grid search over ``alphas``
    with the requested cross-validation scheme selects the best alpha.
    """

    def __init__(self, alphas=np.array([0.1, 1.0, 10.0]),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : Returns self.
        """
        if self.cv is None:
            # Efficient generalized (leave-one-out) cross-validation.
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            if self.store_cv_values:
                # cv_values_ only exists for the GCV path.
                # Bug fix: the message previously rendered with a double
                # space ("...True  are incompatible") because of the
                # string concatenation.
                raise ValueError("cv!=None and store_cv_values=True "
                                 "are incompatible")
            parameters = {'alpha': self.alphas}
            # FIXME: sample_weight must be split into training/validation data
            # too!
            # fit_params = {'sample_weight': sample_weight}
            fit_params = {}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
                              parameters, fit_params=fit_params, cv=self.cv)
            gs.fit(X, y)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha

        # Expose the coefficients of whichever underlying estimator won.
        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_

        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``(2*C)^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : integer or cross-validation generator, optional
        If None, Generalized Cross-Validation (efficient Leave-One-Out)
        will be used.
        If an integer is passed, it is the number of folds for KFold cross
        validation. Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::

            'auto' : use svd if n_samples > n_features or when X is a sparse
                     matrix, otherwise use eigen
            'svd' : force computation via singular value decomposition of X
                    (does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X^T X

        The 'auto' mode is the default and is intended to pick the cheaper
        option of the two depending upon the shape and format of the training
        data.

    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the `cv_values_` attribute (see
        below). This flag is only compatible with `cv=None` (i.e. using
        Generalized Cross-Validation).

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if `store_cv_values=True` and \
        `cv=None`). After `fit()` has been called, this attribute will \
        contain the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    alpha_ : float
        Estimated regularization parameter.

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeClassifierCV: Ridge classifier with built-in cross validation
    """
    # All behavior is inherited from _BaseRidgeCV; this subclass only adds
    # the RegressorMixin scoring interface and public documentation.
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation. Currently, only the n_features >
    n_samples case is handled efficiently.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``(2*C)^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : cross-validation generator, optional
        If None, Generalized Cross-Validation (efficient Leave-One-Out)
        will be used.

    class_weight : dict, optional
        Weights associated with classes in the form
        ``{class_label : weight}``. If not given, all classes are
        supposed to have weight one.

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_responses, n_alphas], optional
        Cross-validation values for each alpha (if `store_cv_values=True` and
        `cv=None`). After `fit()` has been called, this attribute will contain \
        the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    alpha_ : float
        Estimated regularization parameter

    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeCV: Ridge regression with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None):
        super(RidgeClassifierCV, self).__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit the ridge classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

        Returns
        -------
        self : object
            Returns self.
        """
        if sample_weight is None:
            sample_weight = 1.

        # Encode labels as a {-1, +1} indicator matrix (one-versus-all).
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)

        # modify the sample weights with the corresponding class weight
        sample_weight = (sample_weight *
                         compute_sample_weight(self.class_weight, y))

        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Classes in the order learned by the label binarizer.
        return self._label_binarizer.classes_
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import cross_validation, datasets, linear_model

# Use only the first 150 samples of the diabetes dataset.
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]

lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)

# Cross-validated mean score and its spread for each candidate alpha.
scores = list()
scores_std = list()

for alpha in alphas:
    lasso.alpha = alpha
    this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))

plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
# Horizontal reference line at the best mean CV score.
plt.axhline(np.max(scores), linestyle='--', color='.5')

##############################################################################
# Bonus: how much can you trust the selection of alpha?

# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)

print("Answer to the bonus question:",
      "how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
          format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
alurban/mentoring | tidal_disruption/scripts/kepler_angular_momentum.py | 1 | 2501 | # Imports.
import numpy as np
from numpy import pi
import matplotlib.pyplot as plt
import matplotlib.patheffects as PE
from matplotlib import ticker

# Physical constants.
G = 6.67408e-11  # Newton's constant in m^3 / kg / s
MSun = 1.989e30  # Solar mass in kg
M = 1.4 * MSun  # Mass of each neutron star in this example

# Set array of GW frequency values.
# NOTE(review): for a circular binary the GW frequency is twice the orbital
# frequency, so the orbital angular frequency is pi * f_GW — the formulas
# below appear to use that convention; confirm.
f_GW = np.linspace(1e-4, 4000, 500)

# For each GW frequency, compute the orbital separation in meters,
# the orbital velocity, and the angular momentum of stable circular
# orbits.
a = ( G * (2*M) / (pi * f_GW)**2 )**(1./3)  # Kepler's third law (total mass 2M)
v = ( 2 * pi * G * M * f_GW )**(1./3) / 299792458.  # relative velocity, in units of c
L = ( G**2 * M**5 / (2 * pi * f_GW) )**(1./3)
# Total orbital energy expressed through L — TODO confirm the prefactor.
E = - ( G * M**(5./2) / (2 * L) )**2

# Construct a figure.
fig = plt.figure( figsize=(6, 7.5) )

# Plot the orbital separation as a function of GW frequency.
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(f_GW, a/1e3, 'k', linewidth=2.)
# NOTE(review): three stacked fills (0-11, 0-12, 0-13 km) shade the
# plausible neutron-star radius band with graded opacity — presumably
# intentional; verify.
ax1.fill_between(f_GW, 0, 11, facecolor='Tomato', edgecolor='Tomato', alpha=0.5)
ax1.fill_between(f_GW, 0, 12, facecolor='Tomato', edgecolor='Tomato', alpha=0.5)
ax1.fill_between(f_GW, 0, 13, facecolor='Tomato', edgecolor='Tomato', alpha=0.5)
ax1.annotate('neutron star radius', xy=(2000, 6), xycoords='data', size=12, ha="center", va="center",
             path_effects=[PE.withStroke(linewidth=3, foreground="w")])
ax1.set_xlim([0, 4000])
ax1.set_ylim([0, 100])
ax1.set_ylabel('orbital separation (km)')
ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
plt.setp(ax1.get_xticklabels(), visible=False)

# Plot the orbital velocity as a fraction of the speed of light.
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(f_GW, v, 'k', linewidth=2.)
ax2.set_xlim([0, 4000])
ax2.set_ylim([0, 1])
ax2.set_ylabel('orbital velocity ($c$)')
ax2.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.2g"))
plt.setp(ax2.get_xticklabels(), visible=False)

# Plot the total angular momentum as a function of frequency.
ax3 = fig.add_subplot(3, 1, 3)
ax3.plot(f_GW, L/1e42, 'k', linewidth=2.)
# Twin axis: total energy (dashed) shares the frequency axis.
ax4 = ax3.twinx()
ax4.plot(f_GW, E/1e45, 'k--', linewidth=2.)
ax3.set_xlim([0, 4000])
ax3.set_xlabel('gravitational wave frequency (Hz)')
ax3.xaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
ax3.set_ylim([0, 10])
ax3.set_ylabel('angular momentum (10$^{42}$ J$\cdot$s)')
ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))
ax4.set_ylabel('total energy (10$^{45}$ J)')
ax4.yaxis.set_major_formatter(ticker.FormatStrFormatter("%d"))

# Save the figure.
plt.savefig('kepler_angular_momentum.pdf')
| gpl-3.0 |
louispotok/pandas | pandas/tests/reshape/merge/test_join.py | 3 | 31341 | # pylint: disable=E1103
from warnings import catch_warnings
from numpy.random import randn
import numpy as np
import pytest
import pandas as pd
from pandas.compat import lrange
import pandas.compat as compat
from pandas.util.testing import assert_frame_equal
from pandas import DataFrame, MultiIndex, Series, Index, merge, concat
from pandas._libs import join as libjoin
import pandas.util.testing as tm
from pandas.tests.reshape.merge.test_merge import get_test_data, N, NGROUPS
a_ = np.array  # short alias used when building expected indexer arrays below
class TestJoin(object):
    def setup_method(self, method):
        # Two frames sharing integer key columns plus random payload columns;
        # df2 is smaller and uses fewer groups so joins produce misses.
        # aggregate multiple columns
        self.df = DataFrame({'key1': get_test_data(),
                             'key2': get_test_data(),
                             'data1': np.random.randn(N),
                             'data2': np.random.randn(N)})

        # exclude a couple keys for fun
        self.df = self.df[self.df['key2'] > 1]

        self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
                              'key2': get_test_data(ngroups=NGROUPS // 2,
                                                    n=N // 5),
                              'value': np.random.randn(N // 5)})

        # target/source pair for the join-on-string-column tests.
        index, data = tm.getMixedTypeDict()
        self.target = DataFrame(data, index=index)

        # Join on string value
        self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
                                index=data['C'])
    def test_cython_left_outer_join(self):
        # Exercise the low-level C join routine directly on group-id arrays.
        left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
        right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
        max_group = 5

        ls, rs = libjoin.left_outer_join(left, right, max_group)

        # Expected indexers, built from stable sorts of each side; -1 marks
        # left rows with no match on the right.
        exp_ls = left.argsort(kind='mergesort')
        exp_rs = right.argsort(kind='mergesort')

        exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
                     6, 6, 7, 7, 8, 8, 9, 10])
        exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
                     4, 5, 4, 5, 4, 5, -1, -1])

        exp_ls = exp_ls.take(exp_li)
        exp_ls[exp_li == -1] = -1

        exp_rs = exp_rs.take(exp_ri)
        exp_rs[exp_ri == -1] = -1

        tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
        tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
    def test_cython_right_outer_join(self):
        # A right outer join is a left outer join with the operands swapped.
        left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
        right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
        max_group = 5

        rs, ls = libjoin.left_outer_join(right, left, max_group)

        exp_ls = left.argsort(kind='mergesort')
        exp_rs = right.argsort(kind='mergesort')

        #            0        1   1   1
        exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
                     #            2   2   4
                     6, 7, 8, 6, 7, 8, -1])
        exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
                     4, 4, 4, 5, 5, 5, 6])

        exp_ls = exp_ls.take(exp_li)
        exp_ls[exp_li == -1] = -1

        exp_rs = exp_rs.take(exp_ri)
        exp_rs[exp_ri == -1] = -1

        tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
        tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
    def test_cython_inner_join(self):
        # Inner join drops unmatched rows on both sides (note the extra 4 on
        # the right has no partner on the left, and vice versa for 3).
        left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
        right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
        max_group = 5

        ls, rs = libjoin.inner_join(left, right, max_group)

        exp_ls = left.argsort(kind='mergesort')
        exp_rs = right.argsort(kind='mergesort')

        exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
                     6, 6, 7, 7, 8, 8])
        exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
                     4, 5, 4, 5, 4, 5])

        exp_ls = exp_ls.take(exp_li)
        exp_ls[exp_li == -1] = -1

        exp_rs = exp_rs.take(exp_ri)
        exp_rs[exp_ri == -1] = -1

        tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
        tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key1.bar' in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key2.bar' in joined
    def test_join_on(self):
        target = self.target
        source = self.source

        # Joining on column 'C' should leave the target's own columns intact.
        merged = target.join(source, on='C')
        tm.assert_series_equal(merged['MergedA'], target['A'],
                               check_names=False)
        tm.assert_series_equal(merged['MergedD'], target['D'],
                               check_names=False)

        # join with duplicates (fix regression from DataFrame/Matrix merge)
        df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
        df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
        joined = df.join(df2, on='key')
        expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
                              'value': [0, 0, 1, 1, 2]})
        assert_frame_equal(joined, expected)

        # Test when some are missing
        df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
                         columns=['one'])
        df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
                         columns=['two'])
        df_c = DataFrame([[1], [2]], index=[1, 2],
                         columns=['three'])
        joined = df_a.join(df_b, on='one')
        joined = joined.join(df_c, on='one')
        # key 3 has no match in df_b/df_c, so its joined values are NaN
        assert np.isnan(joined['two']['c'])
        assert np.isnan(joined['three']['c'])

        # merge column not present
        pytest.raises(KeyError, target.join, source, on='E')

        # overlap: joining when a column name collides must raise
        source_copy = source.copy()
        source_copy['A'] = 0
        pytest.raises(ValueError, target.join, source_copy, on='A')
# merge(left_on=..., right_index=True) must reject a right index whose
# number of levels does not match the left key.
def test_join_on_fails_with_different_right_index(self):
with pytest.raises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
# Mirror of the previous test: multi-level left index vs single right key.
def test_join_on_fails_with_different_left_index(self):
with pytest.raises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
# Mismatched key counts (two left keys vs one right key) must raise.
def test_join_on_fails_with_different_column_counts(self):
with pytest.raises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
# merge() only accepts DataFrame-like operands; each bad type should raise
# a ValueError whose message mentions the offending type.
def test_join_on_fails_with_wrong_object_type(self):
# GH12081
wrongly_typed = [Series([0, 1]), 2, 'str', None, np.array([0, 1])]
df = DataFrame({'a': [1, 1]})
for obj in wrongly_typed:
with tm.assert_raises_regex(ValueError, str(type(obj))):
merge(obj, df, left_on='a', right_on='a')
with tm.assert_raises_regex(ValueError, str(type(obj))):
merge(df, obj, left_on='a', right_on='a')
# Passing the key as a Series (vector) instead of a column name should
# produce the same result as joining on the in-frame column.
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
# Joining against an empty (reindexed-to-nothing) frame: left join keeps
# all columns but fills them with NA; inner join yields zero rows.
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
# how='inner' with on= should equal the left join restricted to rows
# whose right-hand value is present.
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notna()]
tm.assert_series_equal(joined['key'], expected['key'],
check_dtype=False)
tm.assert_series_equal(joined['value'], expected['value'],
check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
# on=['key'] (single-element list) must be equivalent to on='key'.
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
# Joining a Series should behave like joining a one-column DataFrame.
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
# Regression: joining a named Series on a column used to fail (GH #638).
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
# Index joins across mixed dtypes (bool/str vs int/float) compared against
# the _join_by_hand reference implementation, in both orders.
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
assert_frame_equal(joined, expected)
# Index join where every column name overlaps: suffixes must be applied
# to both sides and the result must match the hand-built expectation.
def test_join_index_mixed_overlap(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
assert df1['B'].dtype == np.int64
assert df1['D'].dtype == np.bool_
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
# Smoke test: outer-joining onto an empty frame must not raise.
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
# Smoke test: joining frames with unconsolidated blocks (GH #331).
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
# Outer join on partially-overlapping MultiIndexes must union the indexes
# and preserve the level names, regardless of which level was sorted on.
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how='outer')
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
# Same check again after sorting on the second level instead.
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how='outer').sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
# Inner join of two key columns onto a MultiIndex, cross-checked against
# the equivalent merge() formulations.
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
# Merging a frame with hierarchical (MultiIndex) columns against a flat
# one: works, but should emit a UserWarning (GH 9455, 12219).
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ('b', 'mean') in result
assert 'b' in result
# Joins/merges must preserve each column's original float width rather
# than upcasting float32 columns to float64.
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype=np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes['a'] == 'float64'
assert joined.dtypes['b'] == 'float64'
assert joined.dtypes['c'] == 'float32'
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c})
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
assert rs.dtypes['a'] == 'int64'
assert rs.dtypes['b'] == 'float64'
assert rs.dtypes['c'] == 'float32'
assert rs.dtypes['md'] == 'float32'
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
# Multi-frame join ([idf2, idf3]) on a non-unique MultiIndex must agree
# with the chained two-frame merge() equivalent, for outer and inner.
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
expected = expected[result.columns]
expected['a'] = expected.a.astype('int64')
expected['b'] = expected.b.astype('int64')
assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519: with a fully-overlapping duplicated index, all four join
# types should produce identical results.
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
s = Series(np.repeat(np.arange(8), 2),
index=np.repeat(np.arange(8), 2), name='TEST')
inner = df.join(s, how='inner')
outer = df.join(s, how='outer')
left = df.join(s, how='left')
right = df.join(s, how='right')
assert_frame_equal(inner, outer)
assert_frame_equal(inner, left)
assert_frame_equal(inner, right)
# sort=True must order the result by join key while keeping the original
# positional index values; sort=False keeps the left frame's row order.
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
tm.assert_index_equal(joined.index, pd.Index(lrange(4)))
# Non-unique mixed-type (int/str) indexes are unorderable in py3 but
# joining them must still work (GH 12814).
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 3, 'a'])
df2 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame({'a': [1, 2, 3, 3, 4],
'b': [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, 'a'])
tm.assert_frame_equal(result, expected)
df3 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 2, 'a'])
df4 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 6, np.nan]},
index=[1, 2, 2, 'a'])
tm.assert_frame_equal(result, expected)
# Inner self-join against a duplicated PeriodIndex (GH #16871).
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range('2016-01-01', periods=16, freq='M')
df = DataFrame([i for i in range(len(index))],
index=index, columns=['pnum'])
df2 = concat([df, df])
result = df.join(df2, how='inner', rsuffix='_df2')
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=['pnum', 'pnum_df2'], index=df2.sort_index().index)
tm.assert_frame_equal(result, expected)
# Smoke test: suffixed join of groupby aggregates on mixed dtypes (GH #916).
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
# join() with a list of frames: exact reassembly, then overlapping
# partial frames under each join type, checked via reindex.
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
# Helper: reindex every frame to exp_index and compare to result.
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
# on= is not supported for list-of-frames joins
pytest.raises(ValueError, df_list[0].join, df_list[1:], on='a')
# Splitting a mixed-dtype frame into slices and joining them back must
# reproduce the original frame exactly.
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.loc[:, ['A', 'B']]
df2 = df.loc[:, ['C', 'D']]
df3 = df.loc[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
# Joins involving duplicated column labels: self-join with a suffix, and
# chained merges whose result carries repeated suffixed names (GH 4975).
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ['x_x', 'y_x', 'x_y',
'y_y', 'x_x', 'y_x', 'x_y', 'y_y']
assert_frame_equal(dta, expected)
# Panel.join for each join type (Panel is deprecated, hence the
# catch_warnings wrapper around every panel test).
def test_panel_join(self):
with catch_warnings(record=True):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.iloc[:2, :10, :3]
p2 = panel.iloc[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.iloc[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
# Overlapping panel items must be disambiguated with the given suffixes.
def test_panel_join_overlap(self):
with catch_warnings(record=True):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.loc[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.loc[['ItemB', 'ItemC']]
# Expected index is
#
# ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.loc[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.loc[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.loc[['ItemA']]
expected = no_overlap.join(p1_suf.join(p2_suf))
tm.assert_panel_equal(joined, expected)
# Panel.join with a list of panels: exact reassembly, inner/outer with
# partial overlap, and the unsupported-option error cases.
def test_panel_join_many(self):
with catch_warnings(record=True):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.iloc[:2], panel.iloc[2:6], panel.iloc[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.iloc[:2, :-5],
panel.iloc[2:6, 2:],
panel.iloc[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(p.iteritems())
joined = panels[0].join(panels[1:], how='inner')
expected = pd.Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = pd.Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
pytest.raises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
pytest.raises(ValueError, panels[0].join, panels[1:],
how='right')
# Validate a join result group-by-group: every key group present in the
# result must either match the corresponding source group exactly, or be
# entirely NA when the key came only from the other side of the join.
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests: join keys must never be NA in the result
for c in join_col:
assert(result[c].notna().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
# project the result group back onto each side's columns
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
# key missing from left: only legal for right/outer joins,
# and then the left-side columns must be all NA
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
# symmetric check for keys missing from the right side
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isna().all())
def _join_by_hand(a, b, how='left'):
    """Reference implementation of DataFrame.join built from reindex.

    Computes the joined index, reindexes both frames onto it, then copies
    the right-hand columns onto the left frame column by column.
    """
    shared_index = a.index.join(b.index, how=how)
    left = a.reindex(shared_index)
    right = b.reindex(shared_index)
    all_columns = a.columns.append(b.columns)
    for name, values in compat.iteritems(right):
        left[name] = values
    return left.reindex(columns=all_columns)
| bsd-3-clause |
shoyer/xarray | xarray/core/pdcompat.py | 2 | 2346 | # The remove_unused_levels defined here was copied based on the source code
# defined in pandas.core.indexes.muli.py
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.version import LooseVersion
import pandas as pd
# Allow ourselves to type-check against Panel even after pandas removed
# it (pd.Panel was dropped in pandas 0.25.0).
if LooseVersion(pd.__version__) < "0.25.0":
Panel = pd.Panel
else:
# Empty placeholder so `isinstance(obj, Panel)` checks still work
# (and are always False) on modern pandas.
class Panel: # type: ignore
pass
def count_not_none(*args) -> int:
    """Compute the number of non-None arguments.

    Copied from pandas.core.common.count_not_none (not part of the public API)
    """
    total = 0
    for arg in args:
        if arg is not None:
            total += 1
    return total
| apache-2.0 |
Interoute/API-fun-and-education | widget-cpu-graphs.py | 1 | 5450 | #! /usr/bin/env python
# Python script for the Interoute Virtual Data Centre API:
# Name: widget-cpu-graphs.py
# Purpose: GUI widget to display graphs of CPU loads on VMs in a VDC
# Requires: class VDCApiCall in the file vdc_api_call.py
# Use the repo: https://github.com/Interoute/API-fun-and-education
# Copyright (C) Interoute Communications Limited, 2014
# This program is used in the blog post:
# http://cloudstore.interoute.com/main/knowledge-centre/blog/vdc-api-programming-fun-part-05
# How to use (for Linux and Mac OS):
# (0) You must have Python version 2.6 or 2.7 installed in your machine
# (1) Create a configuration file '.vdcapi' for access to the VDC API according to the instructions at
# http://cloudstore.interoute.com/main/knowledge-centre/library/vdc-api-introduction-api
# (2) Put this file and the file vdc_api_call.py in any location
# (3) You can run this file using the command 'python widget-cpu-graphs.py&'
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.figure as mplfig
import matplotlib.pyplot as plt
from Tkinter import *
import vdc_api_call as vdc
import json
import os
import datetime
import time
import ast
from collections import deque
class Application(Frame):
"""Tkinter frame that polls the VDC API and plots per-VM CPU load over time."""
# Redraw the rolling CPU plot from self.cpuData and reschedule itself
# every self.plot_interval seconds via Tk's after().
def plot_update(self):
test=self.update_cpu_data()
if not(test): # test = 0 so API connection was not made to get fresh data
print("%s: ERROR in plot_update: No API connection or No data returned" % (datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
self.a.text(min(0.0,-self.plot_interval*(self.plot_points-1)/2.0),0.5,"ERROR: No API connection")
self.fig.canvas.draw()
self.after(self.plot_interval*1000,self.plot_update)
return
self.fig.clf() #clear the current plot
self.a = self.fig.add_subplot(111) #create a new plot
vm_names = map(lambda x: x[0],self.cpuData[0]) #create a list of VM names
#the data to be plotted - this line flattens the data structure (ie. removes a level of list brackets)
data = sum(self.cpuData,[])
current_time = data[-1][1][0]
# x axis is seconds relative to the newest sample (0 = now)
self.a.set_xlim([-self.plot_interval*(self.plot_points-1),0])
self.a.set_ylim([0,max(10.0,1.1*max(map(lambda x: x[1][1], data)))])
for name in vm_names:
data_per_vm = [d[1] for d in data if d[0]==name]
try:
self.a.plot([-(current_time-d[0]).seconds for d in data_per_vm], [d[1] for d in data_per_vm], label=name, linewidth=3)
except ValueError:
pass #This will trigger when 'cpuused' data is missing for a VM
self.a.legend(loc="center left",prop={'size':8})
self.fig.canvas.draw()
self.after(self.plot_interval*1000,self.plot_update)
# Fetch one sample of CPU usage per running VM; returns 1 on success,
# 0 on any failure (API unreachable or empty response).
def update_cpu_data(self):
# Only data for 'Running' VM will be captured, so non-Running VM will not appear in the plot
timenow=datetime.datetime.now()
try:
result = self.api.listVirtualMachines({})
testdict = result['virtualmachine'][0] #this should throw exception if result has no content
self.cpuData.append([[vm['name'],[timenow,self.get_cpuused(vm)]]
for vm in result['virtualmachine'] if vm['state']=='Running'] )
return 1
except:
# NOTE(review): bare except also swallows KeyboardInterrupt etc.
return 0 # data not returned (mostly when connection to API fails)
#Test if cpuused is available for returned data about a VM
#Returns integer value of %age (the trailing '%' is stripped) or 'NA'
def get_cpuused(self,vm):
if 'cpuused' in vm:
return int(vm['cpuused'][:-1])
else:
return 'NA'
#this method is called when the 'REFRESH' button is pressed
def refresh_plot(self):
self.plot_update()
# Build the QUIT and REFRESH buttons.
def createWidgets(self):
self.QUIT = Button(self)
self.QUIT["text"] = "QUIT"
self.QUIT["fg"] = "red"
self.QUIT["command"] = self.quit
self.QUIT.pack({"side": "right"})
self.refresh = Button(self)
# NOTE(review): trailing comma makes this a 1-tuple, not a string —
# looks unintentional; confirm the button label renders as expected.
self.refresh["text"] = "REFRESH",
self.refresh["command"] = self.refresh_plot
self.refresh.pack({"side": "left"})
# Read API credentials from ~/.vdcapi, build the API client, and set up
# the matplotlib canvas embedded in the Tk frame.
def __init__(self, master=None):
Frame.__init__(self, master)
config_file = os.path.join(os.path.expanduser('~'), '.vdcapi')
if os.path.isfile(config_file):
with open(config_file) as fh:
data = fh.read()
config = json.loads(data)
api_url = config['api_url']
apiKey = config['api_key']
secret = config['api_secret']
# Create the api access object
self.api = vdc.VDCApiCall(api_url, apiKey, secret)
self.plot_interval = 10
self.plot_points = 100
# Create deque object to hold cpu data (bounded: keeps last plot_points samples)
self.cpuData = deque([],self.plot_points)
# Initialise the plot
self.fig = mplfig.Figure(figsize=(9,4), dpi=100)
self.a = self.fig.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.fig, master=self)
self.plot_update()
self.fig.canvas.draw()
self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
self.pack()
self.createWidgets()
# Script entry point: build the Tk root window, run the widget's event
# loop, and tear the window down when the loop exits.
root = Tk()
root.title("VM CPU Load Graph widget")
app = Application(master=root)
app.mainloop()
root.destroy()
| apache-2.0 |
MartinDelzant/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
NicoRahm/CGvsPhoto | CGvsPhoto/model.py | 1 | 57552 | """
The ``model`` module
======================
Contains the class Model which implements the core model for CG detection,
training, testing and visualization functions.
"""
import os
import time
import random
from . import image_loader as il
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.colors as mcolors
import csv
import configparser
import numpy as np
from PIL import Image
# Module-level configuration: which TF device to place ops on, and an
# environment tag (presumably switches server/local paths elsewhere —
# TODO confirm against the rest of the module).
GPU = '/gpu:0'
config = 'server'
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import accuracy_score as acc
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.preprocessing import normalize
import pickle
# seed initialisation: derived from wall-clock time, then printed so a
# run can be reproduced by re-using the reported seed
print("\n random initialisation ...")
random_seed = int(time.time() % 10000 )
random.seed(random_seed) # for reproducibility
print(' random seed =', random_seed)
# tool functions
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
# scalar summaries: mean, stddev, max and min of the tensor
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
# full distribution of the values
tf.summary.histogram('histogram', var)
# Emit TensorBoard image summaries for the first three channels of a
# 4-D activation tensor (one image per channel; further channels are
# commented out).
def image_summaries(var, name):
tf.summary.image(name + '_1', var[:,:,:,0:1], max_outputs = 1)
tf.summary.image(name + '_2', var[:,:,:,1:2], max_outputs = 1)
tf.summary.image(name + '_3', var[:,:,:,2:3], max_outputs = 1)
# tf.summary.image(name + '_4', var[:,:,:,3:4], max_outputs = 1)
# tf.summary.image(name + '_5', var[:,:,:,4:5], max_outputs = 1)
# tf.summary.image(name + '_6', var[:,:,:,5:6], max_outputs = 1)
# tf.summary.image(name + '_7', var[:,:,:,6:7], max_outputs = 1)
# tf.summary.image(name + '_8', var[:,:,:,7:8], max_outputs = 1)
# Emit TensorBoard image summaries for the first six output filters of a
# convolution kernel (input channel 0 only); tf.stack adds the batch dim.
def filter_summary(filters, name):
tf.summary.image(name + '_1', tf.stack([filters[:,:,0,0:1]]), max_outputs = 1)
tf.summary.image(name + '_2', tf.stack([filters[:,:,0,1:2]]), max_outputs = 1)
tf.summary.image(name + '_3', tf.stack([filters[:,:,0,2:3]]), max_outputs = 1)
tf.summary.image(name + '_4', tf.stack([filters[:,:,0,3:4]]), max_outputs = 1)
tf.summary.image(name + '_5', tf.stack([filters[:,:,0,4:5]]), max_outputs = 1)
tf.summary.image(name + '_6', tf.stack([filters[:,:,0,5:6]]), max_outputs = 1)
# tf.summary.image(name + '_7', tf.stack([filters[:,:,0,6:7]]), max_outputs = 1)
# tf.summary.image(name + '_8', tf.stack([filters[:,:,0,7:8]]), max_outputs = 1)
def weight_variable(shape, nb_input, seed = None):
    """Create a weight Variable initialized from a truncated normal.

    Uses He initialization: the standard deviation is sqrt(2 / nb_input),
    appropriate for ReLU layers.

    :param shape: shape of the weight tensor
    :param nb_input: number of inputs feeding the layer (fan-in)
    :param seed: optional per-call RNG seed; when None, falls back to the
        module-level random_seed

    Bug fix: ``seed`` was previously accepted but silently ignored — the
    module-level random_seed was always used, so callers could not control
    the initialization independently.
    """
    sigma = np.sqrt(2 / nb_input)
    effective_seed = random_seed if seed is None else seed
    initial = tf.truncated_normal(shape, stddev=sigma, seed=effective_seed)
    return tf.Variable(initial)
def bias_variable(shape):
"""Creates and initializes (truncated normal distribution with 0.5 mean) a variable bias Tensor with a defined shape"""
initial = tf.truncated_normal(shape, mean = 0.5, stddev=0.1, seed = random_seed)
return tf.Variable(initial)
# Thin wrappers around TF conv/pool ops, all with stride equal to the
# window size (for pooling) and SAME padding.
def conv2d(x, W):
"""Returns the 2D convolution between input x and the kernel W"""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""Returns the result of max-pooling on input x with a 2x2 window"""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def avg_pool_2x2(x):
"""Returns the result of average-pooling on input x with a 2x2 window"""
return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def max_pool_10x10(x):
"""Returns the result of max-pooling on input x with a 10x10 window"""
return tf.nn.max_pool(x, ksize=[1, 10, 10, 1],
strides=[1, 10, 10, 1], padding='SAME')
def avg_pool_10x10(x):
"""Returns the result of average-pooling on input x with a 10x10 window"""
return tf.nn.avg_pool(x, ksize=[1, 10, 10, 1],
strides=[1, 10, 10, 1], padding='SAME')
def histogram(x, nbins):
"""Returns the Tensor containing the nbins values of the normalized histogram of x"""
# fixed value range [-1, 1]; float32 output so it can feed further ops
h = tf.histogram_fixed_width(x, value_range = [-1.0,1.0],
nbins = nbins, dtype = tf.float32)
return(h)
def gaussian_func(mu, x, n, sigma):
    """Returns the average of x composed with a gaussian function

    :param mu: The mean of the gaussian function
    :param x: Input values
    :param n: Number of input values (normalization constant)
    :param sigma: Standard deviation (scale) of the gaussian function
    :type mu: float
    :type x: Tensor
    :type n: int
    :type sigma: float
    """
    # NOTE: tf.contrib.distributions.Normal takes sigma as a *standard
    # deviation*, not a variance (legacy contrib API, removed in later TF).
    gauss = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
    # Sum of the per-value kernel responses, normalized by the value count n.
    return(tf.reduce_sum(gauss.pdf(x)/n))
def gaussian_kernel(x, nbins = 8, values_range = [0, 1], sigma = 0.1, image_size = 100):
    """Estimate a soft (gaussian) histogram of x with nbins + 1 kernels.

    :param x: Input values (supposed to be an image)
    :param nbins: Number of bins; the kernels are centered on nbins + 1 points
    :param values_range: Range of the x values spanned by the kernel centers
    :param sigma: Width (standard deviation) of each gaussian kernel
    :param image_size: Side length of the square image x (for normalization)
    :type x: Tensor
    :type nbins: int
    :type values_range: table
    :type sigma: float
    :type image_size: int
    """
    centers = np.float32(np.linspace(values_range[0], values_range[1], nbins + 1))
    pixel_count = np.float32(image_size**2)

    def kernel_response(center):
        return gaussian_func(center, x, pixel_count, sigma)

    return tf.map_fn(kernel_response, centers)
def plot_gaussian_kernel(nbins = 8, values_range = [0, 1], sigma = 0.1):
    """Plots the gaussian kernels used for estimating the histogram.

    BUG FIX: the kernel centers now match gaussian_kernel() — nbins + 1 points
    evenly spaced over values_range *including* the upper endpoint. The old
    code used a spacing of range/(nbins + 1), so the plotted centers disagreed
    with the ones actually used by the model. The plotted curves also now use
    the normal-pdf exponent exp(-(x - mu)^2 / (2 sigma^2)) (the factor 2 was
    missing), so the displayed widths match the kernels of gaussian_func.

    :param nbins: Number of bins (nbins + 1 kernels are drawn)
    :param values_range: Range covered by the kernel centers
    :param sigma: Standard deviation of the gaussian kernels
    :type nbins: int
    :type values_range: table
    :type sigma: float
    """
    mu_list = np.linspace(values_range[0], values_range[1], nbins + 1)
    range_plot = np.linspace(values_range[0] - 0.1, values_range[1] + 0.1, 1000)
    plt.figure()
    for mu in mu_list:
        # unnormalized gaussian shape centered on mu (heights are not pdfs)
        plt.plot(range_plot, np.exp(-(range_plot - mu)**2 / (2 * sigma**2)))
    plt.title("Gaussian kernels used for estimating the histograms")
    plt.show()
def classic_histogram_gaussian(x, k, nbins = 8, values_range = [0, 1], sigma = 0.6):
    """Gaussian-histogram features, per channel, for a batch of k-channel images."""
    def per_image(img):
        # one soft histogram per channel, stacked into a (k, nbins + 1) tensor
        return tf.stack([gaussian_kernel(img[:, :, c], nbins, values_range, sigma)
                         for c in range(k)])
    return tf.map_fn(per_image, x)
def stat(x):
    """Statistical features of an image x: mean, min, max and variance, stacked."""
    mean = tf.reduce_mean(x)
    variance = tf.reduce_mean((x - mean)**2)
    return tf.stack([mean, tf.reduce_min(x), tf.reduce_max(x), variance])
def compute_stat(x, k):
    """Per-channel statistics (mean, min, max, variance) for a batch of k-channel images.

    Reduces over the spatial axes [1, 2] and returns the four statistics
    stacked on the last axis (batch, channel, stat).
    """
    mean = tf.reduce_mean(x, axis=[1,2])
    minimum = tf.reduce_min(x, axis=[1,2])
    maximum = tf.reduce_max(x, axis=[1,2])
    centered = x - tf.reduce_mean(x, axis=[1,2], keep_dims = True)
    variance = tf.reduce_mean(centered**2, axis=[1,2])
    return tf.transpose(tf.stack([mean, minimum, maximum, variance]), [1,2,0])
class Model:
"""
Class Model
======================
Defines a model for single-image CG detection and numerous methods to :
- Create the TensorFlow graph of the model
- Train the model on a specific database
- Reload past weights
- Test the model (simple classification, full-size images with boosting and splicing)
- Visualize some images and probability maps
"""
def __init__(self, database_path, image_size, config = 'Personal', filters = [32, 64],
feature_extractor = 'Stats', remove_context = False,
nbins = 10, remove_filter_size = 3, batch_size = 50,
using_GPU = False, only_green = True):
"""Defines a model for single-image classification
:param database_path: Absolute path to the default patch database (training, validation and testings are performed on this database)
:param image_size: Size of the patches supposed squared
:param config: Name of the section to use in the config.ini file for configuring directory paths (weights, training summaries and visualization dumping)
:param filters: Table with the number of output filters of each layer
:param feature_extractor: Two choices 'Stats' or 'Hist' for the feature extractor
:param nbins: Number of bins on the histograms. Used only if the feature_extractor parameter is 'Hist'
:param batch_size: The size of the batch for training
:param using_GPU: Whether to use GPU for computation or not
:type database_path: str
:type image_size: int
:type config: str
:type filters: table
:type feature_extractor: str
:type nbins: int
:type batch_size: int
:type using_GPU: bool
"""
clear = lambda: os.system('clear')
clear()
print(' tensorFlow version: ', tf.__version__)
# read the configuration file
conf = configparser.ConfigParser()
conf.read('config.ini')
if config not in conf:
raise ValueError(config + ' is not in the config.ini file... Please create the corresponding section')
self.dir_ckpt = conf[config]['dir_ckpt']
self.dir_summaries = conf[config]['dir_summaries']
self.dir_visualization = conf[config]['dir_visualization']
print(' Check-points directory : ' + self.dir_ckpt)
print(' Summaries directory : ' + self.dir_summaries)
print(' Visualizations directory : ' + self.dir_visualization)
# setting the parameters of the model
self.nf = filters
self.nl = len(self.nf)
self.filter_size = 3
self.feature_extractor = 'Stats'
if self.feature_extractor != 'Stats' and self.feature_extractor != 'Hist':
raise ValueError('''Feature extractor must be 'Stats' or 'Hist' ''')
self.database_path = database_path
self.image_size = image_size
self.batch_size = batch_size
self.nbins = nbins
self.using_GPU = using_GPU
self.remove_context = remove_context
self.remove_filter_size = remove_filter_size
self.only_green = only_green
# getting the database
self.import_database()
self.nb_channels = self.data.nb_channels
# create the TensorFlow graph
if using_GPU:
with tf.device(GPU):
self.create_graph(nb_class = self.nb_class,
feature_extractor = self.feature_extractor,
nl = self.nl, nf = self.nf, filter_size = self.filter_size)
else:
self.create_graph(nb_class = self.nb_class,
feature_extractor = self.feature_extractor,
nl = self.nl, nf = self.nf, filter_size = self.filter_size)
def import_database(self):
"""Creates a Database_loader to load images from the distant database"""
# load data
print(' import data : image_size = ' +
str(self.image_size) + 'x' + str(self.image_size) + '...')
self.data = il.Database_loader(self.database_path, self.image_size,
proportion = 1, only_green=self.only_green)
self.nb_class = self.data.nb_class
    def create_graph(self, nb_class, nl = 2, nf = [32, 64], filter_size = 3,
                     feature_extractor = 'Stats'):
        """Creates the TensorFlow graph

        Builds nl convolutional layers, a feature-extraction layer ('Hist'
        gaussian histograms or 'Stats' per-channel statistics), a 1024-unit
        dense layer with dropout and a linear readout, plus the cross-entropy
        loss, Adam train op and accuracy op. All relevant tensors/ops are
        stored as attributes on self for later use by the other methods.

        :param nb_class: Number of output classes
        :param nl: Number of convolutional layers
        :param nf: Number of output filters of each layer
        :param filter_size: Kernel size of the convolutional layers
        :param feature_extractor: 'Stats' or 'Hist'
        """
        print(' create model ...')
        # input layer. One entry is a float size x size, 3-channels image.
        # None means that the number of such vector can be of any lenght.
        if feature_extractor == 'Hist':
            print(' Model with histograms.')
        else:
            print(' Model with statistics.')
        graph = tf.Graph()
        with graph.as_default():
            with tf.name_scope('Input_Data'):
                x = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.nb_channels])
                self.x = x
                # reshape the input data:
                x_image = tf.reshape(x, [-1,self.image_size, self.image_size, self.nb_channels])
                with tf.name_scope('Image_Visualization'):
                    tf.summary.image('Input_Data', x_image)
            # first conv net layer
            if self.remove_context:
                print(' Creating layer 1 - Shape : ' + str(self.remove_filter_size) + 'x' +
                      str(self.remove_filter_size) + 'x' + str(self.nb_channels) + 'x' + str(nf[0]))
            else:
                print(' Creating layer 1 - Shape : ' + str(self.filter_size) + 'x' +
                      str(self.filter_size) + 'x' + str(self.nb_channels) + 'x' + str(nf[0]))
            with tf.name_scope('Conv1'):
                with tf.name_scope('Weights'):
                    if self.remove_context:
                        W_conv1 = weight_variable([self.remove_filter_size, self.remove_filter_size, self.nb_channels, nf[0]],
                                                  nb_input = self.remove_filter_size*self.remove_filter_size*self.nb_channels,
                                                  seed = random_seed)
                    else:
                        W_conv1 = weight_variable([self.filter_size, self.filter_size, self.nb_channels, nf[0]],
                                                  nb_input = self.filter_size*self.filter_size*self.nb_channels,
                                                  seed = random_seed)
                    self.W_conv1 = W_conv1
                with tf.name_scope('Bias'):
                    b_conv1 = bias_variable([nf[0]])
                # relu on the conv layer
                # NOTE(review): in remove_context mode the first layer stays
                # linear (no bias, no ReLU), presumably so the constraint ops
                # below remain meaningful — confirm with the training code.
                if self.remove_context:
                    h_conv1 = conv2d(x_image, W_conv1)
                else:
                    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1,
                                         name = 'Activated_1')
            self.h_conv1 = h_conv1
            # running lists of the per-layer weights / biases / activations
            self.W_convs = [W_conv1]
            self.b_convs = [b_conv1]
            self.h_convs = [h_conv1]
            # TensorBoard summaries for the first-layer activations and kernels
            image_summaries(self.h_convs[0], 'hconv1')
            filter_summary(self.W_convs[0], 'Wconv1')
            # remaining convolutional layers
            for i in range(1, nl):
                print(' Creating layer ' + str(i+1) + ' - Shape : ' + str(self.filter_size) + 'x' +
                      str(self.filter_size) + 'x' + str(nf[i-1]) + 'x' + str(nf[i]))
                # other conv
                with tf.name_scope('Conv' + str(i+1)):
                    with tf.name_scope('Weights'):
                        W_conv2 = weight_variable([self.filter_size, self.filter_size, nf[i-1], nf[i]],
                                                  self.filter_size*self.filter_size*nf[i-1])
                        self.W_convs.append(W_conv2)
                    with tf.name_scope('Bias'):
                        b_conv2 = bias_variable([nf[i]])
                        self.b_convs.append(b_conv2)
                    h_conv2 = tf.nn.relu(conv2d(self.h_convs[i-1], W_conv2) + b_conv2,
                                         name = 'Activated_2')
                    self.h_convs.append(h_conv2)
            print(' Creating feature extraction layer')
            nb_filters = nf[nl-1]
            if self.feature_extractor == 'Hist':
                # Histograms: one soft (gaussian) histogram per output filter
                nbins = self.nbins
                size_flat = (nbins + 1)*nb_filters
                range_hist = [0,1]
                sigma = 0.07
                with tf.name_scope('Gaussian_Histogram'):
                    hist = classic_histogram_gaussian(self.h_convs[nl-1], k = nb_filters,
                                                      nbins = nbins,
                                                      values_range = range_hist,
                                                      sigma = sigma)
                    self.hist = hist
                flatten = tf.reshape(hist, [-1, size_flat], name = "Flatten_Hist")
                self.flatten = flatten
            else:
                # Statistics: mean/min/max/variance per output filter
                nb_stats = 4
                size_flat = nb_filters*nb_stats
                with tf.name_scope('Simple_statistics'):
                    s = compute_stat(self.h_convs[nl-1], nb_filters)
                    self.stat = s
                flatten = tf.reshape(s, [-1, size_flat], name = "Flattened_Stat")
                self.flatten = flatten
            print(' Creating MLP ')
            # Densely Connected Layer
            # we add a fully-connected layer with 1024 neurons
            with tf.variable_scope('Dense1'):
                with tf.name_scope('Weights'):
                    W_fc1 = weight_variable([size_flat, 1024],
                                            nb_input = size_flat)
                with tf.name_scope('Bias'):
                    b_fc1 = bias_variable([1024])
                # put a relu
                h_fc1 = tf.nn.relu(tf.matmul(flatten, W_fc1) + b_fc1,
                                   name = 'activated')
            # dropout (keep_prob is fed at run time: 0.65 in training, 1.0 at test)
            with tf.name_scope('Dropout1'):
                keep_prob = tf.placeholder(tf.float32)
                self.keep_prob = keep_prob
                h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
            self.h_fc1 = h_fc1
            # readout layer (raw logits; softmax is applied inside the loss)
            with tf.variable_scope('Readout'):
                with tf.name_scope('Weights'):
                    W_fc3 = weight_variable([1024, nb_class],
                                            nb_input = 1024)
                with tf.name_scope('Bias'):
                    b_fc3 = bias_variable([nb_class])
                y_conv = tf.matmul(h_fc1_drop, W_fc3) + b_fc3
                self.y_conv = y_conv
            # support for the learning label (one-hot)
            y_ = tf.placeholder(tf.float32, [None, nb_class])
            self.y_ = y_
            # Define loss (cost) function and optimizer
            print(' setup loss function and optimizer ...')
            # softmax to have normalized class probabilities + cross-entropy
            with tf.name_scope('cross_entropy'):
                softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y_conv)
                with tf.name_scope('total'):
                    cross_entropy_mean = tf.reduce_mean(softmax_cross_entropy)
            tf.summary.scalar('cross_entropy', cross_entropy_mean)
            with tf.name_scope('train'):
                train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy_mean)
            if self.remove_context:
                # Constraint ops for the first layer, run by train() before each
                # step: zero the center weight of input channel 0 of every
                # kernel, rescale so the sum over the output-filter axis
                # (axis 3) is 1, then set the center weight to -1.
                center = int(self.remove_filter_size/2)
                self.zero_op = tf.scatter_nd_update(ref = self.W_convs[0], indices = tf.constant([[center,center,0,i] for i in range(nf[0])]), updates = tf.zeros(nf[0]))
                self.norm_op = tf.assign(ref = self.W_convs[0], value = tf.divide(self.W_convs[0],tf.reduce_sum(self.W_convs[0], axis = 3, keep_dims = True)))
                self.minus_one_op = tf.scatter_nd_update(ref = self.W_convs[0], indices = tf.constant([[center,center,0,i] for i in range(nf[0])]), updates = tf.constant([-1.0 for i in range(nf[0])]))
                self.norm = tf.reduce_sum(self.W_convs[0], axis = 3, keep_dims = True)
            self.train_step = train_step
            print(' test ...')
            # 'correct_prediction' is a function. argmax(y, 1), here 1 is for the axis number 1
            correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
            # 'accuracy' is a function: cast the boolean prediction to float and average them
            with tf.name_scope('accuracy'):
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                tf.summary.scalar('accuracy', accuracy)
            self.accuracy = accuracy
        self.graph = graph
        print(' model created.')
    def validation_testing(self, it, nb_iterations = 20, batch_size = 50,
                           plot_histograms = False, range_hist = [0.,1.],
                           selected_hist_nb = 8, run_name = '',
                           show_filters = True):
        """Computes validation accuracy during training and plots some visualization.

        Returns the accuracy on the validation data. Can also plot some histograms of the filtered images
        (if the Hist layer is selected) and the first layer's filters.

        :param it: The number of the iteration in the training process
        :param nb_iterations: The number of batches to process on the validation set
        :param batch_size: Batch size when loading the validation images
        :param plot_histograms: Whether to plot the histograms or not
        :param range_hist: The value range for plotting the histograms
        :param selected_hist_nb: The number of histograms to plot
        :param run_name: The name of the training run
        :param show_filters: Whether to show the first layer's filters
        :type it: int
        :type nb_iterations: int
        :type batch_size: int
        :type plot_histograms: bool
        :type range_hist: table
        :type selected_hist_nb: int
        :type run_name: str
        :type show_filters: bool
        """
        if show_filters:
            # Display the first-layer kernels (input channel 0) on a shared
            # color scale with one common colorbar.
            nb_height = 4
            nb_width = int(self.nf[0]/nb_height)
            img, axes = plt.subplots(nrows = nb_width, ncols = nb_height)
            gs1 = gridspec.GridSpec(nb_height, nb_width)
            for i in range(self.nf[0]):
                ax1 = plt.subplot(gs1[i])
                ax1.axis('off')
                # requires an active default session (called from train())
                im = plt.imshow(self.W_conv1[:,:,0,i].eval(), cmap = 'jet', vmin = -5, vmax = 5)
                ax1.set_xticklabels([])
                ax1.set_yticklabels([])
                ax1.autoscale(False)
                ax1.set_adjustable('box-forced')
                ax1.set_title("Filter " + str(i + 1), fontsize = 12.0)
            img.subplots_adjust(wspace = 0.1, hspace = 0.6, right = 0.7)
            cbar_ax = img.add_axes([0.75, 0.15, 0.03, 0.7])
            cbar = img.colorbar(im, ticks=[-5, 0, 5], cax=cbar_ax)
            cbar.ax.set_yticklabels(['< -5', '0', '> 5'])
            plt.show(img)
            plt.close()
        if plot_histograms and self.feature_extractor != 'Hist':
            print("Can't plot the histograms, feature extractor is 'Stats'...")
        validation_batch_size = batch_size
        validation_accuracy = 0
        # rewind the validation iterator so every call sees the same data
        self.data.validation_iterator = 0
        if plot_histograms:
            nb_CGG = 0
            hist_CGG = [np.zeros((self.nbins+1,)) for i in range(selected_hist_nb)]
            nb_real = 0
            hist_real = [np.zeros((self.nbins+1,)) for i in range(selected_hist_nb)]
        for _ in range( nb_iterations ) :
            batch_validation = self.data.get_batch_validation(batch_size=validation_batch_size,
                                                              crop = False,
                                                              random_flip_flop = True,
                                                              random_rotate = True)
            # keep_prob = 1.0 disables dropout for evaluation
            feed_dict = {self.x: batch_validation[0],
                         self.y_: batch_validation[1],
                         self.keep_prob: 1.0}
            validation_accuracy += self.accuracy.eval(feed_dict)
            if plot_histograms and self.feature_extractor == 'Hist':
                # Computing the mean histogram for each class
                hist_plot = self.hist.eval(feed_dict)
                for k in range(validation_batch_size):
                    # first one-hot component == 1. marks the 'Real' class
                    if batch_validation[1][k][0] == 1.:
                        nb_real +=1
                        is_real = True
                    else:
                        nb_CGG += 1
                        is_real = False
                    for j in range(selected_hist_nb):
                        for l in range(self.nbins+1):
                            if is_real:
                                hist_real[j][l] += hist_plot[k,j,l]
                            else:
                                hist_CGG[j][l] += hist_plot[k,j,l]
                # NOTE(review): this renormalization runs once per validation
                # batch on the *cumulative* sums, so only the last division
                # yields a true mean — verify the intended placement. In
                # practice this path is dead: train() always passes
                # plot_histograms=False.
                for p in range(selected_hist_nb):
                    hist_CGG[p] /= nb_CGG
                    hist_real[p] /= nb_real
        if plot_histograms and self.feature_extractor == 'Hist':
            # Plotting mean histogram for CGG
            fig = plt.figure(1)
            for k in range(selected_hist_nb):
                plt.subplot(selected_hist_nb/2, 2, k+1)
                plt.bar(np.linspace(range_hist[0], range_hist[1], self.nbins+1),
                        hist_CGG[k], width = 1/(self.nbins + 1))
                plt.plot(np.linspace(range_hist[0], range_hist[1], self.nbins+1),
                         hist_CGG[k], 'r')
            fig.suptitle("Mean histogram for CGG", fontsize=14)
            plt.show()
            plt.close()
            # Plotting mean histogram for Real
            fig = plt.figure(2)
            for k in range(selected_hist_nb):
                plt.subplot(selected_hist_nb/2, 2, k+1)
                plt.bar(np.linspace(range_hist[0], range_hist[1], self.nbins+1),
                        hist_real[k], width = 1/(self.nbins + 1))
                plt.plot(np.linspace(range_hist[0], range_hist[1],self.nbins+1),
                         hist_real[k], 'r')
            fig.suptitle("Mean histogram for Real", fontsize=14)
            plt.show()
            plt.close()
        validation_accuracy /= nb_iterations
        print(" step %d, training accuracy %g (%d validations tests)"%(it, validation_accuracy, validation_batch_size*nb_iterations))
        return(validation_accuracy)
    def train(self, nb_train_batch, nb_test_batch,
              nb_validation_batch, validation_frequency = 10, show_filters = False):
        """Trains the model on the selected database training set.

        Trains a blank single-image classifier (or one initialized with some
        pre-trained weights). The weights are saved along training, validation
        accuracy is computed, shown and saved at the end, and TensorBoard
        summaries are generated. Final single-image testing is also performed.

        :param nb_train_batch: The number of batches to train (can be on multiple epochs)
        :param nb_test_batch: The number of batches to test
        :param nb_validation_batch: The number of batch for validation
        :param validation_frequency: Performs validation testing every validation_frequency batches
        :param show_filters: Whether to show the first layer's filters at each validation step
        :type nb_train_batch: int
        :type nb_test_batch: int
        :type nb_validation_batch: int
        :type validation_frequency: int
        :type show_filters: bool
        """
        run_name = input(" Choose a name for the run : ")
        path_save = self.dir_ckpt + run_name
        acc_name = self.dir_summaries + run_name + "/validation_accuracy_" + run_name + ".csv"
        # computation time tick
        start_clock = time.clock()
        start_time = time.time()
        batch_clock = None
        # start a session
        print(' start session ...')
        with tf.Session(graph=self.graph, config=tf.ConfigProto(log_device_placement=self.using_GPU)) as sess:
            merged = tf.summary.merge_all()
            if not os.path.exists(self.dir_summaries + run_name):
                os.mkdir(self.dir_summaries + run_name)
            train_writer = tf.summary.FileWriter(self.dir_summaries + run_name,
                                                 sess.graph)
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            saver = tf.train.Saver()
            print(' variable initialization ...')
            # optionally resume from a previous checkpoint
            restore_weigths = input("\nRestore weight from previous session ? (y/N) : ")
            if restore_weigths == 'y':
                file_to_restore = input("\nName of the file to restore (Directory : " +
                                        self.dir_ckpt + ') : ')
                saver.restore(sess, self.dir_ckpt + file_to_restore)
                print('\n Model restored\n')
            # Train
            print(' train ...')
            start_clock = time.clock()
            start_time = time.time()
            validation_accuracy = []
            for i in range(nb_train_batch):
                # enforce constraints on first layer :
                if self.remove_context:
                    sess.run(self.zero_op)
                    sess.run(self.norm_op)
                    sess.run(self.minus_one_op)
                    print(self.W_conv1.eval()[:,:,0,0])
                # every validation_frequency batches, test the accuracy
                if i%validation_frequency == 0 :
                    # NOTE(review): both branches set plot_histograms to False,
                    # so histogram plotting is currently disabled on purpose(?)
                    if i%100 == 0:
                        plot_histograms = False
                    else:
                        plot_histograms = False
                    v = self.validation_testing(i, nb_iterations = nb_validation_batch,
                                                batch_size = self.batch_size,
                                                plot_histograms = plot_histograms,
                                                run_name = run_name,
                                                show_filters = show_filters)
                    validation_accuracy.append(v)
                # regular training step (keep_prob 0.65 enables dropout)
                batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
                feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.65}
                summary, _ = sess.run([merged, self.train_step], feed_dict = feed_dict)
                train_writer.add_summary(summary, i)
                # Saving weights every 100 batches
                if i%100 == 0:
                    path_save_batch = path_save + str(i) + ".ckpt"
                    print(' saving weights in file : ' + path_save_batch)
                    saver.save(sess, path_save_batch)
                    print(' OK')
                    # rough ETA from the time spent on the last 100 batches
                    if batch_clock is not None:
                        time_elapsed = (time.time()-batch_clock)
                        print(' Time last 100 batchs : ', time.strftime("%H:%M:%S",time.gmtime(time_elapsed)))
                        remaining_time = time_elapsed * int((nb_train_batch - i)/100)
                        print(' Remaining time : ', time.strftime("%H:%M:%S",time.gmtime(remaining_time)))
                    batch_clock = time.time()
            print(' saving validation accuracy...')
            file = open(acc_name, 'w', newline='')
            try:
                writer = csv.writer(file)
                for v in validation_accuracy:
                    writer.writerow([str(v)])
            finally:
                file.close()
            print(' done.')
            # final test
            print(' final test ...')
            test_accuracy = 0
            nb_iterations = nb_test_batch
            self.data.test_iterator = 0
            scores = np.zeros([nb_test_batch*self.batch_size,])
            y_test = np.zeros([nb_test_batch*self.batch_size,])
            for k in range( nb_iterations ) :
                batch_test = self.data.get_batch_test(self.batch_size, False, True, True)
                feed_dict = {self.x:batch_test[0], self.y_: batch_test[1], self.keep_prob: 1.0}
                test_accuracy += self.accuracy.eval(feed_dict)
                # class-1 score for the ROC curve from the normalized logits
                scores[k*self.batch_size:(k+1)*self.batch_size] = normalize(self.y_conv.eval(feed_dict))[:,1]
                y_test[k*self.batch_size:(k+1)*self.batch_size] = batch_test[1][:,1]
            test_accuracy /= nb_iterations
            print(" test accuracy %g"%test_accuracy)
            fpr, tpr, _ = roc_curve(y_test, scores)
            # NOTE(review): hard-coded, machine-specific dump path — confirm.
            filename = '/home/smg/v-nicolas/ROC/' + run_name + '.pkl'
            print('Saving tpr and fpr in file : ' + filename)
            pickle.dump((fpr, tpr), open(filename, 'wb'))
            if nb_train_batch > validation_frequency:
                # NOTE(review): the x-axis length assumes
                # validation_frequency == 10; with another frequency the
                # linspace length will not match len(validation_accuracy).
                plt.figure()
                plt.plot(np.linspace(0,nb_train_batch,int(nb_train_batch/10)), validation_accuracy)
                plt.title("Validation accuracy during training")
                plt.xlabel("Training batch")
                plt.ylabel("Validation accuracy")
                plt.show()
                plt.close()
        # done
        print(" computation time (cpu) :",time.strftime("%H:%M:%S", time.gmtime(time.clock()-start_clock)))
        print(" computation time (real):",time.strftime("%H:%M:%S", time.gmtime(time.time()-start_time)))
        print(' done.')
def show_histogram(self):
"""Plots histograms of the last layer outputs for some images"""
with tf.Session(graph=self.graph) as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
saver = tf.train.Saver()
print(' variable initialization ...')
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
print('\n Model restored\n')
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
conv = self.h_conv2.eval(feed_dict = feed_dict)
for i in range(self.batch_size):
plt.figure()
plt.hist(np.reshape(conv[i,:,:,0], (self.image_size*self.image_size,)))
plt.show()
def mean_histogram(self, nb_images = 5000):
print(" Showing the histograms of filtered images...")
with tf.Session(graph=self.graph) as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
saver = tf.train.Saver()
print(' variable initialization ...')
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
print('\n Model restored\n')
j = 0
nreal = 0
ncgg = 0
while j < nb_images:
batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
conv = self.h_conv1.eval(feed_dict = feed_dict)
nbins = 150
hist_values_CGG = np.zeros((nbins,))
hist_values_Real = np.zeros((nbins,))
for i in range(self.batch_size):
if batch[1][i][0] == 1:
# print(conv[i,:,:,15])
hist_values_Real += np.histogram(conv[i,:,:,1], bins = nbins, range = (0., 1.))[0]
nreal += 1
else:
# print(conv[i,:,:,15])
hist_values_CGG += np.histogram(conv[i,:,:,1], bins = nbins, range = (0., 1.))[0]
ncgg += 1
j+= self.batch_size
hist_values_CGG /= ncgg
hist_values_Real /= nreal
plt.figure()
plt.plot(np.linspace(0,1, nbins), hist_values_Real, color = 'b',
label = 'Real')
plt.plot(np.linspace(0,1, nbins), hist_values_CGG, color = 'r',
label = 'CGG')
plt.legend()
plt.show()
    def lda_training(self, nb_train_batch, nb_test_batch):
        """Trains a LDA classifier on top of the feature extractor.

        Restores the weights of the feature extractor and trains a new LDA classifier. The trained LDA can then be reused.
        Finally tests the pipeline on the test dataset.

        :param nb_train_batch: The number of batches to train (can be on multiple epochs)
        :param nb_test_batch: The number of batches to test
        :type nb_train_batch: int
        :type nb_test_batch: int
        """
        self.lda_classifier = LinearDiscriminantAnalysis()
        # start a session
        print(' start session ...')
        with tf.Session(graph=self.graph) as sess:
            saver = tf.train.Saver()
            print(' variable initialization ...')
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            file_to_restore = input("\nName of the file to restore (Directory : " +
                                    self.dir_ckpt + ') : ')
            saver.restore(sess, self.dir_ckpt + file_to_restore)
            # training the LDA classifier on features from the frozen extractor
            features = []
            labels = []
            for i in range(nb_train_batch):
                if (i%10 == 0):
                    print("Computing features for training batch " + str(i) + '/' + str(nb_train_batch))
                batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
                feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
                h = self.flatten.eval(feed_dict = feed_dict)
                features.append(h)
                # one-hot -> integer class labels
                labels.append(np.argmax(np.array(batch[1]), 1))
            # stack per-batch feature blocks into one (n_samples, n_features) array
            features = np.reshape(np.array(features), (self.batch_size*nb_train_batch, features[0].shape[1]))
            labels = np.reshape(np.array(labels), (self.batch_size*nb_train_batch,))
            print(features.shape)
            print(labels.shape)
            self.lda_classifier.fit(features, labels)
            print(' Testing ...')
            features_test = []
            labels_test = []
            for _ in range(nb_test_batch) :
                batch_test = self.data.get_batch_test(self.batch_size, False, True, True)
                feed_dict = {self.x:batch_test[0], self.y_: batch_test[1], self.keep_prob: 1.0}
                h = self.flatten.eval(feed_dict = feed_dict)
                features_test.append(h)
                labels_test.append(np.argmax(np.array(batch_test[1]), 1))
            features_test = np.reshape(np.array(features_test), (self.batch_size*nb_test_batch, features_test[0].shape[1]))
            labels_test = np.reshape(np.array(labels_test), (self.batch_size*nb_test_batch,))
            labels_pred = self.lda_classifier.predict(features_test)
            # 'acc' is presumably sklearn's accuracy_score — confirm the import
            test_accuracy = acc(labels_pred, labels_test)
            print(" test accuracy %g"%test_accuracy)
        # expose the trained classifier for test_total_images(other_clf=True)
        self.clf = self.lda_classifier
    def svm_training(self, nb_train_batch, nb_test_batch):
        """Trains a SVM classifier (RBF kernel) on top of the feature extractor.

        Restores the weights of the feature extractor and trains a new SVM classifier with RBF kernel. The trained SVM can then be reused.
        Finally tests the pipeline on the test dataset.

        NOTE(review): this method duplicates lda_training almost line for line;
        consider extracting a shared helper parameterized by the classifier.

        :param nb_train_batch: The number of batches to train (can be on multiple epochs)
        :param nb_test_batch: The number of batches to test
        :type nb_train_batch: int
        :type nb_test_batch: int
        """
        # probability=True enables predict_proba, used by test_total_images
        self.svm_classifier = SVC(probability = True)
        # start a session
        print(' start session ...')
        with tf.Session(graph=self.graph) as sess:
            saver = tf.train.Saver()
            print(' variable initialization ...')
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            file_to_restore = input("\nName of the file to restore (Directory : " +
                                    self.dir_ckpt + ') : ')
            saver.restore(sess, self.dir_ckpt + file_to_restore)
            # training the SVM classifier on features from the frozen extractor
            features = []
            labels = []
            for i in range(nb_train_batch):
                if (i%10 == 0):
                    print("Computing features for training batch " + str(i) + '/' + str(nb_train_batch))
                batch = self.data.get_next_train_batch(self.batch_size, False, True, True)
                feed_dict = {self.x: batch[0], self.y_: batch[1], self.keep_prob: 1.0}
                h = self.flatten.eval(feed_dict = feed_dict)
                features.append(h)
                # one-hot -> integer class labels
                labels.append(np.argmax(np.array(batch[1]), 1))
            # stack per-batch feature blocks into one (n_samples, n_features) array
            features = np.reshape(np.array(features), (self.batch_size*nb_train_batch, features[0].shape[1]))
            labels = np.reshape(np.array(labels), (self.batch_size*nb_train_batch,))
            print(features.shape)
            print(labels.shape)
            self.svm_classifier.fit(features, labels)
            print(' Testing ...')
            features_test = []
            labels_test = []
            for _ in range(nb_test_batch) :
                batch_test = self.data.get_batch_test(self.batch_size, False, True, True)
                feed_dict = {self.x:batch_test[0], self.y_: batch_test[1], self.keep_prob: 1.0}
                h = self.flatten.eval(feed_dict = feed_dict)
                features_test.append(h)
                labels_test.append(np.argmax(np.array(batch_test[1]), 1))
            features_test = np.reshape(np.array(features_test), (self.batch_size*nb_test_batch, features_test[0].shape[1]))
            labels_test = np.reshape(np.array(labels_test), (self.batch_size*nb_test_batch,))
            labels_pred = self.svm_classifier.predict(features_test)
            # 'acc' is presumably sklearn's accuracy_score — confirm the import
            test_accuracy = acc(labels_pred, labels_test)
            print(" test accuracy %g"%test_accuracy)
        # expose the trained classifier for test_total_images(other_clf=True)
        self.clf = self.svm_classifier
    def test_total_images(self, test_data_path, nb_images,
                          minibatch_size = 25, decision_rule = 'majority_vote',
                          show_images = False,
                          save_images = False,
                          only_green = True, other_clf = False):
        """Performs boosting for classifying full-size images.

        Decomposes each image into patches (with size = self.image_size), computes the posterior probability of each class
        and uses a decision rule to classify the full-size image.
        Optionally plots or saves the probability map and the original image in the visualization directory.

        :param test_data_path: The absolute path to the test dataset. Must contain two directories : CGG/ and Real/
        :param nb_images: The number of images to test
        :param minibatch_size: The size of the batch to process the patches
        :param decision_rule: The decision rule to use to aggregate patches prediction ('majority_vote' or 'weighted_vote')
        :param show_images: Whether to show images or not
        :param save_images: Whether to save images or not
        :param only_green: Whether to take only the green channel of the image
        :param other_clf: Whether to use another classifier (LDA or SVM). If True, takes the lastly trained
        :type test_data_path: str
        :type nb_images: int
        :type minibatch_size: int
        :type decision_rule: str
        :type show_images: bool
        :type save_images: bool
        :type only_green: bool
        :type other_clf: bool
        """
        valid_decision_rule = ['majority_vote', 'weighted_vote']
        if decision_rule not in valid_decision_rule:
            raise NameError(decision_rule + ' is not a valid decision rule.')
        test_name = input(" Choose a name for the test : ")
        if(save_images):
            if not os.path.exists(self.dir_visualization + test_name):
                os.mkdir(self.dir_visualization + test_name)
        if not only_green:
            # visualization only supports single-channel (green) processing
            print(' No visualization when testing all channels...')
            show_images = False
            save_images = False
        print(' Testing for the database : ' + test_data_path)
        print(' start session ...')
        with tf.Session(graph=self.graph) as sess:
            saver = tf.train.Saver()
            print(' variable initialization ...')
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            file_to_restore = input("\nName of the file to restore (Directory : " +
                                    self.dir_ckpt + ') : ')
            saver.restore(sess, self.dir_ckpt + file_to_restore)
            data_test = il.Test_loader(test_data_path, subimage_size = self.image_size, only_green = only_green)
            # running counters: ground truth, per-image scores, tp/fp, etc.
            y = []
            scores = []
            tp = 0
            fp = 0
            nb_CGG = 0
            accuracy = 0
            for i in range(nb_images):
                batch, label, width, height, original, image_file = data_test.get_next_image()
                batch_size = batch.shape[0]
                j = 0
                prediction = 0
                labels = []
                diff = []
                nb_im = 0
                # classify the image's patches, one minibatch at a time
                while j < batch_size:
                    if other_clf:
                        # externally trained classifier (LDA/SVM) on extracted
                        # features; the epsilon avoids log(0)
                        feed_dict = {self.x: batch[j:j+minibatch_size], self.keep_prob: 1.0}
                        features = self.flatten.eval(feed_dict = feed_dict)
                        pred = np.log(self.clf.predict_proba(features) + 0.00001)
                    else:
                        feed_dict = {self.x: batch[j:j+minibatch_size], self.keep_prob: 1.0}
                        pred = self.y_conv.eval(feed_dict)
                    nb_im += pred.shape[0]
                    label_image = np.argmax(pred, 1)
                    # per-patch certainty proxy: spread between class scores
                    d = np.max(pred, 1) - np.min(pred, 1)
                    for k in range(d.shape[0]):
                        diff.append(np.round(d[k], 1))
                    if decision_rule == 'majority_vote':
                        prediction += np.sum(label_image)
                    if decision_rule == 'weighted_vote':
                        # votes in {-d, +d}: sign from the class, weight d
                        prediction += np.sum(2*d*(label_image - 0.5))
                    for l in label_image:
                        labels.append(data_test.image_class[l])
                    j+=minibatch_size
                # ground truth for ROC: 0 = Real, 1 = CGG
                if(label == 'Real'):
                    y.append(0)
                else:
                    y.append(1)
                scores.append(prediction/nb_im)
                diff = np.array(diff)
                if decision_rule == 'majority_vote':
                    prediction = data_test.image_class[int(np.round(prediction/batch_size))]
                if decision_rule == 'weighted_vote':
                    # sign of the weighted vote selects the class index (0 or 1)
                    prediction = data_test.image_class[int(max(prediction,0)/abs(prediction))]
                if label == 'CGG':
                    nb_CGG += 1
                if(label == prediction):
                    accuracy+= 1
                    if(prediction == 'CGG'):
                        tp += 1
                else:
                    if prediction == 'CGG':
                        fp += 1
                print(prediction, label)
                if show_images and not save_images:
                    test_name = ''
                if save_images or show_images:
                    self.image_visualization(path_save = self.dir_visualization + test_name,
                                             file_name = str(i),
                                             images = batch, labels_pred = labels,
                                             true_label = label, width = width,
                                             height = height, diff = diff,
                                             original = original,
                                             show_images = show_images,
                                             save_images = save_images,
                                             save_original = save_images,
                                             prob_map = save_images)
                # periodic progress report every 10 images
                if ((i+1)%10 == 0):
                    print('\n_______________________________________________________')
                    print(str(i+1) + '/' + str(nb_images) + ' images treated.')
                    print('Accuracy : ' + str(round(100*accuracy/(i+1), 2)) + '%')
                    if tp + fp != 0:
                        print('Precision : ' + str(round(100*tp/(tp + fp), 2)) + '%')
                    if nb_CGG != 0:
                        print('Recall : ' + str(round(100*tp/nb_CGG,2)) + '%')
                    print('_______________________________________________________\n')
            print(np.array(y))
            # NOTE(review): the fixed /10 scaling of the scores looks tuned to
            # a particular score range — confirm before reusing for ROC.
            fpr, tpr, thresholds = roc_curve(np.array(y), 0.5 + np.array(scores)/10)
            print(0.5 + np.array(scores)/np.max(np.array(scores)))
            print(thresholds)
            # NOTE(review): hard-coded, machine-specific dump path — confirm.
            filename = '/home/smg/v-nicolas/ROC/' + test_name + '.pkl'
            print('Saving tpr and fpr in file : ' + filename)
            pickle.dump((fpr,tpr), open(filename, 'wb'))
            print('\n_______________________________________________________')
            print('Final Accuracy : ' + str(round(100*accuracy/(nb_images), 3)) + '%')
            # NOTE(review): raises ZeroDivisionError when tp + fp == 0 or
            # nb_CGG == 0 (unlike the guarded periodic report above).
            print('Final Precision : ' + str(round(100*tp/(tp + fp), 3)) + '%')
            print('Final Recall : ' + str(round(100*tp/nb_CGG, 3)) + '%')
            print('Final AUC : ' + str(round(100*auc(fpr, tpr), 3)) + '%')
            print('_______________________________________________________\n')
def image_visualization(self, path_save, file_name, images, labels_pred,
true_label, width, height, diff, original = None,
show_images = False, save_images = False,
prob_map = False, save_original = False):
"""Computes image visualization and save/show it
Permits to visualize the probability map of the image. Green color represents correctly classified patches
and red wrongly classified ones. The intensity depends on the level of certainty.
:param path_save: The absolute path where images should be saved
:param file_name: The name of input image file
:param images: An array containing patches extracted from the full-size image
:param labels_pred: Predicted class name for each patch
:param true_label: Class name of the full-size image
:param width: The width of the full-size image
:param height: The height of the full-size image
:param diff: Differences between log posterior probabilities for each patch
:param original: The original image
:param show_images: Whether to show images or not
:param save_images: Whether to save images or not
:param prob_map: Whether to save the probability map
:param save_original: Whether to save the original image
:type path_save: str
:type file_name: str
:type images: numpy array
:type labels_pred: list
:type true_label: str
:type width: int
:type height: int
:type diff: numpy array
:type original: numpy array
:type show_images: bool
:type save_images: bool
:type prob_map: bool
:type save_original: bool
"""
# One grid cell per extracted patch.
nb_width = int(width/self.image_size)
nb_height = int(height/self.image_size)
# m scales the colour ramps below: diff[i]/m == 1 gives full saturation.
m = 10
img = plt.figure(figsize = (nb_width, nb_height))
gs1 = gridspec.GridSpec(nb_height, nb_width)
for i in range(len(images)):
# Black-to-green / black-to-red colormaps; the other two channels'
# endpoint brightness drops as the confidence value diff[i] grows.
cdict_green = {'red': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0,1.0))}
cdict_red = {'red': ((0.0,0.0,0.0),
(1.0,1.0,1.0)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m))}
ax1 = plt.subplot(gs1[i])
ax1.axis('off')
# NOTE(review): in this first pass the colour encodes the *predicted*
# class ('Real' -> green), unlike the prob_map pass below which
# colours by agreement with true_label.
if labels_pred[i] == 'Real':
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_green', cdict_green, 100)
else:
cmap = 'gray'
else:
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_red', cdict_red, 100)
else:
cmap = 'gray'
# Pins two corner pixels to 0 and 1 — presumably so imshow's value
# normalization is identical across patches; TODO confirm.
images[i,0,0,0] = 0
images[i,0,1,0] = 1
plt.imshow(images[i,:,:,0], cmap = cmap)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
# ax1.text(40, 50, str(diff[i]))
gs1.update(wspace=.0, hspace=.0)
if show_images:
plt.show(img)
if save_images:
plt.savefig(path_save + '/vis_' + file_name + '.png',
bbox_inches='tight',
pad_inches=0.0)
plt.close()
if save_images:
if save_original:
plt.figure()
plt.axis('off')
plt.imshow(original, cmap = 'gray')
plt.savefig(path_save + '/vis_' + file_name + '_original' + '.png',
bbox_inches='tight',
pad_inches=0.0)
if prob_map:
# Probability map pass: uniform tiles coloured by whether each
# patch prediction matches true_label, not by the predicted class.
img = plt.figure(figsize = (nb_width, nb_height))
gs1 = gridspec.GridSpec(nb_height, nb_width)
for i in range(len(images)):
map_im = np.ones((self.image_size, self.image_size))
map_im[0,0] = 0
cdict_green = {'red': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0,1.0))}
cdict_red = {'red': ((0.0,0.0,0.0),
(1.0,1.0,1.0)),
'blue': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m)),
'green': ((0.0,0.0,0.0),
(1.0,1.0 - diff[i]/m,1.0 - diff[i]/m))}
ax1 = plt.subplot(gs1[i])
ax1.axis('off')
if labels_pred[i] == true_label:
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_green', cdict_green, 100)
else:
cmap = 'gray'
map_im = map_im*0.7
else:
if diff[i] > 0.4:
cmap = mcolors.LinearSegmentedColormap('my_red', cdict_red, 100)
else:
cmap = 'gray'
map_im = map_im*0.7
plt.imshow(map_im, cmap = cmap)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
gs1.update(wspace=.0, hspace=.0)
if show_images:
plt.show(img)
if save_images:
plt.savefig(path_save + '/vis_' + file_name + '_probmap' + '.png',
bbox_inches='tight',
pad_inches=0.0)
plt.close()
del(img)
def show_filtered(self, image_file):
    """Display the feature maps of the first convolution layer.

    Loads one grayscale image from disk, restores a checkpoint chosen
    interactively by the user, evaluates ``self.h_conv1`` on it and shows
    every filter response as a separate grayscale figure.

    :param image_file: Path to the input image file
    :type image_file: str
    """
    print(' Loading image from file : ' + image_file)
    raw = Image.open(image_file)
    # Single-image batch of shape (1, H, W, 1) as expected by self.x.
    batch = np.reshape(np.array([np.asarray(raw)]),
                       (1, self.image_size, self.image_size, 1))
    print(' start session ...')
    with tf.Session(graph=self.graph) as sess:
        saver = tf.train.Saver()
        print(' variable initialization ...')
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        file_to_restore = input("\nName of the file to restore (Directory : " +
                                self.dir_ckpt + ') : ')
        saver.restore(sess, self.dir_ckpt + file_to_restore)
        activations = self.h_conv1.eval(
            feed_dict={self.x: batch, self.keep_prob: 1.0})
        # One figure per learned filter of the first layer.
        for channel in range(activations.shape[3]):
            plt.figure()
            plt.imshow(activations[0, :, :, channel], cmap='gray')
            plt.show()
def test_splicing(self, data_path, nb_images, save_images = True, show_images = False,
minibatch_size = 25):
"""Computes image visualization for spliced images
Decomposes each image into patches (with size = self.image_size), computes the posterior probability of each class
and show the probability map.
:param data_path: Path to the spliced images. Should contain two directories : CGG/ and Real/
:param nb_images: Number of spliced images to process
:param show_images: Whether to show images or not
:param save_images: Whether to save images or not
:param minibatch_size: The size of the batch to process the patches
:type data_path: str
:type nb_images: int
:type show_images: bool
:type save_images: bool
:type minibatch_size: int
"""
# When saving, ask the user for a test name and create its directory.
if(save_images):
test_name = input(" Choose a name for the test : ")
path_save = self.dir_visualization + test_name
if not os.path.exists(self.dir_visualization + test_name):
os.mkdir(self.dir_visualization + test_name)
else:
path_save = ''
print(' start session ...')
with tf.Session(graph=self.graph) as sess:
saver = tf.train.Saver()
print(' variable initialization ...')
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
# Checkpoint file chosen interactively by the user.
file_to_restore = input("\nName of the file to restore (Directory : " +
self.dir_ckpt + ') : ')
saver.restore(sess, self.dir_ckpt + file_to_restore)
data_test = il.Test_loader(data_path,
subimage_size = self.image_size, only_green = self.only_green)
for i in range(nb_images):
batch, label, width, height, original, file_name = data_test.get_next_image()
batch_size = batch.shape[0]
j = 0
labels = []
diff = []
# Evaluate the image patch-wise in minibatches.
while j < batch_size:
feed_dict = {self.x: batch[j:j+minibatch_size], self.keep_prob: 1.0}
pred = self.y_conv.eval(feed_dict)
label_image = np.argmax(pred, 1)
# Confidence proxy: spread between the largest and smallest output.
d = np.max(pred, 1) - np.min(pred, 1)
for k in range(d.shape[0]):
diff.append(np.round(d[k], 1))
for l in label_image:
labels.append(data_test.image_class[l])
j+=minibatch_size
diff = np.array(diff)
self.image_visualization(path_save = path_save,
file_name = str(i),
images = batch, labels_pred = labels,
true_label = label, width = width,
height = height, diff = diff,
original = original,
show_images = show_images,
save_images = save_images,
prob_map = save_images,
save_original= save_images)
if __name__ == '__main__':
using_GPU = False
# NOTE(review): `config` must be defined earlier in this file — not
# visible in this chunk; it switches between server and local paths.
if config == 'server':
database_path = '/work/smg/v-nicolas/level-design_raise_100/'
else:
database_path = '/home/nicolas/Database/level-design_raise_100/'
image_size = 100
nb_train_batch = 5000
nb_test_batch = 80
nb_validation_batch = 40
clf = Model(database_path, image_size, nbins = 11,
batch_size = 50, histograms = False, stats = True,
using_GPU = using_GPU)
# clf.mean_histogram()
# clf.show_filtered('/home/nicolas/Database/level-design_dresden_100/train/CGG/train153.jpg')
clf.train(nb_train_batch = nb_train_batch,
nb_test_batch = nb_test_batch,
nb_validation_batch = nb_validation_batch,
show_filters = False)
# clf.svm_training(nb_train_batch = 800, nb_test_batch = 80)
# Evaluate on the 650px full-size test set.
if config == 'server':
test_data_path = '/work/smg/v-nicolas/level-design_raise_650/test/'
else:
test_data_path = '/home/nicolas/Database/level-design_raise_650/test/'
clf.test_total_images(test_data_path = test_data_path,
nb_images = 720, decision_rule = 'weighted_vote',
show_images = False,
save_images = False,
only_green = True,
other_clf = False)
# Evaluate on the original-resolution test set.
if config == 'server':
test_data_path = '/work/smg/v-nicolas/level-design_raise/test/'
else:
test_data_path = '/home/nicolas/Database/level-design_raise/test/'
clf.test_total_images(test_data_path = test_data_path,
nb_images = 720, decision_rule = 'weighted_vote',
show_images = False,
save_images = False,
only_green = True,
other_clf = False)
# Visualize splicing localization on spliced images.
if config == 'server':
splicing_data_path = '/work/smg/v-nicolas/splicing/'
else:
splicing_data_path = '/home/nicolas/Database/splicing/'
clf.test_splicing(data_path = splicing_data_path,
nb_images = 50,
minibatch_size = 25,
show_images = False,
save_images = True)
| mit |
gundramleifert/exp_tf | models/lp/bdlstm_lp_v12.py | 1 | 13753 | '''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes done to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC Channel
nClasses += 1
nEpochs = 15
batchSize = 4
learningRate = 0.001
momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
# NOTE(review): under Python 3 this is true division and yields a float,
# which would break the range() calls below; Python 2 semantics assumed.
stepsPerEpocheTrain = len(trainList) / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
def inference(images, seqLen):
"""Builds the forward graph: three conv stages, one bidirectional LSTM
layer and a per-timestep linear projection to class logits.
Returns (logits3d, seqLenAfterConv): logits packed as (time, batch,
nClasses) for the CTC ops, and the per-example sequence lengths rescaled
by the horizontal conv/pool strides.
"""
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
# conv1 strides horizontally by 3, so sequence lengths shrink to ~1/3.
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(conv2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
# pool2 halves the width (stride 2 in x).
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 128, 256], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[256]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
outputs, _, _ = bidirectional_rnn(forwardH1, backwardH1, rnnIn, dtype=tf.float32)
# Forward and backward outputs are summed (not concatenated).
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
    """Mean CTC loss over the batch for time-major logits and sparse targets."""
    per_example = ctc.ctc_loss(logits3d, tgt, seqLenAfterConv)
    return tf.reduce_mean(per_example)
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
logits3d, seqAfterConv = inference(inputX, seqLengths)
# NOTE(review): rebinding the name `loss` to the tensor shadows the
# loss() function defined above — intentional here, but fragile.
loss = loss(logits3d, targetY, seqAfterConv)
optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
# Character error rate: total edit distance divided by target label count.
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
# Training: each epoch reshuffles the list and consumes it batch by batch.
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
# Validation pass after every epoch (no optimizer step).
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp12/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 105826.447447
# Train: CER 0.513716612931
# Train time 12194.873775
# Val: CTC-loss 1504.79408941
# Val: CER 0.072881856362
# Val time 411.808479071
# Epoch 2 ...
# Train: CTC-loss 15091.1940137
# Train: CER 0.0667136614703
# Train time 12173.1077261
# Val: CTC-loss 1179.40594752
# Val: CER 0.0556193102747
# Val time 400.204962015
# Epoch 3 ...
# Train: CTC-loss 11900.7118423
# Train: CER 0.0526622576448
# Train time 12172.2554049
# Val: CTC-loss 1039.946141
# Val: CER 0.0491684744656
# Val time 398.404884815
# Epoch 4 ...
# Train: CTC-loss 10437.7306473
# Train: CER 0.0469169636371
# Train time 10877.9467349
# Val: CTC-loss 1007.47150323
# Val: CER 0.0451915404101
# Val time 334.727671146
# Epoch 5 ...
# Train: CTC-loss 9443.08544896
# Train: CER 0.0425922564416
# Train time 9464.47906113
# Val: CTC-loss 1017.57869761
# Val: CER 0.0446914900492
# Val time 241.660830975
# Epoch 6 ...
# Train: CTC-loss 8564.48897751
# Train: CER 0.0392383345344
# Train time 8625.19990706
# Val: CTC-loss 995.913742108
# Val: CER 0.0450711468607
# Val time 292.950110912
# Epoch 7 ...
# Train: CTC-loss 7731.21376524
# Train: CER 0.0359268137305
# Train time 7913.68863797
# Val: CTC-loss 995.542173939
# Val: CER 0.0444037097742
# Val time 261.380025864
# Epoch 8 ...
# Train: CTC-loss 7036.58519177
# Train: CER 0.0332956298213
# Train time 7966.78129411
# Val: CTC-loss 1087.22995544
# Val: CER 0.0453201992959
# Val time 251.776542902
# Epoch 9 ...
# Train: CTC-loss 6386.55617645
# Train: CER 0.0303261985639
# Train time 7701.56733513
# Val: CTC-loss 1026.38316258
# Val: CER 0.0436090447605
# Val time 218.07487011
# Epoch 10 ...
# Train: CTC-loss 5824.7079256
# Train: CER 0.0284933981667
# Train time 5460.63484693
# Val: CTC-loss 1080.23335057
# Val: CER 0.0445155570209
# Val time 172.762127876
# Epoch 11 ...
# Train: CTC-loss 5356.71286768
# Train: CER 0.0262982310051
# Train time 3948.23342299
# Val: CTC-loss 1104.59183891
# Val: CER 0.0456411995143
# Val time 98.4593589306
# Epoch 12 ...
# Train: CTC-loss 4836.94458857
# Train: CER 0.0240043066311
# Train time 3012.10682011
# Val: CTC-loss 1111.79024631
# Val: CER 0.0460910586268
# Val time 98.5978720188
# Epoch 13 ...
# Train: CTC-loss 4369.76073904
# Train: CER 0.0218724541638
# Train time 3011.66104412
# Val: CTC-loss 1166.40841607
# Val: CER 0.0444325187852
# Val time 99.0049960613
# Epoch 14 ...
# Train: CTC-loss 4189.45209316
# Train: CER 0.0211254203888
# Train time 3048.59282207
# Val: CTC-loss 1196.65375275
# Val: CER 0.0473934103052
# Val time 98.5281729698
# Epoch 15 ...
# Train: CTC-loss 4080.45600853
# Train: CER 0.0209099855089
# Train time 3016.35282397
# Val: CTC-loss 1234.12073825
# Val: CER 0.0460957174202
# Val time 98.3190040588 | apache-2.0 |
xlhtc007/blaze | blaze/server/server.py | 10 | 11382 | from __future__ import absolute_import, division, print_function
import socket
import functools
import re
import flask
from flask import Blueprint, Flask, request, Response
try:
    from bokeh.server.crossdomain import crossdomain
except ImportError:
    def crossdomain(*args, **kwargs):
        """No-op stand-in used when bokeh is unavailable.

        Accepts (and ignores) the same arguments as bokeh's decorator and
        returns a decorator that leaves the wrapped view's behavior intact.
        """
        def wrapper(func):
            @functools.wraps(func)
            def wrapped(*call_args, **call_kwargs):
                return func(*call_args, **call_kwargs)
            return wrapped
        return wrapper
from toolz import assoc
from datashape import Mono, discover
from datashape.predicates import iscollection, isscalar
from odo import odo
import blaze
from blaze import compute
from blaze.expr import utils as expr_utils
from blaze.compute import compute_up
from .serialization import json, all_formats
from ..interactive import InteractiveSymbol, coerce_scalar
from ..expr import Expr, symbol
__all__ = 'Server', 'to_tree', 'from_tree'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
api = Blueprint('api', __name__)
pickle_extension_api = Blueprint('pickle_extension_api', __name__)
# Sentinel distinguishing "no default supplied" from an explicit default of None.
_no_default = object()


def _get_option(option, options, default=_no_default):
    """Look up *option* in the blueprint *options* mapping.

    Falls back to *default* when one was supplied; otherwise raises a
    TypeError naming the missing option (more informative than KeyError).
    """
    if option in options:
        return options[option]
    if default is not _no_default:
        return default
    # Provides a more informative error message.
    raise TypeError(
        'The blaze api must be registered with {option}'.format(
            option=option,
        ),
    )
def _register_api(app, options, first_registration=False):
"""
Register the data with the blueprint.
Caches the per-app data, serialization formats and authorization
callable so the endpoint helpers below can retrieve them via
flask.current_app.
"""
_get_data.cache[app] = _get_option('data', options)
# Map format name -> format object for content-negotiation in compserver.
_get_format.cache[app] = dict(
(f.name, f) for f in _get_option('formats', options)
)
# Default authorization accepts every request.
_get_auth.cache[app] = (
_get_option('authorization', options, None) or (lambda a: True)
)
# Call the original register function.
Blueprint.register(api, app, options, first_registration)
# Flask invokes blueprint.register() during app.register_blueprint();
# overriding it lets us intercept the options passed there.
api.register = _register_api
def _get_data():
"""
Retrieve the current application's data for use in the blaze server
endpoints.
"""
return _get_data.cache[flask.current_app]
# Per-application cache populated by _register_api.
_get_data.cache = {}
def _get_format(name):
"""Return the serialization format registered under *name* for the current app."""
return _get_format.cache[flask.current_app][name]
_get_format.cache = {}
def _get_auth():
"""Return the authorization callable registered for the current app."""
return _get_auth.cache[flask.current_app]
_get_auth.cache = {}
def authorization(f):
"""View decorator that rejects requests failing the registered auth check
with a 401 response carrying a WWW-Authenticate challenge."""
@functools.wraps(f)
def authorized(*args, **kwargs):
# request.authorization is None when no Authorization header was sent.
if not _get_auth()(request.authorization):
return Response(
'bad auth token',
401,
{'WWW-Authenticate': 'Basic realm="Login Required"'},
)
return f(*args, **kwargs)
return authorized
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : dict, optional
A dictionary mapping dataset name to any data format that blaze
understands.
formats : iterable, optional
An iterable of supported serialization formats. By default, the
server will support JSON.
A serialization format is an object that supports:
name, loads, and dumps.
authorization : callable, optional
A callable to be used to check the auth header from the client.
This callable should accept a single argument that will either be
None indicating that no header was passed, or an object
containing a username and password attribute. By default, all requests
are allowed.
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
__slots__ = 'app', 'data', 'port'
def __init__(self, data=None, formats=None, authorization=None):
app = self.app = Flask('blaze.server.server')
if data is None:
data = dict()
# Registering the blueprint triggers _register_api, which caches
# data/formats/authorization for the endpoint helpers.
app.register_blueprint(
api,
data=data,
formats=formats if formats is not None else (json,),
authorization=authorization,
)
self.data = data
def run(self, *args, **kwargs):
"""Run the server, retrying on successive ports if the requested one is busy."""
port = kwargs.pop('port', DEFAULT_PORT)
self.port = port
try:
self.app.run(*args, port=port, **kwargs)
# socket.error is raised when the port is already bound.
except socket.error:
print("\tOops, couldn't connect on port %d. Is it busy?" % port)
if kwargs.get('retry', True):
# Attempt to start the server on a new port.
self.run(*args, **assoc(kwargs, 'port', port + 1))
@api.route('/datashape', methods=['GET'])
@crossdomain(origin='*', methods=['GET'])
@authorization
def shape():
"""Return the discovered datashape of the hosted data as plain text."""
return str(discover(_get_data()))
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ....) This form can be useful for
serialization.
Parameters
----------
expr : Expr
A Blaze expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Column',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
Simplify expression using explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
from_tree
"""
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
if isinstance(expr, slice):
# Slices serialize as a pseudo-op with start/stop/step as args.
return {'op': 'slice',
'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
elif isinstance(expr, Mono):
# Datashapes serialize as their string representation.
return str(expr)
elif isinstance(expr, InteractiveSymbol):
# Drop the bound data; keep only the name and datashape.
return to_tree(symbol(expr._name, expr.dshape), names)
elif isinstance(expr, Expr):
return {'op': type(expr).__name__,
'args': [to_tree(arg, names) for arg in expr._args]}
else:
return expr
def expression_from_name(name):
"""Resolve an expression class from its name, searching blaze, blaze.expr
and finally the types registered with the compute_up dispatcher.
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
# Fall back to classes that appear as the first type of a compute_up signature.
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
t
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Field',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
>>> from_tree(tree)
sum(t.x)
Simplify expression using explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
to_tree
"""
if isinstance(expr, dict):
op, args = expr['op'], expr['args']
if 'slice' == op:
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if 'Symbol' in op:
# Symbol args are plain literals (name, dshape); do not resolve
# them against the namespace.
children = [from_tree(arg) for arg in args]
else:
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, list):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
# Matches e.g. 'application/vnd.blaze+json' and captures the format name.
mimetype_regex = re.compile(r'^application/vnd\.blaze\+(%s)$' %
'|'.join(x.name for x in all_formats))
@api.route('/compute', methods=['POST', 'HEAD', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
def compserver():
"""Deserialize an expression tree from the request body, compute it
against the hosted data and return the serialized result."""
content_type = request.headers['content-type']
matched = mimetype_regex.match(content_type)
if matched is None:
return 'Unsupported serialization format %s' % content_type, 415
try:
serial = _get_format(matched.groups()[0])
except KeyError:
return (
"Unsupported serialization format '%s'" % matched.groups()[0],
415,
)
try:
payload = serial.loads(request.data)
except ValueError:
return ("Bad data. Got %s " % request.data, 400) # 400: Bad Request
ns = payload.get('namespace', dict())
dataset = _get_data()
# ':leaf' is the reserved name bound to the server's dataset.
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload['expr'], namespace=ns)
assert len(expr._leaves()) == 1
leaf = expr._leaves()[0]
try:
result = compute(expr, {leaf: dataset})
# Normalize the result into serializable core types.
if iscollection(expr.dshape):
result = odo(result, list)
elif isscalar(expr.dshape):
result = coerce_scalar(result, str(expr.dshape))
except NotImplementedError as e:
# 501: Not Implemented
return ("Computation not supported:\n%s" % e, 501)
except Exception as e:
# 500: Internal Server Error
return ("Computation failed with message:\n%s" % e, 500)
return serial.dumps({
'datashape': str(expr.dshape),
'data': result,
'names': expr.fields
})
| bsd-3-clause |
inkenbrandt/WellApplication | wellapplication/chem.py | 1 | 20675 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 05 09:50:51 2016
@author: paulinkenbrandt
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
from datetime import datetime
import numpy as np
import requests
class WQP(object):
"""Downloads Water Quality Data from thw Water Quality Portal based on parameters entered
:param values: query parameter designating location to select site; this is the Argument for the REST parameter in
table 1 of https://www.waterqualitydata.us/webservices_documentation/
:param loc_type: type of query to perform; valid inputs include 'huc', 'bBox', 'countycode', 'siteid';
this is the REST parameter of table 1 of https://www.waterqualitydata.us/webservices_documentation/
:type loc_type: str
:type values: str
:param **kwargs: additional Rest Parameters
:Example:
>>> wq = WQP('-111.54,40.28,-111.29,40.48','bBox')
https://www.waterqualitydata.us/Result/search?mimeType=csv&zip=no&siteType=Spring&siteType=Well&characteristicType=Inorganics%2C+Major%2C+Metals&characteristicType=Inorganics%2C+Major%2C+Non-metals&characteristicType=Nutrient&characteristicType=Physical&bBox=-111.54%2C40.28%2C-111.29%2C40.48&sorted=no&sampleMedia=Water
"""
def __init__(self, values, loc_type, **kwargs):
r"""Downloads Water Quality Data from the Water Quality Portal based on parameters entered
"""
self.loc_type = loc_type
self.values = values
# Base URL of the Water Quality Portal REST services (used by get_response).
self.url = 'https://www.waterqualitydata.us/'
# Presumably the set of valid loc_type values — not referenced in this chunk.
self.geo_criteria = ['sites', 'stateCd', 'huc', 'countyCd', 'bBox']
# Default characteristic groups requested when none are supplied.
self.cTgroups = ['Inorganics, Major, Metals', 'Inorganics, Major, Non-metals', 'Nutrient', 'Physical']
# NOTE: both calls below hit the WQP web API immediately on construction.
self.results = self.get_wqp_results('Result', **kwargs)
self.stations = self.get_wqp_stations('Station', **kwargs)
def get_response(self, service, **kwargs):
""" Returns a dictionary of data requested by each function.
:param service: options include 'Station' or 'Results'
table 1 of https://www.waterqualitydata.us/webservices_documentation/
"""
http_error = 'Could not connect to the API. This could be because you have no internet connection, a parameter' \
' was input incorrectly, or the API is currently down. Please try again.'
# For python 3.4
# try:
kwargs[self.loc_type] = self.values
kwargs['mimeType'] = 'csv'
kwargs['zip'] = 'no'
kwargs['sorted'] = 'no'
if 'siteType' not in kwargs:
kwargs['sampleMedia'] = 'Water'
if 'siteType' not in kwargs:
kwargs['siteType'] = ['Spring', 'Well']
print('This function is biased towards groundwater. For all sites, use')
if 'characteristicType' not in kwargs:
kwargs['characteristicType'] = self.cTgroups
total_url = self.url + service + '/search?'
response_ob = requests.get(total_url, params=kwargs)
return response_ob
def get_wqp_stations(self, service, **kwargs):
nwis_dict = self.get_response(service, **kwargs).url
stations = pd.read_csv(nwis_dict)
return stations
    def get_wqp_results(self, service, **kwargs):
        """Bring data from WQP site into a Pandas DataFrame for analysis.

        :param service: WQP REST endpoint to query (here 'Result')
        :returns: DataFrame of analytical results, one row per reported value
        """
        # Explicit dtypes: most columns are read as strings because WQP mixes
        # numbers, qualifiers and blanks within the same columns.
        Rdtypes = {"OrganizationIdentifier": np.str_, "OrganizationFormalName": np.str_, "ActivityIdentifier": np.str_,
                   "ActivityStartTime/Time": np.str_,
                   "ActivityTypeCode": np.str_, "ActivityMediaName": np.str_, "ActivityMediaSubdivisionName": np.str_,
                   "ActivityStartDate": np.str_, "ActivityStartTime/TimeZoneCode": np.str_,
                   "ActivityEndDate": np.str_, "ActivityEndTime/Time": np.str_, "ActivityEndTime/TimeZoneCode": np.str_,
                   "ActivityDepthHeightMeasure/MeasureValue": np.float16,
                   "ActivityDepthHeightMeasure/MeasureUnitCode": np.str_,
                   "ActivityDepthAltitudeReferencePointText": np.str_,
                   "ActivityTopDepthHeightMeasure/MeasureValue": np.float16,
                   "ActivityTopDepthHeightMeasure/MeasureUnitCode": np.str_,
                   "ActivityBottomDepthHeightMeasure/MeasureValue": np.float16,
                   "ActivityBottomDepthHeightMeasure/MeasureUnitCode": np.str_,
                   "ProjectIdentifier": np.str_, "ActivityConductingOrganizationText": np.str_,
                   "MonitoringLocationIdentifier": np.str_, "ActivityCommentText": np.str_,
                   "SampleAquifer": np.str_, "HydrologicCondition": np.str_, "HydrologicEvent": np.str_,
                   "SampleCollectionMethod/MethodIdentifier": np.str_,
                   "SampleCollectionMethod/MethodIdentifierContext": np.str_,
                   "SampleCollectionMethod/MethodName": np.str_, "SampleCollectionEquipmentName": np.str_,
                   "ResultDetectionConditionText": np.str_, "CharacteristicName": np.str_,
                   "ResultSampleFractionText": np.str_,
                   "ResultMeasureValue": np.str_, "ResultMeasure/MeasureUnitCode": np.str_,
                   "MeasureQualifierCode": np.str_,
                   "ResultStatusIdentifier": np.str_, "StatisticalBaseCode": np.str_, "ResultValueTypeName": np.str_,
                   "ResultWeightBasisText": np.str_, "ResultTimeBasisText": np.str_,
                   "ResultTemperatureBasisText": np.str_,
                   "ResultParticleSizeBasisText": np.str_, "PrecisionValue": np.str_, "ResultCommentText": np.str_,
                   "USGSPCode": np.str_, "ResultDepthHeightMeasure/MeasureValue": np.float16,
                   "ResultDepthHeightMeasure/MeasureUnitCode": np.str_,
                   "ResultDepthAltitudeReferencePointText": np.str_,
                   "SubjectTaxonomicName": np.str_, "SampleTissueAnatomyName": np.str_,
                   "ResultAnalyticalMethod/MethodIdentifier": np.str_,
                   "ResultAnalyticalMethod/MethodIdentifierContext": np.str_,
                   "ResultAnalyticalMethod/MethodName": np.str_, "MethodDescriptionText": np.str_,
                   "LaboratoryName": np.str_,
                   "AnalysisStartDate": np.str_, "ResultLaboratoryCommentText": np.str_,
                   "DetectionQuantitationLimitTypeName": np.str_,
                   "DetectionQuantitationLimitMeasure/MeasureValue": np.str_,
                   "DetectionQuantitationLimitMeasure/MeasureUnitCode": np.str_, "PreparationStartDate": np.str_,
                   "ProviderName": np.str_}
        # define date field indices -- assumed to match the positions of the
        # date columns in the portal's CSV layout; verify if the WQP schema changes
        dt = [6, 56, 61]
        csv = self.get_response(service, **kwargs).url
        print(csv)  # echo the query URL so the download can be reproduced
        # read csv into DataFrame
        df = pd.read_csv(csv, dtype=Rdtypes, parse_dates=dt)
        return df
def massage_results(self, df = ''):
"""Massage WQP result data for analysis
When called, this function:
- renames all of the results fields, abbreviating the fields and eliminating slashes and spaces.
- parses the datetime fields, fixing errors when possible (see :func:`datetimefix`)
- standardizes units to mg/L
- normalizes nutrient species(See :func:`parnorm`)
"""
if df == '':
df = self.results
# Map new names for columns
ResFieldDict = {"AnalysisStartDate": "AnalysisDate", "ResultAnalyticalMethod/MethodIdentifier": "AnalytMeth",
"ResultAnalyticalMethod/MethodName": "AnalytMethId",
"ResultDetectionConditionText": "DetectCond",
"ResultLaboratoryCommentText": "LabComments", "LaboratoryName": "LabName",
"DetectionQuantitationLimitTypeName": "LimitType",
"DetectionQuantitationLimitMeasure/MeasureValue": "MDL",
"DetectionQuantitationLimitMeasure/MeasureUnitCode": "MDLUnit",
"MethodDescriptionText": "MethodDescript",
"OrganizationIdentifier": "OrgId", "OrganizationFormalName": "OrgName",
"CharacteristicName": "Param",
"ProjectIdentifier": "ProjectId", "MeasureQualifierCode": "QualCode",
"ResultCommentText": "ResultComment",
"ResultStatusIdentifier": "ResultStatus", "ResultMeasureValue": "ResultValue",
"ActivityCommentText": "SampComment", "ActivityDepthHeightMeasure/MeasureValue": "SampDepth",
"ActivityDepthAltitudeReferencePointText": "SampDepthRef",
"ActivityDepthHeightMeasure/MeasureUnitCode": "SampDepthU",
"SampleCollectionEquipmentName": "SampEquip",
"ResultSampleFractionText": "SampFrac", "ActivityStartDate": "SampleDate",
"ActivityIdentifier": "SampleId",
"ActivityStartTime/Time": "SampleTime", "ActivityMediaSubdivisionName": "SampMedia",
"SampleCollectionMethod/MethodIdentifier": "SampMeth",
"SampleCollectionMethod/MethodName": "SampMethName",
"ActivityTypeCode": "SampType", "MonitoringLocationIdentifier": "StationId",
"ResultMeasure/MeasureUnitCode": "Unit", "USGSPCode": "USGSPCode"}
# Rename Data
df = self.results
df1 = df.rename(columns=ResFieldDict)
# Remove unwanted and bad times
df1["SampleDate"] = df1[["SampleDate", "SampleTime"]].apply(lambda x: self.datetimefix(x, "%Y-%m-%d %H:%M"), 1)
# Define unneeded fields to drop
resdroplist = ["ActivityBottomDepthHeightMeasure/MeasureUnitCode",
"ActivityBottomDepthHeightMeasure/MeasureValue",
"ActivityConductingOrganizationText", "ActivityEndDate", "ActivityEndTime/Time",
"ActivityEndTime/TimeZoneCode", "ActivityMediaName", "ActivityStartTime/TimeZoneCode",
"ActivityTopDepthHeightMeasure/MeasureUnitCode", "ActivityTopDepthHeightMeasure/MeasureValue",
"HydrologicCondition", "HydrologicEvent", "PrecisionValue", "PreparationStartDate",
"ProviderName",
"ResultAnalyticalMethod/MethodIdentifierContext", "ResultDepthAltitudeReferencePointText",
"ResultDepthHeightMeasure/MeasureUnitCode", "ResultDepthHeightMeasure/MeasureValue",
"ResultParticleSizeBasisText", "ResultTemperatureBasisText",
"ResultTimeBasisText", "ResultValueTypeName", "ResultWeightBasisText", "SampleAquifer",
"SampleCollectionMethod/MethodIdentifierContext", "SampleTissueAnatomyName",
"StatisticalBaseCode",
"SubjectTaxonomicName", "SampleTime"]
# Drop fields
df1 = df1.drop(resdroplist, axis=1)
# convert results and mdl to float
df1['ResultValue'] = pd.to_numeric(df1['ResultValue'], errors='coerce')
df1['MDL'] = pd.to_numeric(df1['MDL'], errors='coerce')
# match old and new station ids
df1['StationId'] = df1['StationId'].str.replace('_WQX-', '-')
# standardize all ug/l data to mg/l
df1.Unit = df1.Unit.apply(lambda x: str(x).rstrip(), 1)
df1.ResultValue = df1[["ResultValue", "Unit"]].apply(
lambda x: x[0] / 1000 if str(x[1]).lower() == "ug/l" else x[0], 1)
df1.Unit = df1.Unit.apply(lambda x: self.unitfix(x), 1)
df1['Param'], df1['ResultValue'], df1['Unit'] = zip(
*df1[['Param', 'ResultValue', 'Unit']].apply(lambda x: self.parnorm(x), 1))
#self.results = df1
return df1
def datetimefix(self, x, form):
"""This script cleans date-time errors
:param x: date-time string
:param form: format of date-time string
:returns: formatted datetime type
"""
d = str(x[0]).lstrip().rstrip()[0:10]
t = str(x[1]).lstrip().rstrip()[0:5].zfill(5)
try:
int(d[0:2])
except(ValueError, TypeError, NameError):
return np.nan
try:
int(t[0:2])
int(t[3:5])
except(ValueError, TypeError, NameError):
t = "00:00"
if int(t[0:2]) > 23:
t = "00:00"
elif int(t[3:5]) > 59:
t = "00:00"
else:
t = t[0:2].zfill(2) + ":" + t[3:5]
return datetime.strptime(d + " " + t, form)
def parnorm(self, x):
"""Standardizes nutrient species
- Nitrate as N to Nitrate
- Nitrite as N to Nitrite
- Sulfate as s to Sulfate
"""
p = str(x[0]).rstrip().lstrip().lower()
u = str(x[2]).rstrip().lstrip().lower()
if p == 'nitrate' and u == 'mg/l as n':
return 'Nitrate', x[1] * 4.427, 'mg/l'
elif p == 'nitrite' and u == 'mg/l as n':
return 'Nitrite', x[1] * 3.285, 'mg/l'
elif p == 'ammonia-nitrogen' or p == 'ammonia-nitrogen as n' or p == 'ammonia and ammonium':
return 'Ammonium', x[1] * 1.288, 'mg/l'
elif p == 'ammonium' and u == 'mg/l as n':
return 'Ammonium', x[1] * 1.288, 'mg/l'
elif p == 'sulfate as s':
return 'Sulfate', x[1] * 2.996, 'mg/l'
elif p in ('phosphate-phosphorus', 'phosphate-phosphorus as p', 'orthophosphate as p'):
return 'Phosphate', x[1] * 3.066, 'mg/l'
elif (p == 'phosphate' or p == 'orthophosphate') and u == 'mg/l as p':
return 'Phosphate', x[1] * 3.066, 'mg/l'
elif u == 'ug/l':
return x[0], x[1] / 1000, 'mg/l'
else:
return x[0], x[1], str(x[2]).rstrip()
def unitfix(self, x):
"""Standardizes unit labels from ug/l to mg/l
:param x: unit label to convert
:type x: str
:returns: unit string as mg/l
.. warning:: must be used with a value conversion tool
"""
z = str(x).lower()
if z == "ug/l":
return "mg/l"
elif z == "mg/l":
return "mg/l"
else:
return x
    def massage_stations(self):
        """Massage WQP station data for analysis.

        Renames station fields to short, slash-free names, drops unused
        columns, harmonizes station types, rounds elevations, and removes
        duplicate stations created by legacy WQX identifiers.

        :returns: cleaned ``self.stations`` DataFrame (also modified in place)
        """
        # Map new names for the station columns.
        StatFieldDict = {"MonitoringLocationIdentifier": "StationId", "AquiferName": "Aquifer",
                         "AquiferTypeName": "AquiferType",
                         "ConstructionDateText": "ConstDate", "CountyCode": "CountyCode",
                         "WellDepthMeasure/MeasureValue": "Depth",
                         "WellDepthMeasure/MeasureUnitCode": "DepthUnit", "VerticalMeasure/MeasureValue": "Elev",
                         "VerticalAccuracyMeasure/MeasureValue": "ElevAcc",
                         "VerticalAccuracyMeasure/MeasureUnitCode": "ElevAccUnit",
                         "VerticalCollectionMethodName": "ElevMeth",
                         "VerticalCoordinateReferenceSystemDatumName": "ElevRef",
                         "VerticalMeasure/MeasureUnitCode": "ElevUnit", "FormationTypeText": "FmType",
                         "WellHoleDepthMeasure/MeasureValue": "HoleDepth",
                         "WellHoleDepthMeasure/MeasureUnitCode": "HoleDUnit",
                         "HorizontalAccuracyMeasure/MeasureValue": "HorAcc",
                         "HorizontalAccuracyMeasure/MeasureUnitCode": "HorAccUnit",
                         "HorizontalCollectionMethodName": "HorCollMeth",
                         "HorizontalCoordinateReferenceSystemDatumName": "HorRef",
                         "HUCEightDigitCode": "HUC8", "LatitudeMeasure": "Lat_Y", "LongitudeMeasure": "Lon_X",
                         "OrganizationIdentifier": "OrgId", "OrganizationFormalName": "OrgName",
                         "StateCode": "StateCode",
                         "MonitoringLocationDescriptionText": "StationComment", "MonitoringLocationName": "StationName",
                         "MonitoringLocationTypeName": "StationType"}
        df = self.stations
        df.rename(columns=StatFieldDict, inplace=True)
        # Unneeded fields to drop.
        statdroplist = ["ContributingDrainageAreaMeasure/MeasureUnitCode",
                        "ContributingDrainageAreaMeasure/MeasureValue",
                        "DrainageAreaMeasure/MeasureUnitCode", "DrainageAreaMeasure/MeasureValue", "CountryCode",
                        "ProviderName",
                        "SourceMapScaleNumeric"]
        df.drop(statdroplist, inplace=True, axis=1)
        # Aliases that collapse equivalent station types onto one label.
        TypeDict = {"River/Stream": "Stream", "Stream: Canal": "Stream",
                    "Well: Test hole not completed as a well": "Well"}
        # Make station types in the StationType field consistent for easier summary and compilation later on.
        df.StationType = df["StationType"].apply(lambda x: TypeDict.get(x, x), 1)
        # An elevation of exactly 0 is treated as missing; others rounded to 0.1.
        df.Elev = df.Elev.apply(lambda x: np.nan if x == 0.0 else round(x, 1), 1)
        # Remove preceding WQX from StationId field to remove duplicate station data created by legacy database.
        df['StationId'] = df['StationId'].str.replace('_WQX-', '-')
        df.drop_duplicates(subset=['StationId'], inplace=True)
        # self.stations = df
        return df
def piv_chem(self, results='', chems='piper'):
"""pivots results DataFrame for input into piper class
:param results: DataFrame of results data from WQP; default is return from call of :class:`WQP`
:param chems: set of chemistry that must be present to retain row; default are the major ions for a piper plot
:return: pivoted table of result values
.. warnings:: this method drops < and > signs from values; do not use it for statistics
"""
if results == '':
results = self.results
ParAbb = {"Alkalinity": "Alk", "Alkalinity, Carbonate as CaCO3": "Alk", "Alkalinity, total": "Alk",
"Arsenic": "As", "Calcium": "Ca", "Chloride": "Cl", "Carbon dioxide": "CO2", "Carbonate": "CO3",
"Carbonate (CO3)": "CO3", "Specific conductance": "Cond", "Conductivity": "Cond", "Copper": "Cu",
"Depth": "Depth", "Dissolved oxygen (DO)": "DO", "Iron": "Fe",
"Hardness, Ca, Mg": "Hard", "Total hardness -- SDWA NPDWR": "Hard",
"Bicarbonate": "HCO3", "Potassium": "K", "Magnesium": "Mg", "Kjeldahl nitrogen": "N",
"Nitrogen, mixed forms (NH3), (NH4), organic, (NO2) and (NO3)": "N", "Nitrogen": "N", "Sodium": "Na",
"Sodium plus potassium": "NaK", "Ammonia-nitrogen": "NH3_N", "Ammonia-nitrogen as N": "N",
"Nitrite": "NO2",
"Nitrate": "NO3", "Nitrate as N": "N", "pH, lab": "pH", "pH": "pH", "Phosphate-phosphorus": "PO4",
"Orthophosphate": "PO4", "Phosphate": "PO4", "Stream flow, instantaneous": "Q", "Flow": "Q",
"Flow rate, instantaneous": "Q", "Silica": "Si", "Sulfate": "SO4", "Sulfate as SO4": "SO4",
"Boron": "B", "Barium": "Ba", "Bromine": "Br", "Lithium": "Li", "Manganese": "Mn", "Strontium": "Sr",
"Total dissolved solids": "TDS", "Temperature, water": "Temp",
"Total Organic Carbon": "TOC", "delta Dueterium": "d2H", "delta Oxygen 18": "d18O",
"delta Carbon 13 from Bicarbonate": "d13CHCO3", "delta Oxygen 18 from Bicarbonate": "d18OHCO3",
"Total suspended solids": "TSS", "Turbidity": "Turb"}
results['ParAbb'] = results['Param'].apply(lambda x: ParAbb.get(x, ''), 1)
results.dropna(subset=['SampleId'], how='any', inplace=True)
results = results[pd.isnull(results['DetectCond'])]
results.drop_duplicates(subset=['SampleId', 'ParAbb'], inplace=True)
datap = results.pivot(index='SampleId', columns='ParAbb', values='ResultValue')
if chems == '':
pass
elif chems == 'piper':
datap.dropna(subset=['SO4', 'Cl', 'Ca', 'HCO3', 'pH'], how='any', inplace=True)
else:
datap.dropna(subset=chems, how='any', inplace=True)
return datap
| mit |
rtavenar/tslearn | tslearn/docs/examples/metrics/plot_lb_keogh.py | 1 | 2786 | # -*- coding: utf-8 -*-
r"""
LB_Keogh
========
This example illustrates the principle of time series envelope and its
relationship to the "LB_Keogh" lower bound [1].
The envelope of a time series consists of two time series such that the
original time series is between the two time series. Denoting the original
time series :math:`X = (X_i)_{1 \leq i \leq n}`, the envelope of this time
series is an ensemble of two time series of same length
:math:`L = (l_i)_{1 \leq i \leq n}` and :math:`U = (u_i)_{1 \leq i \leq n}`
such that for all :math:`i \in \{1, \ldots, n\}`:
.. math::
u_i = \max(x_{i - r}, \ldots, x_{i + r})
l_i = \min(x_{i - r}, \ldots, x_{i + r})
where :math:`r` is the radius of the envelope.
The distance between a time series :math:`Q` and an envelope :math:`(L, U)` is
defined as:
.. math::
LB_{Keogh}(Q, (L, U)) = \sqrt{\sum_{i=1}^n
\begin{cases}
(q_i - u_i)^2 & \text{if $q_i > u_i$}\\
(q_i - l_i)^2 & \text{if $q_i < l_i$}\\
0 & \text{otherwise}
\end{cases}
}
So it is simply the Euclidean distance between :math:`Q` and the envelope.
[1] E. Keogh and C. A. Ratanamahatana, "Exact indexing of dynamic time
warping". Knowledge and Information Systems, 7(3), 358-386 (2004).
"""
# Author: Romain Tavenard
# Johann Faouzi
# License: BSD 3 clause
# sphinx_gallery_thumbnail_number = 2
import numpy
import matplotlib.pyplot as plt
from tslearn.generators import random_walks
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn import metrics
# Fix the RNG so the generated walks (and the figures) are reproducible.
numpy.random.seed(0)

n_series, series_len, n_dim = 2, 100, 1
walks = random_walks(n_ts=n_series, sz=series_len, d=n_dim)
normalizer = TimeSeriesScalerMeanVariance(mu=0., std=1.)  # Rescale time series
walks_scaled = normalizer.fit_transform(walks)

# First figure: the envelope computed around the first series.
plt.figure(figsize=(14, 8))
env_low, env_high = metrics.lb_envelope(walks_scaled[0], radius=3)
plt.plot(walks_scaled[0, :, 0], "r-", label='First time series')
plt.plot(env_low[:, 0], "b-", label='Lower envelope')
plt.plot(env_high[:, 0], "g-", label='Upper envelope')
plt.legend()
plt.title('Envelope around a time series with radius=3')

# Second figure: the second series against that envelope, with the
# pointwise exceedances (the LB_Keogh contributions) drawn as vertical lines.
plt.figure(figsize=(14, 8))
plt.plot(env_low[:, 0], "b-", label='Lower envelope')
plt.plot(env_high[:, 0], "g-", label='Upper envelope')
plt.plot(walks_scaled[1, :, 0], "k-", label='Second time series')
plt.vlines(numpy.arange(series_len), walks_scaled[1, :, 0],
           numpy.clip(walks_scaled[1, :, 0], env_low[:, 0], env_high[:, 0]),
           label='Distance', color='orange')
plt.legend()

lb_dist = metrics.lb_keogh(walks_scaled[1],
                           envelope_candidate=(env_low, env_high))
plt.title('Distance between the second time series and \n'
          'the envelope = {:.4f}'.format(lb_dist))
plt.show()
| bsd-2-clause |
CforED/Machine-Learning | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics

# The digits dataset
digits = datasets.load_digits()

# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
    plt.subplot(2, 4, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Training: %i' % label)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)

# We learn the digits on the first half of the digits.
# BUGFIX: use floor division -- ``n_samples / 2`` is a float under Python 3
# and floats are invalid slice indices.
half = n_samples // 2
classifier.fit(data[:half], digits.target[:half])

# Now predict the value of the digit on the second half:
expected = digits.target[half:]
predicted = classifier.predict(data[half:])

print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))

images_and_predictions = list(zip(digits.images[half:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    plt.subplot(2, 4, index + 5)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' % prediction)

plt.show()
| bsd-3-clause |
ahye/FYS2140-Resources | src/TUSL/infinitewell.py | 1 | 3632 | ####################################################################################
###
### Program to find eigenenergies of the infinite square well.
###
####################################################################################
# Importing useful stuff
from numpy import *
from matplotlib.pyplot import *
import scipy.integrate
import numpy as np
import matplotlib.pyplot as plt
# Defining potential
# Defining potential
def infinite_well(z):
    # Infinite square well: the potential is zero everywhere inside the box;
    # the infinite walls are enforced through the boundary conditions instead.
    return zeros(len(z))
# NOTE(review): this script uses Python 2 print statements; it will not run
# unmodified under Python 3.
#
# Shooting method: integrate the dimensionless Schroedinger equation from
# z=0 with a trial eigenvalue, and accept the eigenvalue when the wave
# function (nearly) vanishes at the far wall.

# Constants and parameters
N = 500                      # number of points
z = np.linspace(0,1,N)       # position array (dimensionless, z = x/a)
dz = z[1]-z[0]               # step length
tol = 0.1                    # tolerance level on |Psi(1)|
W = infinite_well(z)         # getting potential
a = 0.4                      # width of well [nm]
hbarc = 197.3                # eV nm
mc2 = 0.511*10**6            # electron rest energy [eV]
Psi = np.zeros(N)            # wave function
Psi[0] = 0                   # initial condition (function must die in endpoints)
Psi[1] = 0.1                 # initial condition (arbitrary slope; fixed by normalization)
epsilon = []                 # list to be filled with accepted eigenvalues
epsilon_anal = []            # analytic energy list to be filled (unused)
E_n = []                     # analytical energies
E = []                       # numerical energies
lastpsi = []                 # value of last psi for each accepted eigenvalue
Psi_list = []                # list to store the best Psi
epsilon_trial = 9            # trial eigenvalue

# For plotting numerical solutions with index
number = 0                   # in use when labelling wavefunctions in plot
colors = 'cmygbcmygb'        # for different colors in plot
color_index = 0

# Search for correct eigenvalue
while epsilon_trial < 160:
    # Calculating wave function with the three-point (Numerov-like) recurrence
    for j in range(1,N-1):
        Psi[j+1] = (2 - dz**2*(epsilon_trial-W[j+1]))*Psi[j] - Psi[j-1]
    # Normalizing
    # NOTE(review): dx=1e-3 is hard-coded here while the grid spacing is
    # dz = 1/(N-1) -- confirm this is intended.
    Psi /= sqrt(scipy.integrate.simps(abs(Psi)**2,dx=1e-3))
    # Store value of last element in Psi
    Psi_end = abs(Psi[-1])
    # Check if last element is within tolerance
    if Psi_end < tol:
        epsilon.append(epsilon_trial)
        lastpsi.append(Psi_end)
        Psi_list.append(list(Psi)) # add as list to make it behave well
        # Only keep those epsilon and Psi giving minimal value of Psi[-1]
        # (nearby trial values bracket the same eigenvalue; keep the best)
        if len(lastpsi) > 1 and (epsilon[-1] - epsilon[-2]) < 2:
            if lastpsi[-1] < lastpsi[-2]:
                lastpsi.remove(lastpsi[-2])
                epsilon.remove(epsilon[-2])
                Psi_list.remove(Psi_list[-2])
            if lastpsi[-1] > lastpsi[-2]:
                lastpsi.remove(lastpsi[-1])
                epsilon.remove(epsilon[-1])
                Psi_list.remove(Psi_list[-1])
    # Update trial eigenvalue
    epsilon_trial += 0.4

# Physical energies: convert the dimensionless eigenvalues back to eV
for i in range(0,len(epsilon)):
    eps = epsilon[i]
    E_phys = eps*hbarc**2/(2*mc2*a**2)
    E.append(E_phys)

# ANALYTIC SOLUTIONS
num = [1,2,3,4]
# Determining energy and wavefunction:
for n in num:
    E_physical = n**2*hbarc**2*pi**2/(2*mc2*a**2)
    E_n.append(E_physical)
    Psi_anal = sin(pi*z*n)
    # Normalizing:
    Psi_anal /= sqrt(scipy.integrate.simps(abs(Psi_anal)**2,dx=1e-3))
    plot(z,Psi_anal,'k--')

# Print lists of energies
print '-------------------------------------------------------------------------------------------------'
print 'Energy levels of infinite potential well of width %.2f nm:' %a
print '-------------------------------------------------------------------------------------------------'
print 'Epsilon: ',epsilon
print 'Numerical energies E [eV]: ', E
print 'Analytical energies En [eV]: ', E_n
print '-------------------------------------------------------------------------------------------------'

# Plotting the accepted numerical eigenfunctions
for i in range(len(Psi_list)):
    Legend = '$\psi_%d$' % (number)
    plot(z,Psi_list[i],color=colors[color_index],label=Legend)
    number += 1
    color_index += 1

# Axes and title
plt.title('$Infinite \ well$',size=20)
plt.xlabel('$z = x/a$',size=18)
plt.ylabel('$\psi(z)$',size=18)
plot([0,0],[0,0],'k--',label='$Analytical$')
plt.legend(loc='best')
show()
zaxtax/scikit-learn | sklearn/cluster/birch.py | 18 | 22732 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.

    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    # Keep the doubly-linked list of leaves consistent when a leaf splits.
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]
    # Pick the two most distant subclusters as seeds for the two new nodes.
    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # Distance row from each seed to every subcluster.
    # BUGFIX: the original used ``dist[[farthest_idx]]`` -- a list wrapping a
    # tuple -- which is deprecated fancy indexing and raises in recent NumPy;
    # index the two seed rows explicitly instead.
    node1_dist = dist[farthest_idx[0]]
    node2_dist = dist[farthest_idx[1]]
    node1_closer = node1_dist < node2_dist
    # Reassign every subcluster to whichever seed it is closer to.
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.

    branching_factor : int
        Maximum number of CF subclusters in each node.

    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.

    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : array-like
        List of subclusters for a particular CFNode.

    prev_leaf_ : _CFNode
        Previous leaf. Useful only if is_leaf is True.

    next_leaf_ : _CFNode
        Next leaf. Useful only if is_leaf is True.

    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``
        since the centroids are just a view of the ``init_centroids_``.

    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout. Similar to ``init_centroids_``.

    centroids_ : ndarray
        View of ``init_centroids_``.

    squared_norm_ : ndarray
        View of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # One extra slot (branching_factor + 1) so a node can temporarily
        # overflow before being split.
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None

    def append_subcluster(self, subcluster):
        # Store the new subcluster and mirror its centroid / squared norm
        # into the preallocated arrays.
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]

    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        # Replace the split subcluster in place with the first half and
        # append the second half at the end.
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)

    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node overflowed and must itself be split
        by the caller; False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False

        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # ||c_i||^2 - 2 c_i.x equals the squared distance up to the constant
        # ||x||^2, so its argmin identifies the closest centroid.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]

        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)

            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False

            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)

                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False

        # good to go!
        else:
            # Leaf-level subcluster: try to absorb the sample directly.
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False

            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False

            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
    """Each subcluster in a CFNode is called a CFSubcluster.

    A CFSubcluster can have a CFNode as its child.

    Parameters
    ----------
    linear_sum : ndarray, shape (n_features,), optional
        Sample. This is kept optional to allow initialization of empty
        subclusters.

    Attributes
    ----------
    n_samples_ : int
        Number of samples that belong to each subcluster.

    linear_sum_ : ndarray
        Linear sum of all the samples in a subcluster. Prevents holding
        all sample data in memory.

    squared_sum_ : float
        Sum of the squared l2 norms of all samples belonging to a subcluster.

    centroid_ : ndarray
        Centroid of the subcluster. Prevent recomputing of centroids when
        ``CFNode.centroids_`` is called.

    child_ : _CFNode
        Child Node of the subcluster. Once a given _CFNode is set as the child
        of the _CFNode, it is set to ``self.child_``.

    sq_norm_ : ndarray
        Squared norm of the subcluster. Used to prevent recomputing when
        pairwise minimum distances are computed.
    """
    def __init__(self, linear_sum=None):
        if linear_sum is None:
            # Empty subcluster: no samples absorbed yet.
            self.n_samples_ = 0
            self.squared_sum_ = 0.0
            self.linear_sum_ = 0
        else:
            # Subcluster seeded from a single sample.
            self.n_samples_ = 1
            self.centroid_ = self.linear_sum_ = linear_sum
            self.squared_sum_ = self.sq_norm_ = np.dot(
                self.linear_sum_, self.linear_sum_)
        self.child_ = None

    def update(self, subcluster):
        # Absorb another subcluster: the CF statistics (N, LS, SS) are
        # additive, then the derived centroid/norm are recomputed.
        self.n_samples_ += subcluster.n_samples_
        self.linear_sum_ += subcluster.linear_sum_
        self.squared_sum_ += subcluster.squared_sum_
        self.centroid_ = self.linear_sum_ / self.n_samples_
        self.sq_norm_ = np.dot(self.centroid_, self.centroid_)

    def merge_subcluster(self, nominee_cluster, threshold):
        """Check if a cluster is worthy enough to be merged. If
        yes then merge.
        """
        new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
        new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
        new_n = self.n_samples_ + nominee_cluster.n_samples_
        new_centroid = (1 / new_n) * new_ls
        new_norm = np.dot(new_centroid, new_centroid)
        dot_product = (-2 * new_n) * new_norm
        # Squared radius of the would-be merged cluster, written in terms of
        # the CF statistics computed above.
        sq_radius = (new_ss + dot_product) / new_n + new_norm
        if sq_radius <= threshold ** 2:
            # Merge accepted: commit all statistics at once.
            (self.n_samples_, self.linear_sum_, self.squared_sum_,
             self.centroid_, self.sq_norm_) = \
                new_n, new_ls, new_ss, new_centroid, new_norm
            return True
        return False

    @property
    def radius(self):
        """Return radius of the subcluster"""
        dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
        return sqrt(
            ((self.squared_sum_ + dot_product) / self.n_samples_) +
            self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
    """Implements the Birch clustering algorithm.
    Every new sample is inserted into the root of the Clustering Feature
    Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively till it
    ends up at the subcluster of the leaf of the tree has the closest centroid.
    Read more in the :ref:`User Guide <birch>`.
    Parameters
    ----------
    threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started.
    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new samples enters
        such that the number of subclusters exceed the branching_factor then
        the node has to be split. The corresponding parent also has to be
        split and if the number of subclusters in the parent is greater than
        the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default None
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. By default, this final
        clustering step is not performed and the subclusters are returned
        as they are. If a model is provided, the model is fit treating
        the subclusters as new samples and the initial data is mapped to the
        label of the closest subcluster. If an int is provided, the model
        fit is AgglomerativeClustering with n_clusters set to the int.
    compute_labels : bool, default True
        Whether or not to compute labels for each fit.
    copy : bool, default True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.
    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.
    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.
    subcluster_centers_ : ndarray,
        Centroids of all subclusters read directly from the leaves.
    subcluster_labels_ : ndarray,
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.
    labels_ : ndarray, shape (n_samples,)
        Array of labels assigned to the input data.
        if partial_fit is used instead of fit, they are assigned to the
        last batch of data.
    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
    ... compute_labels=True)
    >>> brc.fit(X)
    Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
    threshold=0.5)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])
    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Maron Livny
    BIRCH: An efficient data clustering method for large databases.
    http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
    * Roberto Perdisci
    JBirch - Java implementation of BIRCH clustering algorithm
    https://code.google.com/p/jbirch/
    """
    # NOTE(review): the class docstring advertises n_clusters default None,
    # but the signature default below is 3 — kept as-is, changing either
    # would alter documented/actual behavior for existing callers.
    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy

    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        self : Birch
            The fitted estimator.
        """
        # Flag a full (non-incremental) fit so _fit rebuilds the tree.
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)

    def _fit(self, X):
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            # dummy_leaf_ heads the doubly linked list of leaves used by
            # _get_leaves().
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # The root overflowed: split it and install a new
                # non-leaf root holding the two halves.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self

    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.
        Returns
        -------
        leaves: array-like
            List of the leaf nodes.
        """
        # Walk the linked list starting just after the dummy head node.
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves

    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), None
            Input data. If X is not provided, only the global clustering
            step is done.
        Returns
        -------
        self : Birch
            The fitted estimator.
        """
        self.partial_fit_, self.fit_ = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            self._check_fit(X)
            return self._fit(X)

    def _check_fit(self, X):
        is_fitted = hasattr(self, 'subcluster_centers_')
        # Called by partial_fit, before fitting.
        has_partial_fit = hasattr(self, 'partial_fit_')
        # Should raise an error if one does not fit before predicting.
        if not (is_fitted or has_partial_fit):
            raise NotFittedError("Fit training data before predicting")
        if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")

    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.
        Avoid computation of the row norms of X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        labels: ndarray, shape(n_samples)
            Labelled data.
        """
        X = check_array(X, accept_sparse='csr')
        self._check_fit(X)
        # argmin over (-2 x.c + ||c||^2) equals argmin over squared
        # euclidean distance, since ||x||^2 is constant per row of X.
        reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
        reduced_distance *= -2
        reduced_distance += self._subcluster_norms
        return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]

    def transform(self, X, y=None):
        """
        Transform X into subcluster centroids dimension.
        Each dimension represents the distance from the sample point to each
        cluster centroid.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
            Transformed data.
        """
        # y is accepted only for pipeline API compatibility and ignored.
        check_is_fitted(self, 'subcluster_centers_')
        return euclidean_distances(X, self.subcluster_centers_)

    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # Each subcluster becomes its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters))
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self.predict(X)
| bsd-3-clause |
lateral/hyperplane-hasher | test_classes/test_key_value_store.py | 2 | 21404 | from nn.hh_ensemble_lookup import *
import copy
import random as rd
import re  # needed by test_bulk_label_chamber_ensemble_5 (regex label matching)
import string
import unittest

import numpy as np
import pandas as pd

from ann.hyperplane_hasher import (CHAMBER_ID, HyperplaneHasher,
                                   NORMAL_VECTOR_ID, NUM_NORMAL_VECS_ID)
from nn.dictionary_store import *
# Shared fixture parameters for the TestHHEnsembleLookup suite below.
RANK = 10           # dimensionality of every feature vector
NAME = 'test_HHENL'
METRIC = 'l2'
NNV = 5             # num_normal_vectors passed to each HyperplaneHasher
NHH = 4             # num_hyperplane_hashers in the ensemble
NUM_VECS = 30       # feature vectors inserted during setUp
class TestHHEnsembleLookup(unittest.TestCase):
    """Tests for HHEnsembleNNLookup backed by an in-memory DictionaryStore
    populated with NUM_VECS random feature vectors of dimension RANK."""

    def setUp(self):
        """Create a HHEnsembleLookup object whose underlying KeyValueStore object
        is a DictionaryStore instance populated by NUM_VECS feature vectors."""
        self.letters = list(string.ascii_lowercase)
        self.feature_vecs = HyperplaneHasher._random_vectors(NUM_VECS, RANK)
        self.feature_vecs_ids = ['%i' % i for i in range(NUM_VECS)]
        self.hhenl = self._create_hhenl()
        for vec, vec_id in zip(self.feature_vecs, self.feature_vecs_ids):
            self.hhenl.add_vector(vec, vec_id)

    def _create_hhenl(self):
        """Returns an empty instance of HHEnsembleNNLookup."""
        kvstore = DictionaryStore(dict())
        return HHEnsembleNNLookup(rank=RANK, name=NAME, metric=METRIC,
                                  num_normal_vectors=NNV,
                                  num_hyperplane_hashers=NHH,
                                  kvstore=kvstore)

    def _get_all_hh_labels(self, hh):
        """Returns the set of all labels in all chambers of hh."""
        ch_ids = hh.get_chamber_ids()
        # set().union(*...) replaces the Python-2-only builtin reduce() and
        # also returns the empty set when there are no chambers.
        return set().union(*[hh.chamber_labels(ch_id) for ch_id in ch_ids])

    def _rank_error(self, function):
        """Asserts that function raises ValueError when len(vec) != self.rank."""
        vec = self.feature_vecs[0]
        vec_id = self.feature_vecs_ids[0]
        self.assertRaises(ValueError, function, *[vec[1:], vec_id])

    def _bulk_rank_error(self, function):
        """Asserts that function raises ValueError when len(vec) != self.rank
        for any vec in vecs."""
        vec_short = HyperplaneHasher._random_vectors(1, self.hhenl.rank - 1)
        vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank) + vec_short
        vec_ids = self.letters[:11]
        self.hhenl._bulk_label_chamber_ensemble(vecs[:10], vec_ids[:10])
        self.assertRaises(ValueError, function, *[vecs, vec_ids])

    def _bulk_list_length_error(self, function):
        """Asserts that function raises ValueError when len(vecs) != len(vec_ids)."""
        vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank)
        vec_ids = self.letters[:11]
        self.assertRaises(ValueError, function, *[vecs, vec_ids])

    def test_init_1(self):
        """Class attributes are correctly initialised."""
        self.assertEqual(RANK, self.hhenl.rank)
        self.assertEqual(METRIC, self.hhenl.metric)
        self.assertEqual(NNV, self.hhenl.num_normal_vectors)
        self.assertEqual(NHH, self.hhenl.num_hyperplane_hashers)

    def test_init_2(self):
        """Attribute self.hhs is a list of HyperplaneHasher objects of
        length = self.num_hyperplane_hashers. Each HH object has the expected
        value for 'num_normal_vectors'."""
        hhs = self.hhenl.hhs
        self.assertIsInstance(hhs, list)
        for hh in hhs:
            self.assertIsInstance(hh, HyperplaneHasher)
            self.assertEqual(hh.num_normal_vectors, NNV)
        self.assertEqual(len(hhs), self.hhenl.num_hyperplane_hashers)

    def test_init_3(self):
        """Total set of labels in all chambers of any given HyperplaneHasher object
        equals set(self.feature_vecs_ids)."""
        hhs = self.hhenl.hhs
        for hh in hhs:
            chamber_ids = set([hh.get_chamber_id(vec) for vec in self.feature_vecs])
            labels_set_list = [hh.chamber_labels(ch_id) for ch_id in chamber_ids]
            # set().union(*...) replaces the Python-2-only builtin reduce().
            labels_set = set().union(*labels_set_list)
            self.assertEqual(labels_set, set(self.feature_vecs_ids))
            self.assertEqual(len(labels_set), NUM_VECS)

    def test_label_chamber_ensemble_1(self):
        """For each underlying HyperplaneHasher object, a new label is
        added to precisely one chamber. The set of chamber ids present as keys
        in self.kvstore is either unchanged, or enlarged by one element."""
        feature_vecs = self.feature_vecs
        old_chamber_ids = {hh: set([hh.get_chamber_id(vec) for vec in feature_vecs]) for hh in self.hhenl.hhs}
        old_chamber_labels = {hh: [hh.chamber_labels(ch_id) for ch_id in old_chamber_ids[hh]] for hh in self.hhenl.hhs}
        new_vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
        self.hhenl._label_chamber_ensemble(new_vec, 'new_vec_id')
        feature_vecs.append(new_vec)
        new_chamber_ids = {hh: set([hh.get_chamber_id(vec) for vec in feature_vecs]) for hh in self.hhenl.hhs}
        new_chamber_labels = {hh: [hh.chamber_labels(ch_id) for ch_id in new_chamber_ids[hh]] for hh in self.hhenl.hhs}
        for hh in self.hhenl.hhs:
            len_diff = len(new_chamber_ids[hh]) - len(old_chamber_ids[hh])
            self.assertIn(len_diff, [0, 1])
            if len_diff == 0:
                # Vector 'new_vec' has landed in an existing chamber.
                # The set of chamber ids thus remains unchanged, but
                # exactly one chamber has exactly one new label,
                # namely 'new_vec_id'.
                self.assertEqual(old_chamber_ids[hh], new_chamber_ids[hh])
                comparison = list(np.array(old_chamber_labels[hh]) == np.array(new_chamber_labels[hh]))
                # BUGFIX: the expected booleans depend on the number of
                # chambers of *this* hasher (old_chamber_ids[hh]), not on the
                # number of hashers (len(old_chamber_ids)).
                expected_bools = set([False] + [True] * (len(old_chamber_ids[hh]) - 1))
                self.assertEqual(set(comparison), expected_bools)
                label_diff = new_chamber_labels[hh][comparison.index(False)].difference(old_chamber_labels[hh][comparison.index(False)])
                self.assertEqual(label_diff, set(['new_vec_id']))
            if len_diff == 1:
                # Vector 'new_vec' has landed in a new chamber.
                # The id of the new chamber is that of the chamber to
                # which 'new_vec' belongs, and the new chamber
                # is exactly set(['new_vec_id']).
                id_diff = new_chamber_ids[hh].difference(old_chamber_ids[hh])
                self.assertEqual(id_diff, set([hh.get_chamber_id(new_vec)]))
                labels_diff = [entry for entry in new_chamber_labels[hh] if entry not in old_chamber_labels[hh]][0]
                self.assertEqual(labels_diff, set(['new_vec_id']))

    def test_label_chamber_ensemble_2(self):
        """Throws ValueError if len(vec) != self.rank."""
        new_vec_short = HyperplaneHasher._random_vectors(1, self.hhenl.rank - 1)[0]
        self.assertRaises(ValueError, self.hhenl._label_chamber_ensemble, *[new_vec_short, 'new_vec_short_id'])

    def test_bulk_label_chamber_ensemble_1(self):
        """Throws ValueError if len(vec) != self.rank for any vec in vecs."""
        # This logic is shared with other bulk methods; reuse the helper
        # instead of duplicating its body.
        self._bulk_rank_error(self.hhenl._bulk_label_chamber_ensemble)

    def test_bulk_label_chamber_ensemble_2(self):
        """Throws ValueError if len(vecs) != len(vec_ids)."""
        self._bulk_list_length_error(self.hhenl._bulk_label_chamber_ensemble)

    def test_bulk_label_chamber_ensemble_3(self):
        """If vec_ids are all unknown, then for each hh in self.hhenl.hhs, the difference in the
        union over all chamber_ids in hh.get_chamber_ids() of hh.chamber_labels(chamber_id), before
        and after the bulk_label, is equal to vec_ids."""
        vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank)
        vec_ids = self.letters[:10]
        labels_before = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
        self.hhenl._bulk_label_chamber_ensemble(vecs, vec_ids)
        labels_after = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
        for b, a in zip(labels_before, labels_after):
            self.assertEqual(a.difference(b), set(vec_ids))

    def test_bulk_label_chamber_ensemble_4(self):
        """If vec_ids are partially known, then for each hh in self.hhenl.hhs, the difference in the
        union over all chamber_ids in hh.get_chamber_ids() of hh.chamber_labels(chamber_id), before
        and after the bulk_label, is equal to the unknown vec_ids."""
        vecs = HyperplaneHasher._random_vectors(24, self.hhenl.rank)
        old_vec_ids = self.feature_vecs_ids[:11]
        new_vec_ids = self.letters[:13]
        vec_ids = old_vec_ids + new_vec_ids
        labels_before = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
        self.hhenl._bulk_label_chamber_ensemble(vecs, vec_ids)
        labels_after = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
        for b, a in zip(labels_before, labels_after):
            self.assertEqual(a.difference(b), set(new_vec_ids))

    def test_bulk_label_chamber_ensemble_5(self):
        """Let first = [first_1, first_2, ..., first_n] and second = [second_1, second_2, ..., second_n] be
        lists of labels, and vecs = [vec_1, vec_2, ..., vec_n] a list of vectors. Then after applying the method
        first to (vecs, first), then to (vecs, second), all chambers C in all hh in self.hhenl.hhs have the property
        that first_i in C iff second_i in C."""
        vecs = HyperplaneHasher._random_vectors(20, self.hhenl.rank)
        first_ex = re.compile(r'first_([\S]*)')
        second_ex = re.compile(r'second_([\S]*)')
        first = ['first_%i' % i for i in range(20)]
        second = ['second_%i' % i for i in range(20)]
        self.hhenl._bulk_label_chamber_ensemble(vecs, first)
        self.hhenl._bulk_label_chamber_ensemble(vecs, second)
        for hh in self.hhenl.hhs:
            ch_ids = hh.get_chamber_ids()
            for ch_id in ch_ids:
                labels = hh.chamber_labels(ch_id)
                flabels = [''.join(first_ex.findall(label)) for label in labels]
                first_labels = set([entry for entry in flabels if len(entry) > 0])
                slabels = [''.join(second_ex.findall(label)) for label in labels]
                second_labels = set([entry for entry in slabels if len(entry) > 0])
                self.assertEqual(first_labels, second_labels)

    def test_get_nn_candidates_1(self):
        """Returned objects is a set of strings of length
        at least num_neighbours."""
        vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
        nn = 10
        result = self.hhenl._get_nn_candidates(vec, nn)
        self.assertIsInstance(result, set)
        for element in result:
            self.assertIsInstance(element, str)
        self.assertGreaterEqual(len(result), nn)

    def test_get_nn_candidates_2(self):
        """Throws ValueError if len(vec) != self.rank."""
        self._rank_error(self.hhenl._get_nn_candidates)

    def test_get_vector_ids_1(self):
        """Fetched vector ids are the expected ones."""
        self.assertEqual(set(self.feature_vecs_ids), self.hhenl.get_vector_ids())

    def test_get_vector_1(self):
        """The returned object is a numpy array of length self.rank."""
        vec_id = self.feature_vecs_ids[0]
        vec = self.hhenl.get_vector(vec_id)
        self.assertIsInstance(vec, np.ndarray)
        self.assertEqual(len(vec), self.hhenl.rank)
        self.assertTrue((self.feature_vecs[0] == vec).all())

    def test_get_vector_2(self):
        """Throws KeyError if 'vec_id' is unrecognised by underlying
        KeyValueStore object."""
        vec_id = 'non_existent_vec'
        self.assertRaises(KeyError, self.hhenl.get_vector, vec_id)

    def test_bulk_get_vector_1(self):
        """The returned object is a list of numpy arrays, each of length self.rank."""
        def check_vec(vec):
            self.assertIsInstance(vec, np.ndarray)
            self.assertEqual(len(vec), self.hhenl.rank)
        ids = self.feature_vecs_ids
        vecs = self.hhenl.bulk_get_vector(ids)
        self.assertIsInstance(vecs, list)
        for vec in vecs:
            check_vec(vec)

    def test_bulk_get_vector_2(self):
        """Method returns a list of length equal to the number of recognised
        vector ids."""
        vec_ids = self.feature_vecs_ids
        ids_1 = vec_ids + ['non_existent_vec_%i' % i for i in range(5)]
        ids_2 = ['non_existent_vec_%i' % i for i in range(5)]
        vecs_1 = self.hhenl.bulk_get_vector(ids_1)
        vecs_2 = self.hhenl.bulk_get_vector(ids_2)
        self.assertEqual(len(vecs_1), len(vec_ids))
        self.assertEqual(len(vecs_2), 0)

    def test_bulk_get_vector_3(self):
        """Copies of the stored vectors are returned, rather than the vectors themselves.
        Thus changing any of the returned vectors does _not_ affect the stored versions."""
        vec_ids = self.feature_vecs_ids
        original = self.hhenl.bulk_get_vector(vec_ids)
        first = self.hhenl.bulk_get_vector(vec_ids)
        for vec in first:
            vec[0] = 11.0
        second = self.hhenl.bulk_get_vector(vec_ids)
        for f, s, o in zip(first, second, original):
            self.assertTrue((s == o).all())
            self.assertTrue((f != o).any())

    def test_get_rank_1(self):
        """Returns a positive integer agreeing with the length
        of a known vector, and with the underlying 'rank' attribute."""
        vec_id = self.feature_vecs_ids[0]
        vec = self.hhenl.get_vector(vec_id)
        returned_rank = self.hhenl.get_rank()
        self.assertEqual(self.hhenl.rank, returned_rank)
        self.assertEqual(len(vec), returned_rank)

    def test_delete_vector_1(self):
        """Removes 'vec' both from self.hhenl.kvstore, and from all chambers
        of all underlying HyperplaneHasher objects."""
        vec = self.feature_vecs[0]
        vec_id = self.feature_vecs_ids[0]
        self.hhenl.delete_vector(vec, vec_id)
        self.assertRaises(KeyError, self.hhenl.get_vector, vec_id)
        all_vec_ids = self.hhenl.get_vector_ids()
        self.assertNotIn(vec_id, all_vec_ids)
        for hh in self.hhenl.hhs:
            chamber_id = hh.get_chamber_id(vec)
            self.assertNotIn(vec_id, hh.chamber_labels(chamber_id))

    def test_delete_vector_2(self):
        """Throws KeyError if 'vec_id' is not a key in the underlying KeyValueStore object,
        throws ValueError if len(vec) != self.rank."""
        vec = self.feature_vecs[0]
        self._rank_error(self.hhenl.delete_vector)
        self.assertRaises(KeyError, self.hhenl.delete_vector, *[vec, 'non_existent_id'])

    def test_add_vector_1(self):
        """Adds 'vec' both to self.hhenl.kvstore, and to exactly one chamber
        of each underlying HyperplaneHasher object. Subsequently, the lists of keys of
        vectors in the objects self.hhenl.kvstore and self.hhenl.hhs[i].kvstore
        are identical, for all i."""
        vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
        vec_id = 'new'
        self.hhenl.add_vector(vec, vec_id)
        self.assertTrue((self.hhenl.get_vector(vec_id) == vec).all())
        all_vec_ids = self.hhenl.get_vector_ids()
        self.assertIn(vec_id, all_vec_ids)
        for hh in self.hhenl.hhs:
            chamber_id = hh.get_chamber_id(vec)
            self.assertIn(vec_id, hh.chamber_labels(chamber_id))

    def test_add_vector_2(self):
        """Throws ValueError if len(vec) != self.rank."""
        self._rank_error(self.hhenl.add_vector)

    def test_bulk_add_vector_1(self):
        """Throws ValueError if len(vec) != self.rank for any vec in vecs."""
        self._bulk_rank_error(self.hhenl.bulk_add_vector)

    def test_bulk_add_vector_2(self):
        """Throws ValueError if len(vecs) != len(vec_ids)."""
        self._bulk_list_length_error(self.hhenl.bulk_add_vector)

    def _check_new_vec_ids_added(self, hhenl, vecs, vec_ids):
        """The set theoretic difference between hhenl.get_vector_ids_post and
        hhenl.get_vector_ids_pre is equal to the set-theoretic difference
        between set(vec_ids) and hhenl.get_vector_ids_pre."""
        # Use the hhenl argument consistently (the original mixed it with
        # self.hhenl; all call sites pass self.hhenl, so behavior is equal).
        ids_pre = hhenl.get_vector_ids()
        expected_diff = set(vec_ids).difference(ids_pre)
        hhenl.bulk_add_vector(vecs, vec_ids)
        ids_post = hhenl.get_vector_ids()
        actual_diff = ids_post.difference(ids_pre)
        return (actual_diff, expected_diff)

    def test_bulk_add_vector_3(self):
        """The set theoretic difference between self.hhenl.get_vector_ids_post and
        self.hhenl.get_vector_ids_pre is equal to the set of new vector ids."""
        vecs = self.feature_vecs[:10]
        vec_ids = self.letters[:10]
        new_vec_ids = self.letters[5:15]
        actual_diff, expected_diff = self._check_new_vec_ids_added(self.hhenl, vecs, vec_ids)
        self.assertEqual(actual_diff, expected_diff)
        actual_diff, expected_diff = self._check_new_vec_ids_added(self.hhenl, vecs, new_vec_ids)
        self.assertEqual(actual_diff, expected_diff)

    def test_bulk_add_vector_4(self):
        """The method is idempotent."""
        vecs = self.feature_vecs[:10]
        vec_ids = self.letters[:10]
        _, _ = self._check_new_vec_ids_added(self.hhenl, vecs, vec_ids)
        actual_diff, expected_diff = self._check_new_vec_ids_added(self.hhenl, vecs, vec_ids)
        self.assertEqual(actual_diff, set())
        # BUGFIX: the second assertion duplicated the actual_diff check;
        # idempotency also requires that nothing new was *expected*.
        self.assertEqual(expected_diff, set())

    def _chamberwise_compare(self, hhenl_1, hhenl_2):
        """Check that the chambers of all hh objects attached to each
        of hhenl_1 and hhenl_2 contain the same labels."""
        for hh_1, hh_2 in zip(hhenl_1.hhs, hhenl_2.hhs):
            hh_1_ids, hh_2_ids = hh_1.get_chamber_ids(), hh_2.get_chamber_ids()
            # BUGFIX: the original compared hh_1's labels with themselves.
            self.assertEqual(self._get_all_hh_labels(hh_1), self._get_all_hh_labels(hh_2))
            self.assertEqual(hh_1_ids, hh_2_ids)
            for ch_id_1, ch_id_2 in zip(hh_1_ids, hh_2_ids):
                # print statements rewritten as function calls for
                # Python 3 compatibility.
                print('Bulk labels')
                print(hh_1.chamber_labels(ch_id_1))
                print('Serial labels')
                print(hh_2.chamber_labels(ch_id_2))
                self.assertEqual(hh_1.chamber_labels(ch_id_1), hh_2.chamber_labels(ch_id_2))
                print('\n')

    def _delete_all_vectors(self, hhenl):
        """Calls delete_vector(vec, vec_id) for every vec_id."""
        vec_ids = hhenl.get_vector_ids()
        vecs = [hhenl.get_vector(vec_id) for vec_id in vec_ids]
        for vec, vec_id in zip(vecs, vec_ids):
            hhenl.delete_vector(vec, vec_id)

    def _create_hhs_chamber_label_list(self, hhenl):
        """Returns a list [ch_label_list_1, ..., chamber_label_list_n] of lists,
        where ch_label_list_i is the list of pairs (chamber_id, labels) associated
        to the i-th HyperplaneHasher object in hhenl, and labels is the set of labels
        in chamber chamber_id."""
        hhs_ch_label_list = []
        for hh in hhenl.hhs:
            ch_ids = list(hh.get_chamber_ids())
            ch_ids.sort()
            ch_label_list = [(ch_id, hh.chamber_labels(ch_id)) for ch_id in ch_ids]
            hhs_ch_label_list.append(ch_label_list)
        return hhs_ch_label_list

    def test_bulk_add_vector_5(self):
        """Calling the method on (vecs, vec_ids) has the same effect as
        calling add_vector(vec, vec_id), for all (vec, vec_id) in
        zip(vecs, vec_ids)."""
        vecs = self.feature_vecs[:10]
        vec_ids = self.letters[:10]
        hhenl = self._create_hhenl()
        hhenl.bulk_add_vector(vecs, vec_ids)
        list_bulk = self._create_hhs_chamber_label_list(hhenl)
        self._delete_all_vectors(hhenl)
        for vec, vec_id in zip(vecs, vec_ids):
            hhenl.add_vector(vec, vec_id)
        list_serial = self._create_hhs_chamber_label_list(hhenl)
        self.assertEqual(list_bulk, list_serial)

    def test_find_neighbours_1(self):
        """Returns a pandas series of length 'num_neighbours', indexed
        by keys that can successfully be passed to the get_vector() method.
        The entries of 'ser' are non-negative real numbers, in ascending order.
        If the input vector is known to the underlying KeyValueStore object,
        then the first entry has value 0.0 and key == 'vec_id', where 'vec_id'
        is the id of the input vector."""
        vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
        nn = 10
        neighbours = self.hhenl.find_neighbours(vec, nn)
        self.assertIsInstance(neighbours, pd.Series)
        self.assertEqual(len(neighbours), nn)
        # BUGFIX: Series.order() was removed from pandas; sort_values() is
        # the modern equivalent.
        self.assertTrue((neighbours == neighbours.sort_values()).all())
        for i in range(len(neighbours)):
            # .iloc for positional access: the series is labelled by
            # string vector ids, not integers.
            self.assertGreaterEqual(neighbours.iloc[i], 0.0)

    def test_find_neighbours_2(self):
        """If input vector 'vec' is known to underlying KeyValueStore object,
        then first entry of output has value 0.0 and key 'vec_id', the id of 'vec'."""
        vec = self.feature_vecs[0]
        vec_id = self.feature_vecs_ids[0]
        nn = 10
        neighbours = self.hhenl.find_neighbours(vec, nn)
        self.assertEqual(neighbours.iloc[0], 0.0)
        self.assertEqual(neighbours.index[0], vec_id)

    def test_find_neighbours_3(self):
        """Throws ValueError if len(vec) != self.rank."""
        self._rank_error(self.hhenl.find_neighbours)
| mit |
B3AU/waveTree | sklearn/decomposition/tests/test_dict_learning.py | 8 | 7108 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
# Deterministic module-level fixture: a 10x8 Gaussian data matrix shared
# by every test below.
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
    # The learned dictionary has one atom (row) per requested component.
    n_components = 5
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_overcomplete():
    # Overcomplete case (n_components > n_features) must still fit cleanly.
    n_components = 12
    dico = DictionaryLearning(n_components, random_state=0).fit(X)
    assert_true(dico.components_.shape == (n_components, n_features))


def test_dict_learning_reconstruction():
    # code.dot(components_) should reconstruct X: tightly for 'omp',
    # only approximately (decimal=2) for the regularized 'lasso_lars'.
    n_components = 12
    dico = DictionaryLearning(n_components, transform_algorithm='omp',
                              transform_alpha=0.001, random_state=0)
    code = dico.fit(X).transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X)
    dico.set_params(transform_algorithm='lasso_lars')
    code = dico.transform(X)
    assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
    # used to test lars here too, but there's no guarantee the number of
    # nonzero atoms is right.
def test_dict_learning_nonzero_coefs():
    # Both 'lars' and 'omp' must honour transform_n_nonzero_coefs exactly.
    n_components = 4
    dico = DictionaryLearning(n_components, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X[1])
    assert_true(len(np.flatnonzero(code)) == 3)
    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[1])
    assert_equal(len(np.flatnonzero(code)), 3)


def test_dict_learning_unknown_fit_algorithm():
    # An unrecognised fit_algorithm must raise at fit time, not at __init__.
    n_components = 5
    dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
    assert_raises(ValueError, dico.fit, X)


def test_dict_learning_split():
    # With split_sign=True the code doubles in width: positive and negative
    # parts are stored separately and their difference recovers the code.
    n_components = 5
    dico = DictionaryLearning(n_components, transform_algorithm='threshold',
                              random_state=0)
    code = dico.fit(X).transform(X)
    dico.split_sign = True
    split_code = dico.transform(X)
    assert_array_equal(split_code[:, :n_components] -
                       split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
    # this test was not actually passing before!
    # NOTE(review): skipped unconditionally, so everything below is currently
    # dead code -- kept for reference until partial_fit/fit equivalence is
    # actually verified and fixed.
    raise SkipTest
    n_components = 12
    rng = np.random.RandomState(0)
    V = rng.randn(n_components, n_features)  # random init
    V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
    dico1 = MiniBatchDictionaryLearning(n_components, n_iter=10, batch_size=1,
                                        shuffle=False, dict_init=V,
                                        random_state=0).fit(X)
    dico2 = MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,
                                        random_state=0)
    for ii, sample in enumerate(X):
        dico2.partial_fit(sample, iter_offset=ii * dico2.n_iter)
        # if ii == 1: break
    assert_true(not np.all(sparse_encode(X, dico1.components_, alpha=100) ==
                           0))
    assert_array_equal(dico1.components_, dico2.components_)
def test_sparse_encode_shapes():
    """Every encoding algorithm must return a (n_samples, n_components) code."""
    n_components = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_components, n_features)  # random init
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    for algorithm in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
        code = sparse_encode(X, dictionary, algorithm=algorithm)
        assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
    """A tiny alpha must yield a non-trivial code with low reconstruction error."""
    n_components = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_components, n_features)  # random init
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    code = sparse_encode(X, dictionary, alpha=0.001)
    assert_true(not np.all(code == 0))
    reconstruction_error = np.sqrt(np.sum((np.dot(code, dictionary) - X) ** 2))
    assert_less(reconstruction_error, 0.1)
def test_unknown_method():
    """sparse_encode must reject an unrecognized algorithm name."""
    n_components = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_components, n_features)  # random init
    assert_raises(ValueError, sparse_encode, X, dictionary, algorithm="<unknown>")
def test_sparse_coder_estimator():
    """The SparseCoder estimator must behave like a direct sparse_encode call."""
    n_components = 12
    rng = np.random.RandomState(0)
    dictionary = rng.randn(n_components, n_features)  # random init
    dictionary /= np.sum(dictionary ** 2, axis=1)[:, np.newaxis]
    coder = SparseCoder(dictionary=dictionary, transform_algorithm='lasso_lars',
                        transform_alpha=0.001)
    code = coder.transform(X)
    assert_true(not np.all(code == 0))
    assert_less(np.sqrt(np.sum((np.dot(code, dictionary) - X) ** 2)), 0.1)
| bsd-3-clause |
gauthiier/mailinglists | analysis/archive.py | 1 | 4681 | import numpy as np
import pandas as pd
import email, email.parser
import os, datetime, json, gzip, re
import analysis.util
import analysis.query
import search.archive ## circular...
def filter_date(msg, archive_name):
    """Extract the date of *msg* as a pandas Timestamp, or None if unusable.

    Returns None when the date cannot be parsed by analysis.util, is outside
    pandas' representable range, or falls outside the window
    [archive minimum date, now].
    """
    time_tz = analysis.util.format_date(msg, archive_name)
    if not time_tz:
        return None
    dt = datetime.datetime.fromtimestamp(time_tz)
    try:
        date_time = pd.to_datetime(dt)
    except pd.tslib.OutOfBoundsDatetime:
        # NOTE(review): pd.tslib was removed in pandas >= 0.24; modern pandas
        # exposes this exception as pd.errors.OutOfBoundsDatetime -- confirm
        # the pinned pandas version before upgrading.
        print('time out of bound')
        print(dt)
        return None
    # Reject messages dated before the archive's configured start or in the
    # future (clock-skewed / malformed headers).
    min_date = pd.to_datetime(analysis.util.min_date(archive_name), format='%d/%m/%Y')
    max_date = pd.to_datetime(datetime.datetime.now())
    if date_time < min_date or date_time > max_date:
        return None
    return date_time
def message_to_tuple_record(msg, records, archive_name, references='X'):
    """Append a flat tuple record for *msg*, and recursively for each of its
    follow-ups, to *records*.

    The tuple layout matches the column list used by
    json_data_to_pd_dataframe.  Messages whose date or sender cannot be
    parsed are skipped (a skip notice is printed).
    """
    # check date first?
    date = filter_date(msg, archive_name)
    if not date:
        print("Archive::filter_date returned None. Skip.")
        return
    # check / filter from email address second?
    from_addr = analysis.util.format_from(msg, archive_name)
    if not from_addr:
        print("Archive::analysis.util.format_from returned None. Skip.")
        return
    url = analysis.util.format_url(msg, archive_name)
    author = analysis.util.format_author(msg, archive_name)
    subject = analysis.util.format_subject(msg, archive_name)
    message_id = analysis.util.format_id(msg, archive_name)
    content = analysis.util.format_content(msg, archive_name)
    records.append((message_id,
                    from_addr,
                    author,
                    subject,
                    date,
                    url,
                    len(content),
                    0 if not 'follow-up' in msg else len(msg['follow-up']),
                    references))
    # recursive follow up -- but references is not keeping track really...
    # NOTE(review): each follow-up records only its direct parent's id; the
    # full reference chain back to the thread root is not preserved.
    if 'follow-up' in msg:
        for f in msg['follow-up']:
            message_to_tuple_record(f, records, archive_name, references=message_id)
    return
def json_data_to_pd_dataframe(json_data, archive_name):
    """Flatten raw archive JSON chunks into a date-indexed pandas DataFrame."""
    column_names = ['message-id',
                    'from',
                    'author',
                    'subject',
                    'date',
                    'url',
                    'content-length',
                    'nbr-references',
                    'references']
    records = []
    for chunk in json_data:
        for thread in chunk['threads']:
            message_to_tuple_record(thread, records, archive_name)
    print('zzzzzzzzz ----> ' + archive_name + " ---- " + str(len(records)))
    frame = pd.DataFrame.from_records(records,
                                      index='date',
                                      columns=column_names)
    frame.index.name = 'date'
    return frame
def load_from_file(filename, archive_name, archive_dir, json_data=None):
    """Load archive JSON from disk and convert it to a DataFrame.

    Resolution order:
      1. exact ``archive_dir/filename(.json.gz)``,
      2. most recent ``archive_dir/filename*.json.gz`` (alphabetical order
         doubles as chronological because of the timestamped names),
      3. every ``*.json`` file inside the directory ``archive_dir/filename``.

    Returns None when nothing matching is found.
    """
    if not filename.endswith('.json.gz'):
        file_path = os.path.join(archive_dir, filename + '.json.gz')
    else:
        file_path = os.path.join(archive_dir, filename)
    if os.path.isfile(file_path):
        with gzip.open(file_path, 'r') as fp:
            json_data = json.load(fp)
        return json_data_to_pd_dataframe(json_data['threads'], archive_name)
    else:
        #list of all "filename[...].json.gz" in archive_dir
        files = sorted([f for f in os.listdir(archive_dir) if os.path.isfile(os.path.join(archive_dir, f)) and f.startswith(filename) and f.endswith('.json.gz')])
        if files:
            filename = files[-1]  # take the most recent (listed alpha-chronological)
            file_path = os.path.join(archive_dir, filename)
        if os.path.isfile(file_path):
            with gzip.open(file_path, 'r') as fp:
                json_data = json.load(fp)
            return json_data_to_pd_dataframe(json_data['threads'], archive_name)
        else:
            #list of all json files in archive_dir/filename
            dir_path = os.path.join(archive_dir, filename)
            if not os.path.isdir(dir_path):
                return None
            files = [os.path.join(dir_path, f) for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f)) and f.endswith('.json')]
            if not files:
                return None
            # load all json files, one thread structure per file
            threads = []
            for file_path in files:
                with open(file_path, 'r') as fp:
                    json_data = json.load(fp)
                threads.append(json_data)
            print('---> ' + archive_name)
            return json_data_to_pd_dataframe(threads, archive_name)
def load_from_search_archive(archive):
    """Build a DataFrame directly from an in-memory search Archive object."""
    threads = list(archive.archive.values())
    return json_data_to_pd_dataframe(threads, archive.archive_name)
class Archive:
    """Wrapper giving query access to a mailing-list archive DataFrame."""
    # Class-level defaults; instances overwrite these in __init__.
    data = None  # "raw" json data
    dataframe = None  # main pd dataframe
    def __init__(self, archive_name, archive_dir="archives"):
        # Polymorphic constructor: accepts an already-built DataFrame, a
        # search.archive.Archive, or an archive name/path string.
        if isinstance(archive_name, pd.core.frame.DataFrame):
            self.dataframe = archive_name  ## no copies here
        if isinstance(archive_name, search.archive.Archive):
            self.dataframe = load_from_search_archive(archive_name)
        if isinstance(archive_name, str):
            # need a filename or a dir name....
            self.dataframe = load_from_file(archive_name, archive_name, archive_dir, self.data)
    def query(self):
        """Return a fresh analysis.query.Query wrapping this archive."""
        q = analysis.query.Query(self)
        return q
| gpl-3.0 |
bmazin/ARCONS-pipeline | mosaicing/test/Mosaic_matchFinder.py | 1 | 1859 | import math
import numpy as np
from util import ObsFileSeq as ObsFileSeq
from util import utils
import pyfits
from util.popup import PopUp, plotArray
import matplotlib.pyplot as plt
import pickle
import astrometry.CentroidCalc as cc
import mosaicing.matchFinder as mf
#import obsfileViewerTest as ovt
# Define a set of observation files for this mosaic run
name='ObjectFinderDemoMosaic'
run = "PAL2014"
date = "20141020"
timeStampList = [
    '20141021-033954',
    '20141021-034532',
    '20141021-035035',
    '20141021-035538',
    '20141021-040041',
    '20141021-040544',
    '20141021-041047',
]
# Per-frame duration passed to ObsFileSeq -- presumably seconds; TODO confirm.
dt = 200
ofs = ObsFileSeq.ObsFileSeq(name,run,date,timeStampList,dt)
# Target coordinates of the mosaic center.
RA = 283.3961625 #degrees
Dec = 33.029175
fd = ofs.getFrameDict()
ofs.loadSpectralCubes()
# NOTE(review): reads the pickle written by loadSpectralCubes; the path is
# hard-coded to this mosaic's name. The file handle is never closed.
rmi = open('ObjectFinderDemoMosaic-cubes.pkl', 'rb')
data = pickle.load(rmi)
iFrameList = range(len(ofs.frameObsInfos))
image_list = []
# Collapse each spectral cube to a 2-d rate image (counts / eff. int. time),
# zeroing NaN pixels.
for iFrame in iFrameList:
    c = data[iFrame]['cube']
    c = np.sum(c, axis = 2)
    t = data[iFrame]['effIntTime']
    image = c/t
    nanspot = np.isnan(image)
    image[nanspot] = 0
    image_list.append(image)
#these are all the matching stars i found in the 66 frames
#frame_list = np.array([image_list[0], image_list[10], image_list[11], image_list[18], image_list[19], image_list[28], image_list[29], image_list[34], image_list[35], image_list[36], image_list[37], image_list[42], image_list[43], image_list[65]])
#these are the frames previously used in chris's example
frame_list = np.array([image_list[0], image_list[28], image_list[29], image_list[65]])
#degPerPix, theta, raArcsecPerSec = ObsFileSeq.ObjectFinder(image_list, fd, RA, Dec)
# Solve the plate scale / rotation / drift from matched stars in the frames.
degPerPix, theta, raArcsecPerSec = mf.ObjectFinder(frame_list, fd, RA, Dec)
ofs.setRm(degPerPix,
          math.degrees((theta)),
          raArcsecPerSec,
          )
mosaic = ofs.makeMosaicImage(range(66))
plotArray(mosaic)
| gpl-2.0 |
flightgong/scikit-learn | sklearn/ensemble/forest.py | 2 | 52025 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# License: BSD 3 clause
from __future__ import division
import itertools
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import array2d, check_random_state, check_arrays, safe_asarray
from ..utils.validation import DataConversionWarning
from .base import BaseEnsemble, _partition_estimators
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(trees, forest, X, y, sample_weight, verbose):
    """Private function used to build a batch of trees within a job."""
    for i, tree in enumerate(trees):
        if verbose > 1:
            print("building tree %d of %d" % (i + 1, len(trees)))

        if forest.bootstrap:
            n_samples = X.shape[0]
            if sample_weight is None:
                curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
            else:
                curr_sample_weight = sample_weight.copy()

            # Bootstrap resample encoded as weights: draw n_samples indices
            # with replacement and multiply each sample's weight by the number
            # of times it was drawn.
            random_state = check_random_state(tree.random_state)
            indices = random_state.randint(0, n_samples, n_samples)
            sample_counts = np.bincount(indices, minlength=n_samples)
            curr_sample_weight *= sample_counts

            tree.fit(X, y,
                     sample_weight=curr_sample_weight,
                     check_input=False)

            # Remember which samples were in-bag; used later for OOB scoring.
            tree.indices_ = sample_counts > 0.
        else:
            tree.fit(X, y,
                     sample_weight=sample_weight,
                     check_input=False)

    return trees
def _parallel_predict_proba(trees, X, n_classes, n_outputs):
    """Private function used to compute a batch of predictions within a job."""
    n_samples = X.shape[0]

    if n_outputs == 1:
        proba = np.zeros((n_samples, n_classes))

        for tree in trees:
            proba_tree = tree.predict_proba(X)

            if n_classes == tree.n_classes_:
                proba += proba_tree

            else:
                # Tree saw only a subset of the forest's classes: scatter its
                # probability columns into the positions of the classes it knows.
                proba[:, tree.classes_] += \
                    proba_tree[:, range(len(tree.classes_))]

    else:
        # Multi-output: one probability matrix per output.
        proba = []

        for k in xrange(n_outputs):
            proba.append(np.zeros((n_samples, n_classes[k])))

        for tree in trees:
            proba_tree = tree.predict_proba(X)

            for k in xrange(n_outputs):
                if n_classes[k] == tree.n_classes_[k]:
                    proba[k] += proba_tree[k]

                else:
                    proba[k][:, tree.classes_] += \
                        proba_tree[k][:, range(len(tree.classes_))]

    return proba
def _parallel_predict_regression(trees, X):
"""Private function used to compute a batch of predictions within a job."""
return sum(tree.predict(X) for tree in trees)
def _parallel_apply(tree, X):
"""Private helper function for parallizing calls to apply in a forest."""
return tree.tree_.apply(X)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)

        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose

    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = array2d(X, dtype=DTYPE)
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_apply)(tree, X) for tree in self.estimators_)

        # Transpose so rows correspond to samples, columns to estimators.
        return np.array(results).T

    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        X, = check_arrays(X, dtype=DTYPE, sparse_format="dense")

        # Remap output
        n_samples, self.n_features_ = X.shape

        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples, ), for example using ravel().",
                 DataConversionWarning, stacklevel=2)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        # Subclasses may encode labels here (see ForestClassifier._validate_y).
        y = self._validate_y(y)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        self._validate_estimator()

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Seed every tree from the forest's random_state before dispatch.
        trees = []
        for i in range(self.n_estimators):
            tree = self._make_estimator(append=False)
            tree.set_params(random_state=random_state.randint(MAX_INT))
            trees.append(tree)

        # Free allocated memory, if any
        self.estimators_ = None

        # Parallel loop: we use the threading backend as the Cython code for
        # fitting the trees is internally releasing the Python GIL making
        # threading always more efficient than multiprocessing in that case.
        all_trees = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_build_trees)(
                trees[starts[i]:starts[i + 1]],
                self,
                X,
                y,
                sample_weight,
                verbose=self.verbose)
            for i in range(n_jobs))

        # Reduce
        self.estimators_ = list(itertools.chain(*all_trees))

        if self.oob_score:
            self._set_oob_score(X, y)

        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default implementation
        return y

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
           feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        # Average of per-tree importances.
        return sum(tree.feature_importances_
                   for tree in self.estimators_) / self.n_estimators
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def _set_oob_score(self, X, y):
        """Compute out-of-bag decision function and accuracy."""
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]

        oob_decision_function = []
        oob_score = 0.0
        predictions = []

        for k in xrange(self.n_outputs_):
            predictions.append(np.zeros((n_samples,
                                         n_classes_[k])))

        for estimator in self.estimators_:
            # mask selects the samples this estimator did NOT see (out-of-bag).
            mask = np.ones(n_samples, dtype=np.bool)
            mask[estimator.indices_] = False
            p_estimator = estimator.predict_proba(X[mask, :])

            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]

            for k in xrange(self.n_outputs_):
                predictions[k][mask, :] += p_estimator[k]

        for k in xrange(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")

            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)

        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function

        self.oob_score_ = oob_score / self.n_outputs_

    def _validate_y(self, y):
        """Encode class labels of each output as integer indices."""
        y = np.copy(y)

        self.classes_ = []
        self.n_classes_ = []

        for k in xrange(self.n_outputs_):
            classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])

        return y

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the majority
        prediction of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        n_samples = len(X)
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        else:
            predictions = np.zeros((n_samples, self.n_outputs_))

            for k in xrange(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)

            return predictions

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_,
                self.n_outputs_)
            for i in range(n_jobs))

        # Reduce: average the per-job probability sums over all estimators.
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in xrange(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in xrange(1, len(all_proba)):
                for k in xrange(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            for k in xrange(self.n_outputs_):
                proba[k] /= self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y: array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self)

        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i]:starts[i + 1]], X)
            for i in range(n_jobs))

        # Reduce: average of all per-tree predictions.
        y_hat = sum(all_y_hat) / len(self.estimators_)

        return y_hat

    def _set_oob_score(self, X, y):
        """Compute out-of-bag predictions and R^2 score."""
        n_samples = y.shape[0]

        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))

        for estimator in self.estimators_:
            # mask selects the samples this estimator did NOT see (out-of-bag).
            mask = np.ones(n_samples, dtype=np.bool)
            mask[estimator.indices_] = False
            p_estimator = estimator.predict(X[mask, :])

            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]

            predictions[mask, :] += p_estimator
            n_predictions[mask, :] += 1

        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            n_predictions[n_predictions == 0] = 1

        predictions /= n_predictions
        self.oob_prediction_ = predictions

        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))

        self.oob_score_ = 0.0

        for k in xrange(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])

        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.

    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves.  A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    Attributes
    ----------
    `estimators_`: list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    `classes_`: array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    `n_classes_`: int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.

    `oob_decision_function_` : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------

    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "max_features",
                              "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes

        # Deprecated constructor arguments kept only to emit warnings.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_samples_leaf`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # Delegate ensemble bookkeeping (bagging, parallel fitting, OOB
        # scoring) to the forest base class.  estimator_params names the
        # attributes that are copied onto each DecisionTreeRegressor
        # sub-estimator when the forest is built.
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "max_features",
                              "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

        # Tree-specific parameters, forwarded to the sub-estimators via
        # estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes

        # Deprecated no-op parameters, accepted only for backward
        # compatibility; supplying them raises a DeprecationWarning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    Attributes
    ----------
    `estimators_`: list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    `classes_`: array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    `n_classes_`: int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.

    `oob_decision_function_` : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # Delegate ensemble bookkeeping to the forest base class;
        # estimator_params names the attributes copied onto each
        # ExtraTreeClassifier sub-estimator.
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "max_features",
                              "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

        # Tree-specific parameters, forwarded to the sub-estimators via
        # estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes

        # Deprecated no-op parameters, accepted only for backward
        # compatibility; supplying them raises a DeprecationWarning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreesRegressor(ForestRegressor):
    """An extra-trees regressor.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
        Note: this parameter is tree-specific.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    Attributes
    ----------
    `estimators_`: list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    `oob_score_` : float
        Score of the training dataset obtained using an out-of-bag estimate.

    `oob_prediction_` : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None,
                 compute_importances=None):
        # Delegate ensemble bookkeeping to the forest base class;
        # estimator_params names the attributes copied onto each
        # ExtraTreeRegressor sub-estimator.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "max_features",
                              "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

        # Tree-specific parameters, forwarded to the sub-estimators via
        # estimator_params above.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes

        # Deprecated no-op parameters, accepted only for backward
        # compatibility; supplying them raises a DeprecationWarning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as trees in the forest.

    The dimensionality of the resulting representation is approximately
    ``n_estimators * 2 ** max_depth``.

    Parameters
    ----------
    n_estimators : int
        Number of trees in the forest.

    max_depth : int
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less
        than ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    Attributes
    ----------
    `estimators_`: list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """
    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_leaf_nodes=None,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 min_density=None):
        # Totally random trees: no bootstrap, no OOB score.  The base
        # estimator is an ExtraTreeRegressor fitted on random pseudo-targets
        # (see fit_transform below).
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "max_features",
                              "max_leaf_nodes", "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

        # criterion and max_features are fixed for this transformer: each
        # split is drawn on a single random feature, scored with MSE.
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.sparse_output = sparse_output

        # Deprecated no-op parameter, accepted only for backward
        # compatibility; supplying it raises a DeprecationWarning.
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

    def _set_oob_score(*args):
        # Out-of-bag scoring is undefined for this unsupervised transformer.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None):
        """Fit estimator.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input data used to build forests.
        """
        self.fit_transform(X, y)
        return self

    def fit_transform(self, X, y=None):
        """Fit estimator and transform dataset.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input data used to build forests.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        X = safe_asarray(X)
        rnd = check_random_state(self.random_state)
        # Targets are irrelevant for totally random splits; uniform noise is
        # generated only to satisfy the supervised fit interface.
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y)

        # One-hot encode the index of the leaf each sample reaches per tree.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))

    def transform(self, X):
        """Transform dataset.

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
ds-hwang/deeplearning_udacity | python_practice/01_basics.py | 1 | 4919 |
# coding: utf-8
# In[ ]:
"""Summary of tensorflow basics.
Parag K. Mital, Jan 2016."""
# In[13]:
# %% Import tensorflow and pyplot
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# In[ ]:
# %% tf.Graph represents a collection of tf.Operations
# You can create operations by writing out equations.
# By default, there is a graph: tf.get_default_graph()
# and any new operations are added to this graph.
# The result of a tf.Operation is a tf.Tensor, which holds
# the values.
# In[14]:
# %% First a tf.Tensor
n_values = 32
x = tf.linspace(-3.0, 3.0, n_values)
# In[17]:
# %% Construct a tf.Session to execute the graph.
sess = tf.Session()
result = sess.run(x)
print(result)
# In[20]:
# %% Alternatively pass a session to the eval fn:
x.eval(session=sess)
# x.eval() does not work, as it requires a session!
# x.eval()
# In[30]:
# %% We can setup an interactive session if we don't
# want to keep passing the session around:
sess.close()
sess = tf.InteractiveSession()
# In[31]:
# %% Now this will work!
x.eval()
# In[32]:
# %% Now a tf.Operation
# We'll use our values from [-3, 3] to create a Gaussian Distribution
sigma = 1.0
mean = 0.0
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(sigma, 2.0)))) *
(1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
# In[33]:
# %% By default, new operations are added to the default Graph
assert z.graph is tf.get_default_graph()
print z.graph
# In[ ]:
#plt.close('all')
# %% Execute the graph and plot the result
plt.plot(z.eval())
plt.show()
# In[ ]:
# %% We can find out the shape of a tensor like so:
print(z.get_shape())
# In[35]:
# %% Or in a more friendly format
print(z.get_shape().as_list())
# In[36]:
# %% Sometimes we may not know the shape of a tensor
# until it is computed in the graph. In that case
# we should use the tf.shape fn, which will return a
# Tensor which can be eval'ed, rather than a discrete
# value of tf.Dimension
print(tf.shape(z).eval())
# In[ ]:
# %% We can combine tensors like so:
print(tf.pack([tf.shape(z), tf.shape(z), [3], [4]]).eval())
# In[ ]:
# %% Let's multiply the two to get a 2d gaussian
z_2d = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
# In[ ]:
# %% Execute the graph and store the value that `out` represents in `result`.
plt.imshow(z_2d.eval())
# In[ ]:
# %% For fun let's create a gabor patch:
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
z = tf.mul(tf.matmul(x, y), z_2d)
plt.imshow(z.eval())
# In[ ]:
# %% We can also list all the operations of a graph:
ops = tf.get_default_graph().get_operations()
print([op.name for op in ops])
# In[ ]:
# %% Lets try creating a generic function for computing the same thing:
def gabor(n_values=32, sigma=1.0, mean=0.0):
    """Build an n_values x n_values gabor patch kernel as a TF tensor.

    A 1-D Gaussian over [-3, 3] is turned into a 2-D Gaussian via an
    outer product, then modulated elementwise by a horizontal sinusoid.
    """
    coords = tf.linspace(-3.0, 3.0, n_values)
    # 1-D Gaussian bump over the sampling grid.
    gauss = (tf.exp(tf.neg(tf.pow(coords - mean, 2.0) /
                           (2.0 * tf.pow(sigma, 2.0)))) *
             (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
    # Outer product: column vector times row vector -> 2-D Gaussian.
    gauss_col = tf.reshape(gauss, [n_values, 1])
    gauss_row = tf.reshape(gauss, [1, n_values])
    gauss_kernel = tf.matmul(gauss_col, gauss_row)
    # Horizontal sinusoid, broadcast across columns by another outer product.
    sine_col = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)),
                          [n_values, 1])
    ones_row = tf.reshape(tf.ones_like(sine_col), [1, n_values])
    return tf.mul(tf.matmul(sine_col, ones_row), gauss_kernel)
# In[ ]:
# %% Confirm this does something:
plt.imshow(gabor().eval())
# In[ ]:
# %% And another function which can convolve
def convolve(img, W):
    """Convolve `img` with the 2-D kernel `W` using tf.nn.conv2d.

    Both arguments are promoted to the 4-D shapes conv2d expects:
    the kernel to [height, width, n_input, n_output] and the image to
    [num, height, width, channels].  Returns the convolved 4-D tensor.
    """
    # The W matrix is only 2D
    # But conv2d will need a tensor which is 4d:
    # height x width x n_input x n_output
    if len(W.get_shape()) == 2:
        dims = W.get_shape().as_list() + [1, 1]
        W = tf.reshape(W, dims)

    if len(img.get_shape()) == 2:
        # num x height x width x channels
        dims = [1] + img.get_shape().as_list() + [1]
        img = tf.reshape(img, dims)
    elif len(img.get_shape()) == 3:
        dims = [1] + img.get_shape().as_list()
        img = tf.reshape(img, dims)
        # if the image is 3 channels, then our convolution
        # kernel needs to be repeated for each input channel
        W = tf.concat(2, [W, W, W])  # old TF API: concat axis comes first

    # Stride is how many values to skip for the dimensions of
    # num, height, width, channels
    convolved = tf.nn.conv2d(img, W,
                             strides=[1, 1, 1, 1], padding='SAME')
    return convolved
# In[ ]:
# %% Load up an image:
from skimage import data
img = data.astronaut()
plt.imshow(img)
plt.show()
print(img.shape)

# In[ ]:
# %% Now create a placeholder for our graph which can store any input:
x = tf.placeholder(tf.float32, shape=img.shape)

# In[ ]:
# %% And a graph which can convolve our image with a gabor
out = convolve(x, gabor())

# In[ ]:
# %% Now send the image into the graph and compute the result
# tf.squeeze drops the singleton dims added inside convolve() so the
# result can be displayed as a plain 2-D image.
result = tf.squeeze(out).eval(feed_dict={x: img})
plt.imshow(result)
plt.show()
| mit |
thesuperzapper/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 71 | 12923 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
  # pylint: disable=undefined-variable
  """Tests for `DataFeeder`.

  Most tests run each scenario twice: once with a plain array input and
  once with the same data wrapped in a dict (via `_wrap_dict`) to cover
  DataFeeder's dict-of-arrays input path.
  """

  def _wrap_dict(self, data, prepend=''):
    # Duplicate `data` under two keys to exercise the dict input path.
    return {prepend + '1': data, prepend + '2': data}

  def _assert_raises(self, input_data):
    # Matches both "Cannot convert" and "cannot convert".
    with self.assertRaisesRegexp(TypeError, 'annot convert'):
      data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)

  def test_input_uint32(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
    self._assert_raises(data)
    self._assert_raises(self._wrap_dict(data))

  def test_input_uint64(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
    self._assert_raises(data)
    self._assert_raises(self._wrap_dict(data))

  def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
    # Check both the feeder's recorded numpy dtype and the dtype of the
    # placeholder built by input_builder.
    feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
    if isinstance(input_data, dict):
      for k, v in list(feeder.input_dtype.items()):
        self.assertEqual(expected_np_dtype, v)
    else:
      self.assertEqual(expected_np_dtype, feeder.input_dtype)
    with ops.Graph().as_default() as g, self.test_session(g):
      inp, _ = feeder.input_builder()
      if isinstance(inp, dict):
        for k, v in list(inp.items()):
          self.assertEqual(expected_tf_dtype, v.dtype)
      else:
        self.assertEqual(expected_tf_dtype, inp.dtype)

  def test_input_int8(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
    self._assert_dtype(np.int8, dtypes.int8, data)
    self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))

  def test_input_int16(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
    self._assert_dtype(np.int16, dtypes.int16, data)
    self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))

  def test_input_int32(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
    self._assert_dtype(np.int32, dtypes.int32, data)
    self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))

  def test_input_int64(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
    self._assert_dtype(np.int64, dtypes.int64, data)
    self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))

  def test_input_uint8(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
    self._assert_dtype(np.uint8, dtypes.uint8, data)
    self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))

  def test_input_uint16(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
    self._assert_dtype(np.uint16, dtypes.uint16, data)
    self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))

  def test_input_float16(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
    self._assert_dtype(np.float16, dtypes.float16, data)
    self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))

  def test_input_float32(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
    self._assert_dtype(np.float32, dtypes.float32, data)
    self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))

  def test_input_float64(self):
    data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
    self._assert_dtype(np.float64, dtypes.float64, data)
    self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))

  def test_input_bool(self):
    data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
    self._assert_dtype(np.bool, dtypes.bool, data)
    self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))

  def test_input_string(self):
    input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
    self._assert_dtype(input_data.dtype, dtypes.string, input_data)
    self._assert_dtype(input_data.dtype, dtypes.string,
                       self._wrap_dict(input_data))

  def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
    # Dict-aware assertAllClose.  `src_prop` names an attribute to read
    # off each src value (e.g. 'name'); `src_key_of` is a mapping (e.g. a
    # feed_dict) used to translate that value before comparing to `dest`.

    def func(x):
      val = getattr(x, src_prop) if src_prop else x
      return val if src_key_of is None else src_key_of[val]

    if isinstance(src, dict):
      for k in list(src.keys()):
        self.assertAllClose(func(src[k]), dest)
    else:
      self.assertAllClose(func(src), dest)

  def test_unsupervised(self):

    def func(feeder):
      with self.test_session():
        inp, _ = feeder.input_builder()
        feed_dict_fn = feeder.get_feed_dict_fn()
        feed_dict = feed_dict_fn()
        self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')

    data = np.matrix([[1, 2], [2, 3], [3, 4]])
    func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(data), None, n_classes=0, batch_size=1))

  def test_data_feeder_regression(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')

    x = np.matrix([[1, 2], [3, 4]])
    y = np.array([1, 2])
    func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(x, 'in'),
            self._wrap_dict(y, 'out'),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=3))

  def test_epoch(self):

    def func(feeder):
      with self.test_session():
        feeder.input_builder()
        epoch = feeder.make_epoch_variable()
        feed_dict_fn = feeder.get_feed_dict_fn()
        # First input
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [0])
        # Second input
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [0])
        # Third input
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [0])
        # Back to the first input again, so new epoch.
        feed_dict = feed_dict_fn()
        self.assertAllClose(feed_dict[epoch.name], [1])

    data = np.matrix([[1, 2], [2, 3], [3, 4]])
    labels = np.array([0, 0, 1])
    func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(data, 'in'),
            self._wrap_dict(labels, 'out'),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=1))

  def test_data_feeder_multioutput_regression(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')

    x = np.matrix([[1, 2], [3, 4]])
    y = np.array([[1, 2], [3, 4]])
    func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(x, 'in'),
            self._wrap_dict(y, 'out'),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=2))

  def test_data_feeder_multioutput_classification(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      # Labels are one-hot encoded per output column (n_classes=5).
      self._assertAllClose(
          out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
                [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
          'name')

    x = np.matrix([[1, 2], [3, 4]])
    y = np.array([[0, 1, 2], [2, 3, 4]])
    func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
    func(
        data_feeder.DataFeeder(
            self._wrap_dict(x, 'in'),
            self._wrap_dict(y, 'out'),
            n_classes=self._wrap_dict(5, 'out'),
            batch_size=2))

  def test_streaming_data_feeder(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
      self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')

    def x_iter(wrap_dict=False):
      yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
          np.array([[1, 2]]), 'in')
      yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
          np.array([[3, 4]]), 'in')

    def y_iter(wrap_dict=False):
      yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
          np.array([[1], [2]]), 'out')
      yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
          np.array([[2], [2]]), 'out')

    func(
        data_feeder.StreamingDataFeeder(
            x_iter(), y_iter(), n_classes=0, batch_size=2))
    func(
        data_feeder.StreamingDataFeeder(
            x_iter(True),
            y_iter(True),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=2))
    # Test non-full batches.
    func(
        data_feeder.StreamingDataFeeder(
            x_iter(), y_iter(), n_classes=0, batch_size=10))
    func(
        data_feeder.StreamingDataFeeder(
            x_iter(True),
            y_iter(True),
            n_classes=self._wrap_dict(0, 'out'),
            batch_size=10))

  def test_dask_data_feeder(self):
    if HAS_PANDAS and HAS_DASK:
      x = pd.DataFrame(
          dict(
              a=np.array([.1, .3, .4, .6, .2, .1, .6]),
              b=np.array([.7, .8, .1, .2, .5, .3, .9])))
      x = dd.from_pandas(x, npartitions=2)
      y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
      y = dd.from_pandas(y, npartitions=2)
      # TODO(ipolosukhin): Remove or restore this.
      # x = extract_dask_data(x)
      # y = extract_dask_labels(y)
      df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
                                                [0.60000002, 0.2]])
      self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])

  def test_hdf5_data_feeder(self):

    def func(df):
      inp, out = df.input_builder()
      feed_dict_fn = df.get_feed_dict_fn()
      feed_dict = feed_dict_fn()
      self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      # Use the dict-aware helper like the rest of this class:
      # test.TestCase.assertAllClose does not take (feed_dict, 'name')
      # positional arguments, so the plain call would break if reached.
      self._assertAllClose(out, [2, 1], feed_dict, 'name')

    try:
      import h5py  # pylint: disable=g-import-not-at-top
      x = np.matrix([[1, 2], [3, 4]])
      y = np.array([1, 2])
      h5f = h5py.File('test_hdf5.h5', 'w')
      h5f.create_dataset('x', data=x)
      h5f.create_dataset('y', data=y)
      h5f.close()
      h5f = h5py.File('test_hdf5.h5', 'r')
      x = h5f['x']
      y = h5f['y']
      func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
      func(
          data_feeder.DataFeeder(
              self._wrap_dict(x, 'in'),
              self._wrap_dict(y, 'out'),
              n_classes=self._wrap_dict(0, 'out'),
              batch_size=3))
    except ImportError:
      print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
  """Tests for `DataFeeder.setup_predict_data_feeder`."""

  def test_iterable_data(self):
    # pylint: disable=undefined-variable

    def check_batches(feeder):
      # Two full rows in the first batch, one row in the final partial batch.
      self._assertAllClose(six.next(feeder), [[1, 2], [3, 4]])
      self._assertAllClose(six.next(feeder), [[5, 6]])

    rows = [[1, 2], [3, 4], [5, 6]]
    plain_iter = iter(rows)
    dict_iter = iter([self._wrap_dict(v) for v in iter(rows)])
    check_batches(
        data_feeder.setup_predict_data_feeder(plain_iter, batch_size=2))
    check_batches(
        data_feeder.setup_predict_data_feeder(dict_iter, batch_size=2))
if __name__ == '__main__':
  # Run all test cases in this module via the TensorFlow test runner.
  test.main()
| apache-2.0 |
keitaroyam/yamtbx | yamtbx/dataproc/auto/command_line/auto_data_proc_gui.py | 1 | 84315 | """
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
from yamtbx.dataproc.auto import gui_config as config
from yamtbx.dataproc.auto import gui_logger as mylog
from yamtbx.dataproc.auto import html_report
from yamtbx.dataproc.xds import xds_inp
from yamtbx.dataproc.xds import get_xdsinp_keyword
from yamtbx.dataproc.xds import idxreflp
from yamtbx.dataproc.xds import integratelp
from yamtbx.dataproc.xds import correctlp
from yamtbx.dataproc.xds import xdsstat
from yamtbx.dataproc.xds import xparm
from yamtbx.dataproc.xds.command_line import estimate_resolution_by_spotxds
from yamtbx.dataproc.adxv import Adxv
from yamtbx.dataproc import dataset
from yamtbx.dataproc.bl_logfiles import BssJobLog
from yamtbx.dataproc.auto.command_line.multi_check_cell_consistency import CellGraph
from yamtbx.util import batchjob, directory_included, read_path_list, safe_float, expand_wildcard_in_list
from yamtbx.util.xtal import format_unit_cell
import iotbx.phil
import libtbx.phil
from libtbx.utils import multi_out
import libtbx.easy_mp
from cctbx import sgtbx
from cctbx import crystal
from cctbx.crystal import reindex
import wx
#import wx.gizmos
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ListCtrlAutoWidthMixin
import wx.html
import wx.lib.newevent
import wx.lib.agw.pybusyinfo
import wx.lib.scrolledpanel
import matplotlib
import matplotlib.backends.backend_wxagg
import numpy
import os
import sys
import re
import datetime
import time
import StringIO
import cPickle as pickle
import glob
import threading
import traceback
import pipes
import copy
# Custom wx event types used to hand results from worker threads back to the
# GUI thread (posted with wx.PostEvent, bound via the EVT_* binder objects).
EventShowProcResult, EVT_SHOW_PROC_RESULT = wx.lib.newevent.NewEvent()
EventLogsUpdated, EVT_LOGS_UPDATED = wx.lib.newevent.NewEvent()
# PHIL parameter definition for the KAMO GUI (parsed with iotbx.phil).
# FIXES in help text only (no parameter names/defaults changed):
#  - date help said "%Y-%d-%m" but the code parses "%Y-%m-%d"
#    (datetime.strptime(config.params.date, "%Y-%m-%d")).
#  - typos "separeted" -> "separated", "look lie" -> "look like".
gui_phil_str = """\
topdir = None
 .type = path
 .help = files/subdirectories in this directory will be processed
include_dir = None
 .type = path
 .multiple = true
 .help = directories to include (those not matched will be excluded)
exclude_dir = None
 .type = path
 .multiple = true
 .help = directories to exclude (those not matched will be included)
workdir = _kamoproc
 .type = str
 .help = working directory. if relpath, workdir will be made in topdir.
adxv = None
 .type = path
 .help = adxv command
bl = 32xu 41xu 44xu 45xu 26b1 26b2 38b1 12b2 other
 .type = choice(multi=False)
 .help = Choose beamline where you start program
blconfig = []
 .type = path
 .multiple = true
mode = zoo *normal
 .type = choice(multi=True)
 .help = When Zoo, specify mode=zoo
date = "today"
 .type = str
 .help = Data collection date ("today" or %Y-%m-%d format)
checklog_daybefore = 2
 .type = int
 .help = if 2 specified, check BSS log file from given date -2 days
jobspkl = None
 .type = path
 .help = Load jobs.pkl and don't find new jobs (for review)
logwatch_interval = 30
 .type = float
 .help = interval in sec to check BSS log & processing results
logwatch_once = None
 .type = bool
 .help = find datasets only once (when program started). Default: true if bl=other, otherwise false.
logwatch_target = local blconfig *dataset_paths_txt
 .type = choice
 .help = "How to find datasets. local: just traverse subdirectories to find data;"
         "blconfig: BSS log files in BLCONFIG/log/ are checked;"
         "dataset_paths_txt: on SPring-8 beamlines ~/.dataset_paths_for_kamo_BLNAME.txt is checked, otherwise users should give a path."
dataset_paths_txt = None
 .type = path
 .help = "A text file that contains dataset_template, start, end frame numbers separated by comma in each line."
         "If specified, the update of the file is checked and data processing will start."
         "Each line should end with newline character."
         "template should look like /foo/bar_??????.cbf"
check_all_files_exist = True
 .type = bool
 .help = "Check all files in job exist before starting processing"
auto_mode = true
 .type = bool
 .help = automatically start processing when data collection finished.
small_wedges = true
 .type = bool
 .help = Optimized for small wedge data processing

batch {
 engine = *sge sh
  .type = choice(multi=False)
 sge_pe_name = par
  .type = str
  .help = pe name (put after -pe option)
 nproc_each = 4
  .type = int
  .help = maximum number of cores used for single data processing
 sh_max_jobs = Auto
  .type = int
  .help = maximum number of concurrent jobs when engine=sh
}
use_tmpdir_if_available = true
 .type = bool
 .help = Use ramdisk or tempdir if sufficient size is available
known {
 unit_cell = None
  .type = floats(size=6)
  .help = cell dimension
 space_group = None
  .type = str
  .help = space group (no. or name)
 method = *not_use_first use_first symm_constraint_only correct_only
  .type = choice(multi=False)
  .help = "not_use_first: Try indexing without prior information first, and if failed, use prior."
          "use_first: Try indexing with prior information."
          "symm_constraint_only: Try indexing without prior information, and apply symmetry constraints for determined unit cell."
          "correct_only: Use given symmetry in CORRECT. May be useful in recycling."
 force = true
  .type = bool
  .help = "Force to use given symmetry in scaling"
}

auto_frame_exclude_spot_based = false
 .type = bool
 .help = automatic frame exclusion from integration based on spot search result.
exclude_ice_resolutions = false
 .type = bool
anomalous = true
 .type = bool
engine = *xds dials
 .type = choice(multi=False)

xds {
 use_dxtbx = False
  .type = bool
  .help = Use dxtbx for generation of XDS.INP
 minpk = None
  .type = float
 exclude_resolution_range = None
  .type = floats(size=2)
  .multiple = true
 repeat = 1
  .type = int(value_min=1)
  .help = if more than 1, copy GXPARM.XDS to XPARM.XDS and re-integrate
 ex = []
  .type = str
  .multiple = true
  .help = extra keywords for XDS.INP
 reverse_phi = None
  .type = bool
  .help = "Automatic decision if None (by default)."
 override {
  geometry_reference = None
   .type = path
   .help = "XDS.INP or json file of dials"
  fix_geometry_when_reference_provided = False
   .type = bool
   .help = "Don't refine geometric parameters when geometry_reference= specified."
  rotation_axis = None
   .type = floats(size=3)
   .help = "override ROTATION_AXIS= "
 }
}
dials {
 scan_varying = False
  .type = bool
}

merging {
 cell_grouping {
  tol_length = None
   .type = float
   .help = relative_length_tolerance
  tol_angle = None
   .type = float
   .help = absolute_angle_tolerance in degree
 }
}

split_hdf_miniset = true
 .type = bool
 .help = Whether or not minisets in hdf5 are treated individually.
split_data_by_deg = None
 .type = float
 .help = Split data with specified degrees. Currently only works with bl=other.
log_root = None
 .type = path
 .help = debug log directory
"""
def read_override_config(imgdir):
    """Read per-directory parameter overrides from kamo_override.config.

    Recognized keys (one "key=value" per line): wavelength, distance, orgx,
    orgy, osc_range (floats) and rotation_axis (three floats). Returns a
    dict of the parsed values; an empty dict if the file does not exist.
    Unknown lines are logged and ignored.
    """
    ret = {}
    cfgin = os.path.join(imgdir, "kamo_override.config")
    if not os.path.isfile(cfgin): return ret

    for l in open(cfgin):
        l = l.strip()
        if l == "": continue
        if l.startswith("wavelength="):
            ret["wavelength"] = float(l[l.index("=")+1:].strip())
        elif l.startswith("distance="):
            ret["distance"] = float(l[l.index("=")+1:].strip())
        elif l.startswith("orgx="):
            ret["orgx"] = float(l[l.index("=")+1:].strip())
        elif l.startswith("orgy="):
            ret["orgy"] = float(l[l.index("=")+1:].strip())
        elif l.startswith("osc_range="):
            ret["osc_range"] = float(l[l.index("=")+1:].strip())
        elif l.startswith("rotation_axis="):
            ret["rotation_axis"] = map(float, l[l.index("=")+1:].strip().split())
            assert len(ret["rotation_axis"]) == 3
        else:
            # BUG FIX: was shikalog.warning(...), but no `shikalog` is
            # imported in this module (NameError at runtime). The module
            # logger is `mylog` (yamtbx.dataproc.auto.gui_logger).
            mylog.warning("Unrecognized config in %s: %s" % (cfgin, l))

    mylog.info("Read override-config from %s: %s" % (cfgin, ret))
    return ret
# read_override_config()
class BssJobs:
def __init__(self):
self.jobs = {} # { (path+prefix, number range) as key: }
self.jobs_prefix_lookup = {} # {prefix: number_range in keys of self.jobs}
self.bsslogs_checked = {}
self.procjobs = {} # key: batchjob
# for check_bss_log()
self._last_job_id = None
self._job_is_running = False
self._prev_job_finished = False
self._current_prefix = None
self._joblogs = []
self._chaches = {} # chache logfile objects. {filename: [timestamp, objects..]
self.cell_graph = CellGraph(tol_length=config.params.merging.cell_grouping.tol_length,
tol_angle=config.params.merging.cell_grouping.tol_angle)
self.xds_inp_overrides = []
# __init__()
def load_override_geometry(self, ref_file):
import json
try:
json.load(open(ref_file)) # if success..
self.xds_inp_overrides = xds_inp.import_geometry(dials_json=ref_file)
except:
self.xds_inp_overrides = xds_inp.import_geometry(xds_inp=ref_file)
if self.xds_inp_overrides:
mylog.info("Geometry reference loaded from %s" % ref_file)
mylog.debug("Loaded geometry: %s" % self.xds_inp_overrides)
def get_job(self, key): return self.jobs.get(key, None)
def keys(self): return self.jobs.keys()
def get_xds_workdir(self, key):
return os.path.join(config.params.workdir,
os.path.relpath(key[0]+"_%d-%d"%key[1], config.params.topdir))
def check_bss_log(self, date, daystart):
re_job_start = re.compile("Job ID ([0-9]+) start")
re_job_finish = re.compile("Job ID ([0-9]+) (Stopped|Success|Killed)")
re_prefix = re.compile("^(.*)_[x\?]+") # XXX Is this safe?
self._prev_job_finished = False
bsslogs = []
for dday in xrange(daystart, 1):
for blconfig in config.params.blconfig:
bsslog = os.path.join(blconfig, "log",
(date + datetime.timedelta(days=dday)).strftime("bss_%Y%m%d.log"))
if os.path.isfile(bsslog): bsslogs.append(bsslog)
for bsslog in bsslogs:
#print "reading", bsslog
last_line = self.bsslogs_checked.get(bsslog, -1)
read_job_flag = False
for i, l in enumerate(open(bsslog)):
try:
if i <= last_line: continue
if l.startswith("echo "): continue # Must skip this line.
if "Beamline Scheduling Software Start" in l:
# reset everything
read_job_flag = False
self._job_is_running = False
self._prev_job_finished = True
self._current_prefix = None
continue
r = re_job_start.search(l)
if r:
self._last_job_id = r.group(1)
read_job_flag = True
self._job_is_running = True
self._current_prefix = None
continue
if read_job_flag:
if "Data file:" in l:
f = l[l.index(":")+1:].strip()
r = re_prefix.search(os.path.splitext(f)[0])
if r:
self._current_prefix = r.group(1)
joblog = r.group(1) + ".log"
if directory_included(joblog, config.params.topdir,
config.params.include_dir, config.params.exclude_dir):
self._joblogs.append([joblog, None]) # Don't care if really exists or not
else:
self._current_prefix = None
self._job_is_running = False
read_job_flag = False
continue
r = re_job_finish.search(l)
if r:
self._job_is_running = False
self._prev_job_finished = True
self._current_prefix = None
continue
if self._current_prefix is not None and self._current_prefix in l:
# like /isilon/hoge/fuga/foo_000001.img [1/180]
if "start_series," in l: # 225HS shutterless
tmp = l[l.index("start_series,"):].split(",")[2]
self._joblogs[-1][1] = int(tmp)
self._current_prefix = None
elif l[l.index(self._current_prefix)+len(self._current_prefix)+2] in "0123456789": # non-shutterless
tmp = l[l.index(self._current_prefix)+len(self._current_prefix)+1:].split()[0]
self._joblogs[-1][1] = int(os.path.splitext(tmp.replace(self._current_prefix+"_", ""))[0])
self._current_prefix = None
continue
elif self._current_prefix is not None and "ExtTrigger "+os.path.basename(self._current_prefix) in l: # for pilatus
tmp = os.path.basename(self._current_prefix)
tmp2 = "ExtTrigger " + tmp
if l[l.index(tmp2)+len(tmp2)+2] in "0123456789":
tmp3 = filter(lambda x: tmp in x, l[l.index(tmp2):].split())[0]
self._joblogs[-1][1] = int(os.path.splitext(tmp3[tmp3.rindex("_")+1:])[0])
self._current_prefix = None
continue
except Exception, e:
mylog.error("Unhandled error occurred when reading %s" % bsslog)
mylog.error(" Line %d-> %s <" % (i, l.rstrip()))
mylog.error(traceback.format_exc())
raise e
self.bsslogs_checked[bsslog] = i
# Check if start number and bss log were found. If not, set line number
# check_bss_log()
def update_jobs(self, date, daystart=-2): #, joblogs, prev_job_finished, job_is_running):
self.check_bss_log(date, daystart)
mylog.debug("joblogs= %s" % self._joblogs)
if self._prev_job_finished:
for job in self.jobs.values():
job.status = "finished"
remove_idxes = []
for i, (joblog, startnum) in enumerate(self._joblogs):
if not os.path.isfile(joblog):
mylog.info("Joblog not found. not created yet? pending: %s"%joblog)
continue
bjl = BssJobLog(joblog, remove_overwritten=True)
prefix = os.path.splitext(joblog)[0] # XXX what if .gz etc?
is_running_job = (self._job_is_running and i == len(self._joblogs)-1)
looks_h5 = False
for job in bjl.jobs:
if job.get_master_h5_if_exists():
looks_h5 = True
if startnum is None:
if looks_h5:
startnum = 1
else:
mylog.error("start number not known: %s"%joblog)
continue
for j, job in enumerate(bjl.jobs):
if is_running_job and j == len(bjl.jobs)-1:
# running job
if startnum == 0: startnum = 1
nr = (startnum, startnum + job.n_images - 1)
job.status = "running"
else:
# not running job
nr = job.get_frame_num_range()
if nr.count(None) == 2:
mylog.debug("Can't get number range of finished job: %s - use expected value."%joblog)
# Should we check actual file existence?
if startnum == 0: startnum = 1
nr = (startnum, startnum + job.n_images - 1)
if None in nr:
mylog.error("number range not known: %s"%joblog)
continue
job.status = "finished"
if prefix in self.jobs_prefix_lookup:
mylog.info("Same prefix: %s,%s. What should I do? %s" % (prefix, nr, self.jobs_prefix_lookup[prefix]))
tmp_in = set(reduce(lambda x,y: x+y, map(lambda x:range(x[0],x[1]+1), self.jobs_prefix_lookup[prefix])))
# XXX Resolve overlap!!
if set(range(nr[0],nr[1]+1)).intersection(tmp_in):
print "tmp_in=", tmp_in, nr
mylog.warning("Range overlapped! Discarding this data..sorry.")
remove_idxes.append(i)
continue
if job.job_mode == "Raster scan":
remove_idxes.append(i)
continue
master_h5 = job.get_master_h5_if_exists()
multi_not_eiger = job.advanced_centering.get("mode", "") == "multiple_crystals" and "EIGER" not in job.detector.upper()
if master_h5 is not None:
job.filename = master_h5.replace("_master.h5", "_??????.h5")
for nr2 in job.get_frame_num_ranges_for_h5():
self.jobs[(prefix, nr2)] = job
self.jobs_prefix_lookup.setdefault(prefix, set()).add(nr2)
elif multi_not_eiger:
suffix_org = job.filename[job.filename.rindex("_?"):]
for k in xrange(len(job.advanced_centering.get("centers",[]))):
prefix2 = prefix + "_%.3d" % (k+1)
job2 = copy.copy(job)
job2.filename = prefix2 + suffix_org
self.jobs[(prefix2, nr)] = job2
self.jobs_prefix_lookup.setdefault(prefix2, set()).add(nr)
else:
# FIXME when multiple_crystals mode, what will filenames be?
self.jobs[(prefix, nr)] = job
self.jobs_prefix_lookup.setdefault(prefix, set()).add(nr)
remove_idxes.append(i)
remove_idxes = list(set(remove_idxes))
for i in sorted(remove_idxes, reverse=True):
del self._joblogs[i]
mylog.debug("remaining joblogs= %s" % self._joblogs)
# Dump jobs
pickle.dump(self.jobs, open(os.path.join(config.params.workdir, "jobs.pkl"), "wb"), 2)
# update_jobs()
def _register_job_from_file(self, ds, root_dir, exclude_dir):
from yamtbx.dataproc import XIO
from yamtbx.dataproc.bl_logfiles import JobInfo
tmpl, nr = ds[0], tuple(ds[1:3])
prefix = tmpl[:tmpl.index("_?" if "_?" in tmpl else "?")]
if not directory_included(tmpl, root_dir, [], exclude_dir):
mylog.info("This directory is not in topdir or in exclude_dir. Skipped: %s"%tmpl)
return
job = JobInfo(None)
job.filename = tmpl
images = filter(lambda x: os.path.isfile(x), dataset.template_to_filenames(*ds))
if len(images) == 0:
return
h = XIO.Image(images[0]).header
job.osc_end = h.get("PhiEnd", 0)
if len(images) > 1:
h_next = XIO.Image(images[1]).header
h_last = XIO.Image(images[-1]).header
job.osc_end = h_last.get("PhiEnd", 0)
if h_next.get("PhiStart", 0) == h.get("PhiStart", 0):
print "This job may be scan?:", tmpl
return
job.wavelength = h.get("Wavelength", 0)
job.osc_start = h.get("PhiStart", 0)
job.osc_step = h.get("PhiWidth", 0)
job.status = "finished"
job.exp_time = h.get("ExposureTime", 0)
job.distance = h.get("Distance", 0)
job.attenuator = None, 0
job.detector = "?"
job.prefix = prefix
if job.osc_step == 0 or job.osc_end - job.osc_start == 0:
print "This job don't look like osc data set:", tmpl
return
if config.params.split_data_by_deg is None or job.osc_step==0:
self.jobs[(prefix, nr)] = job
self.jobs_prefix_lookup.setdefault(prefix, set()).add(nr)
else:
n_per_sp = int(config.params.split_data_by_deg/job.osc_step+.5)
for i in xrange(nr[1]//n_per_sp+1):
if (i+1)*n_per_sp < nr[0]: continue
if nr[1] < i*n_per_sp+1: continue
nr2 = (max(i*n_per_sp+1, nr[0]), min((i+1)*n_per_sp, nr[1]))
self.jobs[(prefix, nr2)] = job # This will share the same job object.. any problem??
self.jobs_prefix_lookup.setdefault(prefix, set()).add(nr2)
# _register_job_from_file()
def update_jobs_from_files(self, root_dir, include_dir=[], exclude_dir=[]):
if include_dir:
include_dir = expand_wildcard_in_list(include_dir)
# Do nothing if include_dir is specified but not found after expansion
else:
include_dir = [root_dir]
# XXX what if include_dir has sub directories..
for rd in include_dir:
for ds in dataset.find_data_sets(rd, skip_0=True, skip_symlinks=False, split_hdf_miniset=config.params.split_hdf_miniset):
self._register_job_from_file(ds, root_dir, exclude_dir)
# Dump jobs
pickle.dump(self.jobs, open(os.path.join(config.params.workdir, "jobs.pkl"), "wb"), 2)
# update_jobs_from_files()
def update_jobs_from_dataset_paths_txt(self, root_dir, include_dir=[], exclude_dir=[]):
if not os.path.isfile(config.params.dataset_paths_txt):
mylog.warning("config.params.dataset_paths_txt=%s is not a file or does not exist" % config.params.dataset_paths_txt)
return
if include_dir == []: include_dir = [root_dir]
for ds in dataset.find_data_sets_from_dataset_paths_txt(config.params.dataset_paths_txt, include_dir, logger=mylog):
self._register_job_from_file(ds, root_dir, exclude_dir)
# Dump jobs
pickle.dump(self.jobs, open(os.path.join(config.params.workdir, "jobs.pkl"), "wb"), 2)
# update_jobs_from_dataset_paths_txt()
def process_data(self, key):
if key not in self.jobs:
mylog.error("Unknown job: %s" % key)
return
if config.params.engine == "xds":
self.process_data_xds(key)
elif config.params.engine == "dials":
self.process_data_dials(key)
else:
raise "Never reaches here"
# process_data()
def process_data_xds(self, key):
job = self.jobs[key]
prefix, nr = key
workdir = self.get_xds_workdir(key)
if not os.path.exists(workdir): os.makedirs(workdir)
# Prepare XDS.INP
img_files = dataset.find_existing_files_in_template(job.filename, nr[0], nr[1],
datadir=os.path.dirname(prefix), check_compressed=True)
if len(img_files) == 0:
mylog.error("No files found for %s %s" % (job.filename, nr))
return
overrides = read_override_config(os.path.dirname(job.filename))
if "rotation_axis" not in overrides and config.params.xds.override.rotation_axis:
overrides["rotation_axis"] = config.params.xds.override.rotation_axis
# XXX need to update self.jobs (display on GUI)
exclude_resolution_ranges = []
if config.params.xds.exclude_resolution_range:
exclude_resolution_ranges = config.params.xds.exclude_resolution_range
if config.params.exclude_ice_resolutions:
exclude_resolution_ranges.extend([[3.93,3.87],
[3.70,3.64],
[3.47,3.41],
[2.70,2.64],
[2.28,2.22],
[2.102,2.042],
[1.978,1.918],
[1.948,1.888],
[1.913,1.853],
[1.751,1.691],
])
xdsinp_str = xds_inp.generate_xds_inp(img_files=img_files,
inp_dir=os.path.abspath(workdir),
use_dxtbx=config.params.xds.use_dxtbx,
reverse_phi=config.params.xds.reverse_phi,
anomalous=config.params.anomalous,
spot_range="all", minimum=False,
integrate_nimages=None, minpk=config.params.xds.minpk,
exclude_resolution_range=exclude_resolution_ranges,
orgx=overrides.get("orgx",None),
orgy=overrides.get("orgy",None),
distance=overrides.get("distance",None),
wavelength=overrides.get("wavelength",None),
osc_range=overrides.get("osc_range",None),
rotation_axis=overrides.get("rotation_axis",None),
fstart=nr[0], fend=nr[1],
extra_kwds=config.params.xds.ex,
overrides=self.xds_inp_overrides,
fix_geometry_when_overridden=config.params.xds.override.fix_geometry_when_reference_provided)
open(os.path.join(workdir, "XDS.INP"), "w").write(xdsinp_str)
opts = ["multiproc=false", "topdir=.", "nproc=%d"%config.params.batch.nproc_each, "tryhard=true",
"make_report=true", "use_tmpdir_if_available=%s"%config.params.use_tmpdir_if_available,
"auto_frame_exclude_spot_based=%s"%config.params.auto_frame_exclude_spot_based]
if config.params.small_wedges: opts.append("no_scaling=true")
if None not in (config.params.known.space_group, config.params.known.unit_cell):
opts.append("cell_prior.cell=%s" % ",".join(map(lambda x: "%.3f"%x, config.params.known.unit_cell)))
opts.append("cell_prior.sgnum=%d" % sgtbx.space_group_info(config.params.known.space_group).group().type().number())
opts.append("cell_prior.method=%s" % config.params.known.method)
opts.append("cell_prior.force=%s" % config.params.known.force)
# Start batch job
job = batchjob.Job(workdir, "xds_auto.sh", nproc=config.params.batch.nproc_each)
job_str = """\
cd "%(wd)s" || exit 1
"%(exe)s" - <<+
from yamtbx.dataproc.auto.command_line.run_all_xds_simple import run_from_args
run_from_args([%(args)s])
for i in xrange(%(repeat)d-1):
run_from_args([%(args)s, "mode=recycle"])
+
""" % dict(exe=sys.executable, args=",".join(map(lambda x: '"%s"'%x, opts)),
repeat=config.params.xds.repeat,
wd=os.path.abspath(workdir))
job.write_script(job_str+"\n")
batchjobs.submit(job)
self.procjobs[key] = job
# process_data_xds()
def process_data_dials(self, key):
bssjob = self.jobs[key]
prefix, nr = key
workdir = self.get_xds_workdir(key)
if not os.path.exists(workdir): os.makedirs(workdir)
# Prepare
img_files = dataset.find_existing_files_in_template(bssjob.filename, nr[0], nr[1],
datadir=os.path.dirname(prefix), check_compressed=True)
if len(img_files) == 0:
mylog.error("No files found for %s %s" % (bssjob.filename, nr))
return
overrides = read_override_config(os.path.dirname(bssjob.filename))
# Start batch job
job = batchjob.Job(workdir, "dials_auto.sh", nproc=config.params.batch.nproc_each)
job_str = """\
cd "%(wd)s" || exit 1
"%(exe)s" - <<+
from yamtbx.dataproc.dials.command_line import run_dials_auto
import pickle
run_dials_auto.run_dials_sequence(**pickle.load(open("args.pkl")))
+
""" % dict(exe=sys.executable, #nproc=config.params.batch.nproc_each,
#filename=bssjob.filename, prefix=prefix, nr=nr,
wd=os.path.abspath(workdir))
#filename_template="%(filename)s", prefix="%(prefix)s", nr_range=%(nr)s, wdir=".", nproc=%(nproc)d)
job.write_script(job_str+"\n")
known_xs = None
if None not in (config.params.known.space_group, config.params.known.unit_cell):
known_xs = crystal.symmetry(config.params.known.unit_cell, config.params.known.space_group)
pickle.dump(dict(filename_template=bssjob.filename,
prefix=prefix,
nr_range=nr, wdir=".",
known_xs=known_xs,
overrides=overrides,
scan_varying=config.params.dials.scan_varying,
nproc=config.params.batch.nproc_each),
open(os.path.join(workdir, "args.pkl"), "w"), -1)
batchjobs.submit(job)
self.procjobs[key] = job
# process_data_dials()
def _save_chache(self, key, filename, obj):
self._chaches[(key,filename)] = (os.path.getmtime(filename), obj)
# _save_chache()
def _load_if_chached(self, key, filename):
if (key, filename) not in self._chaches: return None
if not os.path.isfile(filename): return None
last_mtime, obj = self._chaches[(key, filename)]
if last_mtime == os.path.getmtime(filename):
return obj
return None
# _load_if_chached()
def get_process_status(self, key):
prefix, nr = key
workdir = self.get_xds_workdir(key)
state = None
cmpl, sg, resn = None, None, None
if config.params.engine == "xds":
correct_lp = os.path.join(workdir, "CORRECT.LP")
if key not in self.procjobs:
if os.path.exists(os.path.join(workdir, "decision.log")):
state = batchjob.STATE_FINISHED
else:
job = self.procjobs[key]
batchjobs.update_state(job)
state = job.state
if state == batchjob.STATE_FINISHED:
if os.path.isfile(correct_lp):
lp = self._load_if_chached("correctlp", correct_lp)
if lp is None:
lp = correctlp.CorrectLp(correct_lp)
self._save_chache("correctlp", correct_lp, lp)
ISa = lp.get_ISa() if lp.is_ISa_valid() else float("nan")
resn = lp.resolution_based_on_ios_of_error_table(min_ios=1.)
self._save_chache("resn", correct_lp, resn) # for html report
sg = lp.space_group_str()
cmpl = float(lp.table["all"]["cmpl"][-1]) if "all" in lp.table else float("nan")
if not os.path.isfile(os.path.join(workdir, "XDS_ASCII.HKL")):
state = "giveup"
elif config.params.engine == "dials":
summary_pkl = os.path.join(workdir, "kamo_dials.pkl")
if key not in self.procjobs:
if os.path.exists(os.path.join(workdir, "dials_sequence.log")):
state = batchjob.STATE_FINISHED
else:
job = self.procjobs[key]
batchjobs.update_state(job)
state = job.state
if state == batchjob.STATE_FINISHED:
if os.path.isfile(summary_pkl):
pkl = self._load_if_chached("summary_pkl", summary_pkl)
if pkl is None:
pkl = pickle.load(open(summary_pkl))
self._save_chache("summary_pkl", summary_pkl, pkl)
try: resn = float(pkl.get("d_min"))
except: resn = float("nan")
try: sg = str(pkl["symm"].space_group_info())
except: sg = "?"
try:
cmpl = pkl["stats"].overall.completeness*100
except: cmpl = float("nan")
if not os.path.isfile(os.path.join(workdir, "DIALS.HKL")):
state = "giveup"
return state, (cmpl, sg, resn)
# get_process_status()
def get_process_result(self, key):
prefix, nr = key
workdir = self.get_xds_workdir(key)
ret = {}
ret["workdir"] = workdir
ret["exclude_data_ranges"] = ()
if config.params.engine == "xds":
xds_inp = os.path.join(workdir, "XDS.INP")
correct_lp = os.path.join(workdir, "CORRECT.LP")
gxparm_xds = os.path.join(workdir, "GXPARM.XDS")
stats_pkl = os.path.join(workdir, "merging_stats.pkl")
if os.path.isfile(correct_lp):
lp = correctlp.CorrectLp(correct_lp)
ret["ISa"] = lp.get_ISa() if lp.is_ISa_valid() else float("nan")
ret["resn"] = lp.resolution_based_on_ios_of_error_table(min_ios=1.)
ret["sg"] = lp.space_group_str()
ret["cmpl"] = float(lp.table["all"]["cmpl"][-1]) if "all" in lp.table else float("nan")
if lp.unit_cell is not None:
ret["cell"] = lp.unit_cell
elif os.path.isfile(gxparm_xds):
xp = xparm.XPARM(gxparm_xds)
ret["cell"] = list(xp.unit_cell)
ret["sg"] = xp.space_group_str()
if os.path.isfile(stats_pkl):
sio = StringIO.StringIO()
pickle.load(open(stats_pkl))["stats"].show(out=sio, header=False)
lines = sio.getvalue().replace("<","<").replace(">",">").splitlines()
i_table_begin = filter(lambda x: "Statistics by resolution bin:" in x[1], enumerate(lines))
if len(i_table_begin) == 1:
ret["table_html"] = "\n".join(lines[i_table_begin[0][0]+1:])
exc_frames = filter(lambda x: x[0]=="EXCLUDE_DATA_RANGE", get_xdsinp_keyword(xds_inp))
ret["exclude_data_ranges"] = map(lambda x: map(int, x[1].split()), exc_frames)
elif config.params.engine == "dials":
summary_pkl = os.path.join(workdir, "kamo_dials.pkl")
print summary_pkl
if os.path.isfile(summary_pkl):
pkl = pickle.load(open(summary_pkl))
try: ret["resn"] = float(pkl.get("d_min"))
except: ret["resn"] = float("nan")
try:
ret["sg"] = str(pkl["symm"].space_group_info())
ret["cell"] = pkl["symm"].unit_cell().parameters()
except: ret["sg"] = "?"
try:
ret["cmpl"] = pkl["stats"].overall.completeness*100
except: ret["cmpl"] = float("nan")
if "stats" in pkl:
sio = StringIO.StringIO()
pkl["stats"].show(out=sio, header=False)
lines = sio.getvalue().replace("<","<").replace(">",">").splitlines()
i_table_begin = filter(lambda x: "Statistics by resolution bin:" in x[1], enumerate(lines))
print i_table_begin
if len(i_table_begin) == 1:
ret["table_html"] = "\n".join(lines[i_table_begin[0][0]+1:])
print ret
return ret
# get_process_result()
# class BssJobs
# Singleton objects shared between the GUI and the watcher thread.
bssjobs = None # BssJobs()  -- constructed at startup
batchjobs = None # initialized in __main__ (batchjob.SGE or batchjob.ExecLocal)
mainFrame = None # main wx frame, set when the GUI is created
class WatchLogThread:
    """Background thread that periodically discovers datasets, auto-starts
    processing, posts EventLogsUpdated to the GUI, and writes the HTML report.

    Review fixes (behavior otherwise unchanged):
      * `raise "Never reaches here"` -> RuntimeError (string exceptions are
        illegal since Python 2.6).
      * The 0.5 s sleep loop now breaks as soon as keep_going is cleared
        instead of busy-spinning through the remaining iterations.
    """
    def __init__(self, parent):
        self.parent = parent
        self.interval = 10
        self.thread = None

    def start(self, interval=None):
        """(Re)start the watcher loop in a daemon thread."""
        self.stop()
        self.keep_going = True
        self.running = True
        if interval is not None:
            self.interval = interval
        self.thread = threading.Thread(None, self.run)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Request loop exit and wait for the thread to finish."""
        if self.is_running():
            mylog.info("Stopping WatchLogThread.. Wait.")
            self.keep_going = False
            self.thread.join()
        else:
            mylog.info("WatchLogThread already stopped.")

    def is_running(self):
        return self.thread is not None and self.thread.is_alive()

    def run(self):
        mylog.info("WatchLogThread loop STARTED")
        counter = 0
        while self.keep_going:
            counter += 1
            if config.params.date == "today": date = datetime.datetime.today()
            else: date = datetime.datetime.strptime(config.params.date, "%Y-%m-%d")

            job_statuses = {}
            if not (config.params.logwatch_once and counter > 1):
                # check bsslog
                if config.params.jobspkl is not None:
                    bssjobs.jobs = pickle.load(open(config.params.jobspkl))
                    for prefix, nr in bssjobs.jobs:
                        bssjobs.jobs_prefix_lookup.setdefault(prefix, set()).add(nr)
                else:
                    if config.params.logwatch_target == "blconfig":
                        #joblogs, prev_job_finished, job_is_running = bssjobs.check_bss_log(date, -config.params.checklog_daybefore)
                        bssjobs.update_jobs(date, -config.params.checklog_daybefore) #joblogs, prev_job_finished, job_is_running)
                    elif config.params.logwatch_target == "dataset_paths_txt":
                        bssjobs.update_jobs_from_dataset_paths_txt(config.params.topdir,
                                                                   config.params.include_dir, config.params.exclude_dir)
                    elif config.params.logwatch_target == "local":
                        bssjobs.update_jobs_from_files(config.params.topdir,
                                                       config.params.include_dir, config.params.exclude_dir)
                    else:
                        # BUG FIX: was `raise "Never reaches here"` (TypeError).
                        raise RuntimeError("Never reaches here")

                # start jobs
                if config.params.auto_mode:
                    for key in bssjobs.keys():
                        job_statuses[key] = bssjobs.get_process_status(key)
                        status = job_statuses[key][0]
                        job = bssjobs.get_job(key)
                        if job.status == "finished" and status is None:
                            if not config.params.check_all_files_exist or job.all_image_files_exist(nr=key[1]): # TODO we need timeout?
                                mylog.info("Automatically starting processing %s" % str(key))
                                bssjobs.process_data(key)
                            else:
                                mylog.info("Waiting for files: %s" % str(key))

            ev = EventLogsUpdated(job_statuses=job_statuses)
            wx.PostEvent(self.parent, ev)

            for key in job_statuses:
                if job_statuses[key][0] == "finished":
                    bssjobs.cell_graph.add_proc_result(key, bssjobs.get_xds_workdir(key))

            # Make html report # TODO Add DIALS support
            html_report.make_kamo_report(bssjobs,
                                         topdir=config.params.topdir,
                                         htmlout=os.path.join(config.params.workdir, "report.html"))
            #print
            #print "Done. Open?"
            #print "firefox %s" % os.path.join(config.params.workdir, "report.html")

            if self.interval == 0: # Run only once
                self.keep_going = False
                continue

            if self.interval < 1:
                time.sleep(self.interval)
            else:
                # Sleep in 0.5 s slices so stop() is honored promptly.
                for i in xrange(int(self.interval/.5)):
                    if not self.keep_going:
                        break
                    time.sleep(.5)

        mylog.info("WatchLogThread loop FINISHED")
        self.running = False
        #wx.PostEvent(self.parent, EventDirWatcherStopped()) # Ensure the checkbox unchecked when accidentally exited.
    # run()
# class WatchLogThread
class MyCheckListCtrl(wx.ListCtrl, CheckListCtrlMixin, ListCtrlAutoWidthMixin):
    """
    Virtual, checkable list of datasets.

    Based on http://zetcode.com/wxpython/advanced/

    Each row is kept in self.items as [key, checked_state, col0_text, col1_text, ...];
    the wx virtual-list callbacks (OnGetItem*) read directly from that table.
    """
    def __init__(self, parent):
        wx.ListCtrl.__init__(self, parent, wx.ID_ANY, style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_VIRTUAL)
        CheckListCtrlMixin.__init__(self)
        ListCtrlAutoWidthMixin.__init__(self)
        self.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL))
        # Column layout; column 0 also carries the checkbox image.
        column_defs = (("Path", 400), ("Sample ID", 90), ("Wavelen", 80),
                       ("TotalPhi", 80), ("DeltaPhi", 80), ("Cstatus", 70),
                       ("Pstatus", 70), ("Cmpl.", 50), ("SG", 100), ("Resn.", 50))
        for col_idx, (label, col_width) in enumerate(column_defs):
            self.InsertColumn(col_idx, label, wx.LIST_FORMAT_LEFT, width=col_width)
        self.items = []             # rows: [key, checked, text0, text1, ...]
        self.images = []
        self._items_lookup = {}     # key -> current row index in self.items
        self._sort_acend = True
        self._sort_prevcol = None
        self.Bind(wx.EVT_LIST_COL_CLICK, self.item_col_click)
    # __init__()

    def key_at(self, line):
        # Key of the row currently shown at the given position.
        return self.items[line][0]

    def OnGetItemText(self, line, col):
        # Virtual-list text callback; slots [0] and [1] hold key and checked state.
        return self.items[line][col+2]

    def OnGetItemImage(self, line):
        # Virtual-list image callback (checkbox state).
        return self.items[line][1]

    def SetItemImage(self, line, im):
        # Record the checkbox state and repaint the control.
        self.items[line][1] = im
        self.Refresh()
    # SetItemImage()

    def get_item(self, key):
        """Return the displayed column texts for key, or None if unknown."""
        row_idx = self._items_lookup.get(key)
        if row_idx is None: return None
        return self.items[row_idx][2:]
    # get_item()

    def update_item(self, key, item):
        """Insert a new (unchecked) row for key, or overwrite an existing row's columns."""
        if key in self._items_lookup:
            row = self.items[self._items_lookup[key]]
            for i, text in enumerate(item):
                row[i+2] = text
        else:
            self.items.append([key, 0] + item)
            self._items_lookup[key] = len(self.items) - 1
    # update_item()

    def item_col_click(self, ev):
        """Sort rows by the clicked column; a repeated click toggles the direction."""
        col = ev.GetColumn()
        if col == self._sort_prevcol:
            self._sort_acend = not self._sort_acend
        else:
            self._sort_acend = True
        def trans_func(idx):
            # Numeric columns are compared as floats; everything else as raw text.
            # 0:lab, 1:sample, 2:wavelen, 3:phirange, 4:deltaphi, 5,6:status, 7:cmpl, 8:sg, 9:resn
            if idx in (2, 3, 4, 7, 9): return safe_float
            return lambda x: x
        # trans_func()
        conv = trans_func(col)
        perm = sorted(range(len(self.items)),
                      key=lambda x: conv(self.items[x][col+2]),
                      reverse=not self._sort_acend)
        perm_table = dict((old, new) for new, old in enumerate(perm))  # old idx -> new idx
        for k in self._items_lookup: self._items_lookup[k] = perm_table[self._items_lookup[k]]
        self.items = [self.items[x] for x in perm]
        self._sort_prevcol = col
        self.SetItemCount(len(self.items))
    # item_col_click()
# class MyCheckListCtrl
class MultiPrepDialog(wx.Dialog):
    """
    Modal dialog asking how to prepare files for multi-dataset merging.

    Shows the cell-clustering summary text, lets the user pick a group,
    target symmetry, working directory and preparation options; the choice
    is returned by ask() as a 7-tuple (see btnProceed_click).
    """
    def __init__(self, parent=None, cm=None):
        # cm: cell-grouping object; assumed to provide .groups and
        #     get_selectable_symms() -- TODO confirm against caller.
        wx.Dialog.__init__(self, parent=parent, id=wx.ID_ANY, title="Prep multi merge",
                           size=(1200,600), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.MAXIMIZE_BOX)
        mpanel = wx.Panel(self)
        vbox = wx.BoxSizer(wx.VERTICAL)
        mpanel.SetSizer(vbox)

        # Read-only text area showing the clustering summary passed to ask().
        self.txtCM = wx.TextCtrl(mpanel, wx.ID_ANY, size=(450,25), style=wx.TE_MULTILINE)
        self.txtCM.SetFont(wx.Font(10, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.NORMAL))
        self.txtCM.SetEditable(False)
        vbox.Add(self.txtCM, 1, flag=wx.EXPAND|wx.RIGHT)

        # Row 1: group / symmetry / workdir selection and action buttons.
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(wx.StaticText(mpanel, wx.ID_ANY, "Choose group: "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        self.cmbGroup = wx.ComboBox(mpanel, wx.ID_ANY, size=(100,25), style=wx.CB_READONLY)
        hbox1.Add(self.cmbGroup)
        self.cmbGroup.Bind(wx.EVT_COMBOBOX, self.cmbGroup_select)
        hbox1.Add(wx.StaticText(mpanel, wx.ID_ANY, "Choose symmetry: "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        self.cmbSymmetry = wx.ComboBox(mpanel, wx.ID_ANY, size=(400,25), style=wx.CB_READONLY)
        hbox1.Add(self.cmbSymmetry)
        hbox1.Add(wx.StaticText(mpanel, wx.ID_ANY, "Workdir: "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        self.txtWorkdir = wx.TextCtrl(mpanel, wx.ID_ANY, size=(200,25))
        hbox1.Add(self.txtWorkdir)
        self.btnProceed = wx.Button(mpanel, wx.ID_ANY, "Proceed")
        self.btnCancel = wx.Button(mpanel, wx.ID_ANY, "Cancel")
        self.btnProceed.Bind(wx.EVT_BUTTON, self.btnProceed_click)
        # Cancel just closes; self.selected stays all-None so the caller can detect it.
        self.btnCancel.Bind(wx.EVT_BUTTON, lambda e: self.EndModal(wx.OK))
        hbox1.Add(self.btnProceed)
        hbox1.Add(self.btnCancel)
        vbox.Add(hbox1)

        # Row 2: preparation method (reindex vs. refine) and options.
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(wx.StaticText(mpanel, wx.ID_ANY, "Prepare files in specified symmetry by "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        self.rbReindex = wx.RadioButton(mpanel, wx.ID_ANY, "reindexing only", style=wx.RB_GROUP)
        self.rbReindex.SetToolTipString("Just change H,K,L columns and unit cell parameters in HKL file")
        self.rbReindex.SetValue(True)
        self.rbPostref = wx.RadioButton(mpanel, wx.ID_ANY, "refinement")
        self.rbPostref.SetToolTipString("Run CORRECT job of XDS to refine unit cell and geometric parameters")
        hbox2.Add(self.rbReindex, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL)
        hbox2.Add(self.rbPostref, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL)
        self.chkPrepFilesInWorkdir = wx.CheckBox(mpanel, wx.ID_ANY, "into the workdir")
        self.chkPrepFilesInWorkdir.SetToolTipString("When checked, HKL files for merging will be saved in the workdir. Useful when you are trying several symmetry possibilities. Otherwise files are modified in place.")
        self.chkPrepFilesInWorkdir.SetValue(True)
        hbox2.Add(self.chkPrepFilesInWorkdir, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        hbox2.Add(wx.StaticText(mpanel, wx.ID_ANY, " using "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL)
        self.txtNproc = wx.TextCtrl(mpanel, wx.ID_ANY, size=(40,25))
        hbox2.Add(self.txtNproc, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL)
        hbox2.Add(wx.StaticText(mpanel, wx.ID_ANY, " CPU cores "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        self.chkPrepDials = wx.CheckBox(mpanel, wx.ID_ANY, "Prepare files for joint refinement by dials")
        hbox2.Add(self.chkPrepDials, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        vbox.Add(hbox2)

        # Disable the dials option when the dials package is not importable.
        try: import dials
        except ImportError: self.chkPrepDials.Disable()

        # (group, symmidx, workdir, cell_method, nproc, prep_dials_files,
        #  into_workdir); remains all-None until the user presses Proceed.
        self.selected = (None, None, None, None, None, None, None)
        self.cm = cm
        self._set_default_input()
    # __init__()

    def _set_default_input(self):
        """Fill the group combo box and set a timestamped default workdir name."""
        self.cmbGroup.Clear()
        if self.cm is None: return
        for i in xrange(len(self.cm.groups)): self.cmbGroup.Append("%2d"%(i+1))
        self.cmbGroup.Select(0)
        self.cmbGroup_select(None)
        self.txtWorkdir.SetValue(time.strftime("merge_%y%m%d-%H%M%S"))
    # _set_default_input()

    def cmbGroup_select(self, ev):
        """Refill the symmetry choices for the currently selected group.

        The most frequent symmetry is preselected; if that one is P1 and a
        non-P1 alternative with nonzero frequency exists, prefer the latter.
        """
        self.cmbSymmetry.Clear()
        igrp = int(self.cmbGroup.GetValue()) - 1
        # symms: list of (point_group, unit_cell, frequency) tuples.
        symms = self.cm.get_selectable_symms(igrp)
        def_idx = symms.index(max(symms, key=lambda x:x[2]))
        if len(symms) > 1 and symms[def_idx][0].group() == sgtbx.space_group_info("P1").group():
            tmp = symms.index(max(filter(lambda x: x!=symms[def_idx], symms), key=lambda x:x[2]))
            if symms[tmp][2] > 0: def_idx = tmp
        for i, (pg, cell, freq) in enumerate(symms):
            self.cmbSymmetry.Append("%-10s (%s)" % (pg, format_unit_cell(cell)))
        self.cmbSymmetry.Select(def_idx)
    # cmbGroup_select()

    def btnProceed_click(self, ev):
        """Validate inputs, create the workdir, record the selection and close."""
        # Reject characters that would break directory creation or shell scripts.
        prohibit_chars = set(" /*\\")
        workdir = self.txtWorkdir.GetValue()
        if prohibit_chars.intersection(workdir):
            wx.MessageDialog(None, "You can't use following characters for directory name: ' /*\\'",
                             "Error", style=wx.OK).ShowModal()
            return
        group, symmidx = int(self.cmbGroup.GetValue()), self.cmbSymmetry.GetCurrentSelection()
        # Check workdir
        workdir = os.path.join(config.params.workdir, workdir)
        try:
            os.mkdir(workdir)
        except OSError:
            wx.MessageDialog(None, "Can't make directory: %s" % os.path.basename(workdir),
                             "Error", style=wx.OK).ShowModal()
            return
        try:
            nproc = int(self.txtNproc.GetValue())
        except ValueError:
            wx.MessageDialog(None, "Invalid core number",
                             "Error", style=wx.OK).ShowModal()
            return
        self.selected = group, symmidx, workdir, "reindex" if self.rbReindex.GetValue() else "refine", nproc, self.chkPrepDials.GetValue(), self.chkPrepFilesInWorkdir.GetValue()
        self.EndModal(wx.OK)
    # btnProceed_click()

    def ask(self, txt):
        """Show the dialog with summary text txt; return the selection tuple.

        Returns all-None when the user cancelled without pressing Proceed.
        """
        self.txtCM.SetValue(txt)
        self.txtNproc.SetValue("%s"%libtbx.easy_mp.get_processes(libtbx.Auto))
        self.ShowModal()
        return self.selected
    # ask()
# class MultiPrepDialog
class MultiMergeDialog(wx.Dialog):
    """
    Dialog for merging multiple datasets.

    Currently only an empty frame; the file list is accepted but not yet
    displayed (see the commented-out usage in ControlPanel.btnMultiMerge_click).
    """
    def __init__(self, parent=None, xds_ascii_files=None):
        # Use a None sentinel instead of a mutable default argument ([]),
        # which would be shared across all instances.
        if xds_ascii_files is None: xds_ascii_files = []
        wx.Dialog.__init__(self, parent=parent, id=wx.ID_ANY, title="Multi merge",
                           size=(600,600), style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        mpanel = wx.Panel(self)
        vbox = wx.BoxSizer(wx.VERTICAL)
        mpanel.SetSizer(vbox)
    # __init__()
# class MultiMergeDialog
class ControlPanel(wx.Panel):
    """
    Top panel of the main window: dataset list with check/uncheck buttons,
    a (not yet implemented) filter row, and the multi-merge launcher.
    """
    def __init__(self, parent=None, id=wx.ID_ANY):
        wx.Panel.__init__(self, parent=parent, id=id)
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(vbox)
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(wx.StaticText(self, wx.ID_ANY, "Top Dir: "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        self.txtTopDir = wx.TextCtrl(self, wx.ID_ANY, size=(450,25))
        self.txtTopDir.SetEditable(False)
        self.txtTopDir.SetValue(config.params.topdir)
        hbox1.Add(self.txtTopDir, flag=wx.EXPAND|wx.RIGHT)
        vbox.Add(hbox1)
        # Counter label; updated with real numbers in on_update().
        self.lblDS = wx.StaticText(self, wx.ID_ANY, "?? datasets collected, ?? datasets processed")
        vbox.Add(self.lblDS, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(wx.StaticText(self, wx.ID_ANY, "Filter: "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        # Filter UI is a placeholder: both controls are disabled below.
        self.txtFilter = wx.TextCtrl(self, wx.ID_ANY, size=(200,25))
        self.txtFilter.SetValue("(will be implemented in future)")
        self.txtFilter.Disable()
        self.cmbFilter = wx.ComboBox(self, wx.ID_ANY, size=(150,25), style=wx.CB_READONLY)
        self.cmbFilter.Bind(wx.EVT_TEXT_ENTER, self.cmbFilter_text_enter)
        for n in ("Path",): self.cmbFilter.Append(n)
        self.cmbFilter.Disable()
        self.btnCheckAll = wx.Button(self, wx.ID_ANY, "Check all")
        self.btnUncheckAll = wx.Button(self, wx.ID_ANY, "Uncheck all")
        self.btnMultiMerge = wx.Button(self, wx.ID_ANY, "Multi-merge strategy")
        self.btnCheckAll.Bind(wx.EVT_BUTTON, self.btnCheckAll_click)
        self.btnUncheckAll.Bind(wx.EVT_BUTTON, self.btnUncheckAll_click)
        self.btnMultiMerge.Bind(wx.EVT_BUTTON, self.btnMultiMerge_click)
        hbox2.Add(self.txtFilter, flag=wx.EXPAND|wx.RIGHT)
        hbox2.Add(self.cmbFilter, flag=wx.EXPAND|wx.RIGHT)
        hbox2.Add(self.btnCheckAll, flag=wx.EXPAND|wx.RIGHT)
        hbox2.Add(self.btnUncheckAll, flag=wx.EXPAND|wx.RIGHT)
        hbox2.Add(self.btnMultiMerge, flag=wx.EXPAND|wx.RIGHT)
        vbox.Add(hbox2)
        self.listctrl = MyCheckListCtrl(self)
        self.listctrl.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.listctrl_item_right_click)
        self.listctrl.Bind(wx.EVT_LIST_ITEM_SELECTED, self.listctrl_item_selected)
        vbox.Add(self.listctrl, 1, flag=wx.EXPAND|wx.TOP)
    # __init__()

    def on_update(self, ev):
        """Handle EventLogsUpdated: refresh rows, counters and dump proc.pkl.

        ev.job_statuses maps key -> (status, (completeness, space_group, resolution)).
        """
        self.update_listctrl()
        job_statuses = ev.job_statuses
        lc = self.listctrl
        n_proc = 0
        n_giveup = 0
        dumpdata = {}  # snapshot persisted to workdir/proc.pkl below
        for i in xrange(lc.GetItemCount()):
            key = self.listctrl.key_at(i)
            if key not in job_statuses: continue
            status, (cmpl, sg, resn) = job_statuses.get(key)
            dumpdata[key] = status, (cmpl, sg, resn)
            item = lc.get_item(key)
            if status is None:
                item[6] = "waiting"
            else:
                item[6] = status
            if status == batchjob.STATE_FINISHED: n_proc += 1
            if status == "giveup": n_giveup += 1
            if cmpl is not None: item[7] = "%3.0f" % cmpl
            if sg is not None: item[8] = str(sg)
            if resn is not None: item[9] = "%.1f" % resn
            lc.update_item(key, item)
        lc.SetItemCount(len(lc.items))
        self.lblDS.SetLabel("%3d datasets collected (%3d processed, %3d failed, %3d undone) workdir: %s" % (lc.GetItemCount(), n_proc, n_giveup, lc.GetItemCount()-(n_proc+n_giveup), os.path.relpath(config.params.workdir, config.params.topdir)))
        pickle.dump(dumpdata, open(os.path.join(config.params.workdir, "proc.pkl"), "wb"), 2)
    # on_update()

    def update_listctrl(self):
        """Add a list row for every known job that is not yet displayed."""
        lc = self.listctrl
        for prefix, nr in bssjobs.jobs:
            lab = "%s (%.4d..%.4d)" % (os.path.relpath(prefix, config.params.topdir), nr[0], nr[1])
            job = bssjobs.jobs[(prefix, nr)]
            # If exists, don't overwrite with blank data
            if lc.get_item((prefix, nr)): continue
            item = [lab]
            if job.sample is not None: item.append("%s(%.2d)" % job.sample)
            else: item.append("none")
            item.append("%.4f" % job.wavelength)
            item.append("%5.1f" % (job.osc_end - job.osc_start))
            item.append("%.3f" % job.osc_step)
            item.append(job.status)
            item.append("never")
            item.append("")
            item.append("")
            item.append("")
            lc.update_item((prefix, nr), item)
            assert len(item) == lc.GetColumnCount()
        lc.SetItemCount(len(lc.items))
    # update_listctrl()

    def listctrl_item_right_click(self, ev):
        """Show a context menu for the selected row (start/stop processing)."""
        lc = self.listctrl
        idx = lc.GetFirstSelected()
        key = self.listctrl.key_at(idx)
        menu = wx.Menu()
        # Disabled first entry shows the dataset path as a title.
        menu.Append(0, lc.GetItem(idx, 0).GetText())
        menu.Enable(0, False)
        menu.AppendSeparator()
        menu.Append(1, "Start processing")
        menu.Append(2, "Stop processing")  # NOTE(review): no handler bound for id=2
        self.Bind(wx.EVT_MENU, lambda e: bssjobs.process_data(key), id=1)
        self.PopupMenu(menu)
        menu.Destroy()
    # listctrl_item_right_click()

    def listctrl_item_selected(self, ev):
        """Notify the main frame so the result panels show this dataset."""
        idx = self.listctrl.GetFirstSelected()
        key = self.listctrl.key_at(idx)
        ev = EventShowProcResult(key=key)
        wx.PostEvent(mainFrame, ev)
    # listctrl_item_selected()

    def btnCheckAll_click(self, ev):
        # Image 0/1 encodes unchecked/checked.
        for i in xrange(self.listctrl.GetItemCount()):
            if self.listctrl.GetItem(i).GetImage() == 0: self.listctrl.SetItemImage(i, 1)
    # btnCheckAll_click()

    def btnUncheckAll_click(self, ev):
        for i in xrange(self.listctrl.GetItemCount()):
            if self.listctrl.GetItem(i).GetImage() == 1: self.listctrl.SetItemImage(i, 0)
    # btnUncheckAll_click()

    def btnMultiMerge_click(self, ev):
        """Collect checked, finished jobs and run merging preparation.

        Opens MultiPrepDialog for the user's choices, then calls
        PrepMerging to write the merging scripts; actual merging is left
        to the user on the command line.
        """
        keys = map(lambda i: self.listctrl.key_at(i),
                   filter(lambda i: self.listctrl.GetItem(i).GetImage() == 1, xrange(self.listctrl.GetItemCount())))
        keys = filter(lambda k: bssjobs.get_process_status(k)[0]=="finished", keys)
        mylog.info("%d finished jobs selected for merging" % len(keys))
        # The cell graph may lag behind finished jobs; busy-wait until all
        # selected keys are registered in it.
        if not bssjobs.cell_graph.is_all_included(keys):
            busyinfo = wx.lib.agw.pybusyinfo.PyBusyInfo("Thinking..", title="Busy KAMO")
            try: wx.SafeYield()
            except: pass
            while not bssjobs.cell_graph.is_all_included(keys):
                print "waiting.."
                time.sleep(1)
            busyinfo = None
        if len(keys) == 0:
            wx.MessageDialog(None, "No successfully finished job in the selection",
                             "Error", style=wx.OK).ShowModal()
            return
        cm = bssjobs.cell_graph.get_subgraph(keys)
        from yamtbx.dataproc.auto.command_line.multi_prep_merging import PrepMerging
        pm = PrepMerging(cm)
        ask_str = pm.find_groups()
        if len(cm.groups) == 0:
            wx.MessageDialog(None, "Oh, no. No data",
                             "Error", style=wx.OK).ShowModal()
            return
        mpd = MultiPrepDialog(cm=cm)
        group, symmidx, workdir, cell_method, nproc, prep_dials_files, into_workdir = mpd.ask(ask_str)
        mpd.Destroy()
        # All-None selection means the dialog was cancelled.
        if None in (group,symmidx):
            mylog.info("Canceled")
            return
        msg, _ = pm.prep_merging(workdir=workdir, group=group, symmidx=symmidx,
                                 topdir=config.params.workdir,
                                 cell_method=cell_method,
                                 nproc=nproc, prep_dials_files=prep_dials_files, into_workdir=into_workdir)
        pm.write_merging_scripts(workdir, config.params.batch.sge_pe_name, prep_dials_files)
        print "\nFrom here, Do It Yourself!!\n"
        print "cd", workdir
        print "..then edit and run merge_blend.sh and/or merge_ccc.sh"
        print
        wx.MessageDialog(None, "Now ready. From here, please use command-line. Look at your terminal..\n" + msg,
                         "Ready for merging", style=wx.OK).ShowModal()
        # Merge
        #mmd = MultiMergeDialog(workdir, xds_ascii_files)
        #mmd.ShowModal()
    # btnMultiMerge_click()

    def cmbFilter_text_enter(self, ev):
        # XXX Doesn't work! (control is read-only and disabled in __init__)
        s = self.cmbFilter.GetValue()
        if s == "": return
    # cmbFilter_text_enter()
# class ControlPanel()
class ResultLeftPanel(wx.Panel):
    """
    Left result panel: per-dataset HTML summary and image-viewing controls
    (raw frames and XDS prediction frames shown via Adxv).
    """
    def __init__(self, parent=None, id=wx.ID_ANY):
        wx.Panel.__init__(self, parent=parent, id=id)
        self.current_key = None       # (prefix, (start_frame, end_frame))
        self.current_workdir = None   # processing workdir of the current key
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(vbox)
        self.summaryHtml = wx.html.HtmlWindow(self, style=wx.NO_BORDER, size=(600,90))
        self.summaryHtml.SetStandardFonts()
        vbox.Add(self.summaryHtml, 1, flag=wx.EXPAND)
        sbImage = wx.StaticBox(self, label="Check images")
        sbsImage = wx.StaticBoxSizer(sbImage, wx.VERTICAL)
        # Raw-image row: frame number box, spinner, Show button.
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox1.Add(wx.StaticText(self, label="Raw data: frame "), flag=wx.RIGHT|wx.ALIGN_CENTER_VERTICAL)
        self.txtRawFrame = wx.TextCtrl(self, value="1")
        self.spinRawFrame = wx.SpinButton(self, style=wx.SP_VERTICAL)
        self.btnRawShow = wx.Button(self, wx.ID_ANY, "Show")
        hbox1.Add(self.txtRawFrame)
        hbox1.Add(self.spinRawFrame)
        hbox1.Add(self.btnRawShow)
        sbsImage.Add(hbox1)
        # Prediction row: same layout for calculated prediction images.
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2.Add(wx.StaticText(self, label="Prediction: frame "), flag=wx.RIGHT|wx.ALIGN_CENTER_VERTICAL)
        self.txtPredictFrame = wx.TextCtrl(self, value="1")
        self.spinPredictFrame = wx.SpinButton(self, style=wx.SP_VERTICAL)
        self.btnPredictShow = wx.Button(self, wx.ID_ANY, "Show")
        hbox2.Add(self.txtPredictFrame)
        hbox2.Add(self.spinPredictFrame)
        hbox2.Add(self.btnPredictShow)
        sbsImage.Add(hbox2)
        vbox.Add(sbsImage, flag=wx.EXPAND|wx.ALL, border=1)
        self.spinRawFrame.Bind(wx.EVT_SPIN, self.spinRawFrame_spin)
        self.spinPredictFrame.Bind(wx.EVT_SPIN, self.spinPredictFrame_spin)
        self.btnRawShow.Bind(wx.EVT_BUTTON, self.btnRawShow_click)
        self.btnPredictShow.Bind(wx.EVT_BUTTON, self.btnPredictShow_click)
    # __init__()

    def set_current_key(self, key):
        """Remember the selected dataset and reset the frame spinners to its range."""
        self.current_key = key
        for obj in (self.spinRawFrame, self.spinPredictFrame):
            obj.SetRange(*key[1])
            obj.SetValue(key[1][1])
        self.txtRawFrame.SetValue(str(self.spinRawFrame.GetValue()))
        self.txtPredictFrame.SetValue(str(self.spinPredictFrame.GetValue()))
    # set_current_key()

    def set_current_workdir(self, wd): self.current_workdir = wd

    def btnRawShow_click(self, ev, raise_window=True):
        """Open the selected raw frame in Adxv (HDF5 master file if available)."""
        frame = int(self.txtRawFrame.GetValue())
        job = bssjobs.get_job(self.current_key)
        if job is None: return
        masterh5 = job.get_master_h5_if_exists()
        if masterh5:
            mainFrame.adxv.open_hdf5(masterh5, frame, raise_window=raise_window)
        else:
            path = dataset.template_to_filenames(job.filename, frame, frame)[0]
            mainFrame.adxv.open_image(path, raise_window=raise_window)
    # btnRawShow_click()

    def spinRawFrame_spin(self, ev):
        # Keep the text box in sync and live-update Adxv if it is running.
        self.txtRawFrame.SetValue(str(self.spinRawFrame.GetValue()))
        if mainFrame.adxv.is_alive():
            wx.CallAfter(self.btnRawShow_click, None, False)
    # spinRawFrame_spin()

    def btnPredictShow_click(self, ev, raise_window=True):
        """Show the XDS prediction image for the selected frame, computing it on demand."""
        frame = int(self.txtPredictFrame.GetValue())
        prefix, nr = self.current_key
        if self.current_workdir is None: return
        # The last frame's prediction is written by XDS itself as FRAME.cbf.
        if frame == self.spinPredictFrame.GetMax():
            framecbf = os.path.join(self.current_workdir,"FRAME.cbf")
        else:
            framecbf = os.path.join(self.current_workdir, "FRAME_%.4d.cbf" % frame)
        if not os.path.isfile(framecbf): # TODO check timestamp and recalculate if needed
            from yamtbx.dataproc.xds.command_line import xds_predict_mitai
            busyinfo = wx.lib.agw.pybusyinfo.PyBusyInfo("Calculating prediction..", title="Busy KAMO")
            try: wx.SafeYield()
            except: pass
            try:
                xds_predict_mitai.run(param_source=os.path.join(self.current_workdir, "INTEGRATE.LP"),
                                      frame_num=frame, wdir=self.current_workdir)
            finally:
                busyinfo = None
        mainFrame.adxv.open_image(framecbf, raise_window=raise_window)
    # btnPredictShow_click()

    def spinPredictFrame_spin(self, ev):
        # Keep the text box in sync and live-update Adxv if it is running.
        self.txtPredictFrame.SetValue(str(self.spinPredictFrame.GetValue()))
        if mainFrame.adxv.is_alive():
            wx.CallAfter(self.btnPredictShow_click, None, False)
    # spinPredictFrame_spin()

    def update_summary(self, job, result):
        """Render the quick HTML summary for job using its processing result dict."""
        prefix = os.path.relpath(job.filename, config.params.topdir)
        startframe, endframe = self.current_key[1]
        osc, exptime, clen = job.osc_step, job.exp_time, job.distance
        att = "%s %d um" % job.attenuator
        # Move this somewhere!
        # Detector edge half-length [mm], used to estimate resolution at the edge.
        len_edge = {"CCD (MX225HS)":225./2., }.get(job.detector, 0)
        if len_edge > 0: edgeresn = job.wavelength / 2. / numpy.sin(numpy.arctan(len_edge/job.distance)/2.)
        else: edgeresn = float("nan")
        exc_ranges_strs = []
        for lr, rr in result["exclude_data_ranges"]:
            if lr==rr: exc_ranges_strs.append("%d"%lr)
            else: exc_ranges_strs.append("%d-%d"%(lr,rr))
        exc_ranges = ", ".join(exc_ranges_strs)
        if not exc_ranges_strs: exc_ranges = "(none)"
        # NOTE: the %(name)s placeholders are filled from locals() below.
        html_str = """\
<b>Quick Summary</b><br>
<table>
<tr align="left"><th>Files</th><td>%(prefix)s (%(startframe)4d .. %(endframe)4d)</td></tr>
<tr align="left"><th>Conditions</th><td>DelPhi= %(osc).3f&deg;, Exp= %(exptime).3f s, Distance= %(clen).1f mm (%(edgeresn).1f A), Att= %(att)s</td></tr>
<tr align="left"><th>Excluded frames</th><td>%(exc_ranges)s</td></tr>
""" % locals()
        decilog = os.path.join(result.get("workdir", ""), "decision.log")
        log_lines = []
        if os.path.isfile(decilog):
            log_lines = open(decilog).readlines()[2:-1]
        ISa = "%.2f"%result["ISa"] if "ISa" in result else "n/a"
        cell_str = ", ".join(map(lambda x: "%.2f"%x,result["cell"])) if "cell" in result else "?"
        sg = result.get("sg", "?")
        symm_warning = ""
        if log_lines and any(map(lambda x: "WARNING: symmetry in scaling is different from Pointless" in x, log_lines)):
            symm_warning = " (WARNING: see log)"
        html_str += """\
<tr align="left"><th>ISa</th><td>%(ISa)s</td></tr>
<tr align="left"><th>Symmetry</th><td>%(sg)s : %(cell_str)s%(symm_warning)s</td></tr>
</table>
""" % locals()
        if "table_html" in result: html_str += "<pre>%s</pre>" % result["table_html"]
        if log_lines:
            html_str += "<br><br><b>Log</b><br><pre>%s</pre>" % "".join(log_lines)
        self.summaryHtml.SetPage(html_str)
    # update_summary()
# class ResultLeftPanel
class PlotPanel(wx.lib.scrolledpanel.ScrolledPanel): # Why this needs to be ScrolledPanel?? (On Mac, Panel is OK, but not works on Linux..)
    """
    Stacked matplotlib plots embedded in a scrolled wx panel.

    Keeps per-subplot lists of plotted artists (self.p) and accumulated
    axis limits (self.lim) so repeated add_plot() calls extend the ranges.
    """
    def __init__(self, parent=None, id=wx.ID_ANY, nplots=4):
        # nplots: number of vertically stacked subplots.
        wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent=parent, id=id, size=(400,1200))
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(vbox)
        self.figure = matplotlib.figure.Figure(tight_layout=True)
        self.subplots = map(lambda i: self.figure.add_subplot(nplots,1,i+1), xrange(nplots))
        self.p = map(lambda x:[], xrange(nplots))                   # plotted artist lists per subplot
        self.lim = map(lambda i: dict(x=[], y=[]), xrange(nplots))  # accumulated [min,max] per axis
        self.canvas = matplotlib.backends.backend_wxagg.FigureCanvasWxAgg(self, wx.ID_ANY, self.figure)
        vbox.Add(self.canvas, 1, flag=wx.ALL|wx.EXPAND)
    # __init__

    """
    def _SetSize(self):
        size = self.GetClientSize()
        print "psize=",size
        size[1] //= 2
        self.SetSize(size)
        self.canvas.SetSize(size)
        self.figure.set_size_inches(float(size[0])/self.figure.get_dpi(),
                                    float(size[1])/self.figure.get_dpi())
        #self.Fit()
    # _SetSize()
    """

    def clear_plot(self):
        """Remove all plotted artists and reset the accumulated axis limits."""
        for i in xrange(len(self.p)):
            for p in self.p[i]:
                for pp in p: pp.remove()
            self.p[i] = []
            self.lim[i]["x"], self.lim[i]["y"] = [], []
    # clear_plot()

    def add_plot(self, n, x, y, label="", marker="o", color="blue", show_legend=True):
        """Plot (x, y) on subplot n and widen the stored axis ranges to fit."""
        p = self.subplots[n].plot(x, y, marker=marker, label=label, color=color)
        self.p[n].append(p)
        # Define range
        for k, v in (("x",x), ("y", y)):
            if self.lim[n][k] == []: self.lim[n][k] = [min(v), max(v)]
            else: self.lim[n][k] = [min(min(v), self.lim[n][k][0]), max(max(v), self.lim[n][k][1])]
        self.subplots[n].set_xlim(*self.lim[n]["x"])
        yrange = self.lim[n]["y"][1] - self.lim[n]["y"][0]
        # Pad the y range by 10% below and above the data span.
        self.subplots[n].set_ylim(self.lim[n]["y"][0]-0.2*yrange/2, self.lim[n]["y"][0]+2.2*yrange/2) # 1-factor, 1+factor
        if show_legend:
            self.subplots[n].legend(loc='best').draggable(True)
    # plot()

    def refresh(self):
        # Re-apply the current size and redraw the canvas.
        self.SetSize((self.Size[0],self.Size[1]))
        self.canvas.draw()
# class PlotPanel
class ResultRightPanel(wx.Panel):
    """
    Right result panel: a notebook with per-frame statistic plots and a
    log-file viewer for the selected dataset's processing workdir.
    """
    def __init__(self, parent=None, id=wx.ID_ANY):
        wx.Panel.__init__(self, parent=parent, id=id)
        self.current_key = None
        self.current_workdir = None
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(vbox)
        self.notebook = wx.Notebook(self, id=wx.ID_ANY, style=wx.BK_DEFAULT)
        vbox.Add(self.notebook, 1, wx.ALL|wx.EXPAND, 5)
        self.plotsPanel = wx.lib.scrolledpanel.ScrolledPanel(self.notebook)
        self.plotsPanel.SetupScrolling()
        self.logPanel = wx.Panel(self.notebook)
        self.notebook.AddPage(self.plotsPanel, "Plots")
        self.notebook.AddPage(self.logPanel, "Log files")
        # Panel for plots
        pvbox = wx.BoxSizer(wx.VERTICAL)
        self.plotsPanel.SetSizer(pvbox)
        self.plots = PlotPanel(self.plotsPanel, nplots=4)
        pvbox.Add(self.plots, 0, flag=wx.ALL|wx.EXPAND)
        # Panel for log files
        lvbox = wx.BoxSizer(wx.VERTICAL)
        self.logPanel.SetSizer(lvbox)
        lhbox1 = wx.BoxSizer(wx.HORIZONTAL)
        lvbox.Add(lhbox1)
        self.cmbLog = wx.ComboBox(self.logPanel, wx.ID_ANY, style=wx.CB_READONLY)
        self.txtLog = wx.TextCtrl(self.logPanel, wx.ID_ANY, size=(450,25), style=wx.TE_MULTILINE|wx.TE_DONTWRAP|wx.TE_READONLY)
        self.txtLog.SetFont(wx.Font(10, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.NORMAL))
        self.lblLog = wx.StaticText(self.logPanel, wx.ID_ANY, "")
        lhbox1.Add(wx.StaticText(self.logPanel, wx.ID_ANY, "Log file: "), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL)
        lhbox1.Add(self.cmbLog)
        lhbox1.Add(self.lblLog, flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        lvbox.Add(self.txtLog, 1, flag=wx.EXPAND|wx.ALL)
        self.cmbLog.Bind(wx.EVT_COMBOBOX, self.cmbLog_select)
    # __init__()

    def set_current_key(self, key):
        self.current_key = key
    # set_current_key()

    def set_current_workdir(self, wd):
        """Point the panel at a processing workdir: rebuild plots and log list.

        Reads SPOT.XDS, INTEGRATE.LP and XDSSTAT.LP from wd (XDS engine);
        dials support is not implemented yet.
        """
        self.current_workdir = wd
        i_plot = -1  # NOTE(review): unused
        # Plot stuff
        self.plots.clear_plot()
        spot_xds = os.path.join(wd, "SPOT.XDS")
        if os.path.isfile(spot_xds):
            sx = idxreflp.SpotXds(spot_xds)
            # spots: per-frame (frame, (n_indexed, n_unindexed)) data.
            spots = sx.indexed_and_unindexed_by_frame()
            if spots:
                spots_f = map(lambda x: x[0], spots)
                spots_n = map(lambda x: x[1][0]+x[1][1], spots)
                self.plots.add_plot(0, spots_f, spots_n, label="Spots", color="blue")
                spots_n = map(lambda x: x[1][0], spots)
                if sum(spots_n) > 0:
                    self.plots.add_plot(0, spots_f, spots_n, label="Indexed", color="green")
        if config.params.engine == "xds":
            integrate_lp = os.path.join(wd, "INTEGRATE.LP")
            xdsstat_lp = os.path.join(wd, "XDSSTAT.LP")
            if os.path.isfile(integrate_lp):
                lp = integratelp.IntegrateLp(integrate_lp)
                # SgimaR
                self.plots.add_plot(1, map(int,lp.frames), map(float,lp.sigmars), label=("SigmaR"))
                # Rotations: build step-style x/y arrays per rotation axis
                # (a frame block spanning >1 frame contributes a flat segment).
                xs, ys = [], [[], [], []]
                for frames, v in lp.blockparams.items():
                    rots = map(float, v.get("rotation", ["nan"]*3))
                    assert len(rots) == 3
                    if len(frames) > 1:
                        xs.extend([frames[0], frames[-1]])
                        for i in xrange(3): ys[i].extend([rots[i],rots[i]])
                    else:
                        xs.append(frames[0])
                        for i in xrange(3): ys[i].append(rots[i])
                for i, y in enumerate(ys):
                    self.plots.add_plot(2, xs, y, label=("rotx","roty","rotz")[i], color=("red", "green", "blue")[i])
            if os.path.isfile(xdsstat_lp):
                lp = xdsstat.XdsstatLp(xdsstat_lp)
                if lp.by_frame:
                    # R-meas
                    self.plots.add_plot(3, map(int,lp.by_frame["frame"]),
                                        map(float,lp.by_frame["rmeas"]), label=("R-meas"))
        elif config.params.engine == "dials":
            pass
        self.plots.refresh()
        # Log file stuff: repopulate the combo, preferring the previous
        # selection, then CORRECT.LP, then the last entry.
        prev_cmbLog_sel = self.cmbLog.GetValue()
        to_append = []
        if config.params.engine == "xds":
            for j in ("XYCORR", "INIT", "COLSPOT", "IDXREF", "DEFPIX", "XPLAN", "INTEGRATE", "CORRECT"):
                for f in glob.glob(os.path.join(wd, "%s*.LP"%j)):
                    to_append.append(os.path.basename(f))
        elif config.params.engine == "dials":
            for j in ("import", "find_spots", "index", "integrate", "export"):
                f = os.path.join(wd, "dials.%s.debug.log"%j)
                if os.path.isfile(f): to_append.append(os.path.basename(f))
        self.cmbLog.Clear()
        self.cmbLog.AppendItems(to_append)
        if prev_cmbLog_sel and prev_cmbLog_sel in to_append:
            self.cmbLog.Select(to_append.index(prev_cmbLog_sel))
        elif "CORRECT.LP" in to_append:
            self.cmbLog.Select(to_append.index("CORRECT.LP"))
        else:
            self.cmbLog.Select(self.cmbLog.GetCount() - 1)
        self.cmbLog_select(None)
    # set_current_workdir()

    def cmbLog_select(self, ev):
        """Load the selected log file into the viewer with its mtime."""
        if self.current_workdir is None: return
        lpfile = os.path.join(self.current_workdir, self.cmbLog.GetValue())
        if not os.path.isfile(lpfile): return
        self.lblLog.SetLabel("Modified: %s" % time.ctime(os.path.getmtime(lpfile)))
        self.txtLog.SetValue(open(lpfile).read())
    # cmbLog_select()
# class ResultRightPanel
class MainFrame(wx.Frame):
    """
    Main KAMO window: control panel on top, result panels (left/right)
    below, with a background thread watching the processing logs.
    """
    def __init__(self, parent=None, id=wx.ID_ANY, topdir=None):
        wx.Frame.__init__(self, parent=parent, id=id, title="KAMO system started at %s" % time.strftime("%Y-%m-%d %H:%M:%S"),
                          size=(1500,950))
        self.adxv = Adxv(adxv_bin=config.params.adxv)
        # Main splitter: control panel above, result splitter below.
        self.splitter = wx.SplitterWindow(self, id=wx.ID_ANY)
        self.ctrlPanel = ControlPanel(self.splitter)
        self.splitter2 = wx.SplitterWindow(self.splitter, id=wx.ID_ANY)
        self.resultLPanel = ResultLeftPanel(self.splitter2)
        self.resultRPanel = ResultRightPanel(self.splitter2)
        self.splitter2.SplitVertically(self.resultLPanel, self.resultRPanel)
        self.splitter2.SetSashGravity(0.5)
        self.splitter2.SetMinimumPaneSize(10)
        self.splitter.SplitHorizontally(self.ctrlPanel, self.splitter2)
        self.splitter.SetSashGravity(0.5)
        self.splitter.SetSashPosition(300) # want to delete this line for Mac, but then unhappy on linux..
        self.splitter.SetMinimumPaneSize(10)
        self.Bind(EVT_SHOW_PROC_RESULT, self.show_proc_result)
        self.Bind(wx.EVT_CLOSE, self.onClose)
        # Background log watcher posts EVT_LOGS_UPDATED to the control panel.
        self.watch_log_thread = WatchLogThread(self)
        self.Bind(EVT_LOGS_UPDATED, self.ctrlPanel.on_update)
        # With a jobs pickle there is nothing to keep watching; scan once.
        if config.params.jobspkl is not None: config.params.logwatch_once = True
        self.watch_log_thread.start(config.params.logwatch_interval)
        self.Show()
    # __init__()

    def onClose(self, ev):
        self.Destroy()
    # onClose()

    def show_proc_result(self, ev):
        """Handle EVT_SHOW_PROC_RESULT: point both result panels at ev.key's result."""
        key = ev.key
        prefix, nr = key
        result = bssjobs.get_process_result(key)
        for obj in (self.resultLPanel, self.resultRPanel):
            obj.set_current_key(key)
            obj.set_current_workdir(result["workdir"])
        self.resultLPanel.update_summary(job=bssjobs.get_job(key), result=result)
    # show_proc_result()
# class MainFrame
def run_from_args(argv):
# Not used in this script, but required in KAMO.
#import scipy
import networkx
global batchjobs
global mainFrame
global bssjobs
print """
KAMO (Katappashikara Atsumeta data wo Manual yorimoiikanjide Okaeshisuru) system is an automated data processing system for SPring-8 beamlines.
This is an alpha-version. If you found something wrong, please let staff know! We would appreciate your feedback.
* Use cases (options) *
- Attention! when not small-wedge mode (normal data collection), small_wedges=false is needed!!
- If you don't want to use SGE, batch.engine=sh is required.
1) On beamline, on-line data processing along with data collection
bl=32xu small_wedges=false [workdir=_kamoproc]
2) With ZOO system on BL32XU
bl=32xu mode=zoo [workdir=_kamoproc]
3) To process already-collected data (off-line & directory search mode)
bl=other [include_dir=dirs.lst]
** This program must be started in the top directory of your datasets! **
(Only processes the data in the subdirectories)
"""
if "-h" in argv or "--help" in argv:
print "All parameters:\n"
iotbx.phil.parse(gui_phil_str).show(prefix=" ", attributes_level=1)
return
cmdline = iotbx.phil.process_command_line(args=argv,
master_string=gui_phil_str)
config.params = cmdline.work.extract()
args = cmdline.remaining_args
if config.params.bl is None:
print "ERROR: bl= is needed."
return
app = wx.App()
from yamtbx.command_line import kamo_test_installation
if config.params.engine == "xds" and not kamo_test_installation.tst_xds():
if wx.MessageDialog(None, "You selected XDS, but XDS is not installed or expired at least in this computer. Proceed anyway?",
"Warning", style=wx.OK|wx.CANCEL).ShowModal() == wx.ID_CANCEL:
return
# Setup logging
mylog.config(beamline=config.params.bl, log_root=config.params.log_root)
mylog.info("Program started in %s." % os.getcwd())
if len(config.params.include_dir) > 0 and len(config.params.exclude_dir) > 0:
mylog.error("Can't specify both include_dir= and exclude_dir")
return
for arg in args:
if config.params.topdir is None and os.path.isdir(arg):
config.params.topdir = os.path.abspath(arg)
elif not os.path.exists(arg):
mylog.error("Given path does not exist: %s" % arg)
return
if (config.params.known.space_group, config.params.known.unit_cell).count(None) == 1:
mylog.error("Specify both space_group and unit_cell!")
return
# Test known crystal symmetry given
if config.params.known.space_group is not None:
try:
xs = crystal.symmetry(config.params.known.unit_cell, config.params.known.space_group)
if not xs.change_of_basis_op_to_reference_setting().is_identity_op():
xs_refset = xs.as_reference_setting()
mylog.error('Sorry. Currently space group in non-reference setting is not supported. In this case please give space_group=%s unit_cell="%s" instead.' % (str(xs_refset.space_group_info()).replace(" ",""), format_unit_cell(xs_refset.unit_cell())))
return
except:
mylog.error("Invalid crystal symmetry. Check space_group= and unit_cell=.")
return
if config.params.xds.reverse_phi is not None:
if config.params.xds.use_dxtbx: # rotation axis is determined by dxtbx
mylog.error("When use_dxtbx=true, you cannot specify reverse_phi= option")
return
if config.params.xds.override.rotation_axis:
mylog.error("When xds.override.rotation_axis= is given, you cannot specify reverse_phi= option")
return
if config.params.topdir is None: config.params.topdir = os.getcwd()
if not os.path.isabs(config.params.topdir):
config.params.topdir = os.path.abspath(config.params.topdir)
if len(config.params.include_dir) == 1 and os.path.isfile(config.params.include_dir[0]):
config.params.include_dir = read_path_list(config.params.include_dir[0])
if len(config.params.exclude_dir) == 1 and os.path.isfile(config.params.exclude_dir[0]):
config.params.exclude_dir = read_path_list(config.params.exclude_dir[0])
for i, d in enumerate(config.params.include_dir):
if not os.path.isabs(d): config.params.include_dir[i] = os.path.join(config.params.topdir, d)
for i, d in enumerate(config.params.exclude_dir):
if not os.path.isabs(d): config.params.exclude_dir[i] = os.path.join(config.params.topdir, d)
if not os.path.exists(config.params.workdir):
os.makedirs(config.params.workdir)
if not os.path.isabs(config.params.workdir):
config.params.workdir = os.path.abspath(config.params.workdir)
mylog.add_logfile(os.path.join(config.params.workdir, "kamo_gui.log"))
mylog.info("Starting GUI in %s" % config.params.workdir)
# Save params
savephilpath = os.path.join(config.params.workdir, time.strftime("gui_params_%y%m%d-%H%M%S.txt"))
with open(savephilpath, "w") as ofs:
ofs.write("# Command-line args:\n")
ofs.write("# kamo %s\n\n" % " ".join(map(lambda x: pipes.quote(x), argv)))
libtbx.phil.parse(gui_phil_str).format(config.params).show(out=ofs,
prefix="")
mylog.info("GUI parameters were saved as %s" % savephilpath)
if config.params.batch.engine == "sge":
try:
batchjobs = batchjob.SGE(pe_name=config.params.batch.sge_pe_name)
except batchjob.SgeError, e:
mylog.error(e.message)
mylog.error("SGE not configured. If you want to run KAMO on your local computer only (not to use queueing system), please specify batch.engine=sh")
return
elif config.params.batch.engine == "sh":
if config.params.batch.sh_max_jobs == libtbx.Auto:
nproc_all = libtbx.easy_mp.get_processes(None)
mylog.info("Automatically adjusting batch.sh_max_jobs based on available CPU number (%d)" % nproc_all)
if nproc_all > config.params.batch.nproc_each:
config.params.batch.sh_max_jobs = nproc_all // config.params.batch.nproc_each
else:
config.params.batch.nproc_each = nproc_all
config.params.batch.sh_max_jobs = 1
batchjobs = batchjob.ExecLocal(max_parallel=config.params.batch.sh_max_jobs)
else:
raise "Unknown batch engine: %s" % config.params.batch.engine
if "normal" in config.params.mode and config.params.bl != "other":
config.params.blconfig.append("/isilon/blconfig/bl%s" % config.params.bl)
if "zoo" in config.params.mode:
config.params.blconfig.append("/isilon/BL32XU/BLsoft/PPPP/10.Zoo/ZooConfig")
if config.params.logwatch_target == "dataset_paths_txt" and not config.params.dataset_paths_txt:
if config.params.bl == "other":
mylog.info("bl=other and dataset_paths_txt not specified. changing a parameter logwatch_target= to local.")
config.params.logwatch_target = "local"
else:
blname = "BL" + config.params.bl.upper()
config.params.dataset_paths_txt = os.path.join(os.path.expanduser("~"), ".dataset_paths_for_kamo_%s.txt"%blname)
mylog.info("Changing a parameter dataset_paths_txt= to %s" % config.params.dataset_paths_txt)
if config.params.logwatch_once is None:
config.params.logwatch_once = (config.params.bl == "other" and not config.params.dataset_paths_txt)
bssjobs = BssJobs()
if config.params.xds.override.geometry_reference:
bssjobs.load_override_geometry(config.params.xds.override.geometry_reference)
mainFrame = MainFrame(parent=None, id=wx.ID_ANY)
app.TopWindow = mainFrame
app.MainLoop()
mylog.info("Normal exit.")
if __name__ == "__main__":
    # Script entry point: forward command-line arguments (without the program name).
    import sys
    run_from_args(sys.argv[1:])
| bsd-3-clause |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-128-transformer/dataset_preproc/data_generators/video_generated.py | 7 | 5683 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for video problems with artificially generated frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensor2tensor.data_generators import video_utils
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError:
pass
@registry.register_problem
class VideoStochasticShapes10k(video_utils.VideoProblem):
  """Videos of a single shape moving in a randomly chosen direction.

  Each 5-frame, 64x64 video shows one shape (circle, rectangle or triangle)
  with a random color and size, moving at fixed speed in one of 8 directions.
  """

  @property
  def is_generate_per_split(self):
    """Whether we have a train/test split or just hold out data."""
    return False  # Just hold out some generated data for evals.

  @property
  def frame_height(self):
    return 64

  @property
  def frame_width(self):
    return 64

  @property
  def total_number_of_frames(self):
    # 10k videos
    return 10000 * self.video_length

  @property
  def video_length(self):
    # Number of frames per video.
    return 5

  @property
  def random_skip(self):
    return False

  def eval_metrics(self):
    """Metrics reported during evaluation."""
    eval_metrics = [metrics.Metrics.ACC, metrics.Metrics.ACC_PER_SEQ,
                    metrics.Metrics.IMAGE_RMSE]
    return eval_metrics

  @property
  def extra_reading_spec(self):
    """Additional data fields to store on disk and their decoders."""
    data_fields = {
        "frame_number": tf.FixedLenFeature([1], tf.int64),
    }
    decoders = {
        "frame_number": tf.contrib.slim.tfexample_decoder.Tensor(
            tensor_key="frame_number"),
    }
    return data_fields, decoders

  def hparams(self, defaults, unused_model_hparams):
    """Configure input/target modalities on the supplied hparams object."""
    p = defaults
    p.input_modality = {
        "inputs": ("video", 256),
        "input_frame_number": ("symbol:identity", 1)
    }
    p.target_modality = {
        "targets": ("video", 256),
    }

  @staticmethod
  def get_circle(x, y, z, c, s):
    """Draws a circle with center(x, y), color c, size s and z-order of z."""
    cir = plt.Circle((x, y), s, fc=c, zorder=z)
    return cir

  @staticmethod
  def get_rectangle(x, y, z, c, s):
    """Draws a rectangle with center(x, y), color c, size s and z-order of z."""
    rec = plt.Rectangle((x-s, y-s), s*2.0, s*2.0, fc=c, zorder=z)
    return rec

  @staticmethod
  def get_triangle(x, y, z, c, s):
    """Draws a triangle with center (x, y), color c, size s and z-order of z."""
    points = np.array([[0, 0], [s, s*math.sqrt(3.0)], [s*2.0, 0]])
    tri = plt.Polygon(points + [x-s, y-s], fc=c, zorder=z)
    return tri

  def generate_stochastic_shape_instance(self):
    """Yields one video of a shape moving to a random direction.

    The size and color of the shapes are random but
    consistent in a single video. The speed is fixed.

    Yields:
      uint8 RGB frames of shape (frame_height, frame_width, 3).

    Raises:
      ValueError: The frame size is not square.
    """
    if self.frame_height != self.frame_width or self.frame_height % 2 != 0:
      raise ValueError("Generator only supports square frames with even size.")
    lim = 10.0
    # The 8 possible unit(-ish) motion directions.
    direction = np.array([[+1.0, +1.0],
                          [+1.0, +0.0],
                          [+1.0, -1.0],
                          [+0.0, +1.0],
                          [+0.0, -1.0],
                          [-1.0, +1.0],
                          [-1.0, +0.0],
                          [-1.0, -1.0]
                         ])
    sp = np.array([lim/2.0, lim/2.0])  # start at the center of the canvas
    rnd = np.random.randint(len(direction))
    di = direction[rnd]
    colors = ["b", "g", "r", "c", "m", "y"]
    color = np.random.choice(colors)
    shape = np.random.choice([
        VideoStochasticShapes10k.get_circle,
        VideoStochasticShapes10k.get_rectangle,
        VideoStochasticShapes10k.get_triangle])
    speed = 1.0
    size = np.random.uniform(0.5, 1.5)
    back_color = str(0.0)  # black background (matplotlib grayscale string)
    plt.ioff()
    xy = np.array(sp)
    for _ in range(self.video_length):
      fig = plt.figure()
      # dpi * size_in_inches == frame size in pixels.
      fig.set_dpi(self.frame_height//2)
      fig.set_size_inches(2, 2)
      ax = plt.axes(xlim=(0, lim), ylim=(0, lim))
      # Background
      ax.add_patch(VideoStochasticShapes10k.get_rectangle(
          0.0, 0.0, -1.0, back_color, 25.0))
      # Foreground
      ax.add_patch(shape(xy[0], xy[1], 0.0, color, size))
      plt.axis("off")
      plt.tight_layout(pad=-2.0)
      fig.canvas.draw()
      # BUGFIX: np.fromstring(..., sep="") is deprecated (removed in recent
      # NumPy); np.frombuffer is the supported equivalent for raw bytes.
      image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
      image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
      # Copy: frombuffer returns a read-only view of the canvas buffer.
      image = np.copy(np.uint8(image))
      plt.close(fig)  # close this exact figure to avoid leaking figures
      xy += speed * di
      yield image

  def generate_samples(self, data_dir, tmp_dir, unused_dataset_split):
    """Yields dicts of frames until total_number_of_frames is reached."""
    counter = 0
    done = False
    while not done:
      for frame_number, frame in enumerate(
          self.generate_stochastic_shape_instance()):
        if counter >= self.total_number_of_frames:
          done = True
          break
        yield {"frame": frame, "frame_number": [frame_number]}
        counter += 1
| apache-2.0 |
sachinpro/sachinpro.github.io | tensorflow/contrib/learn/__init__.py | 1 | 1880 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@TensorFlowClassifier
@@TensorFlowDNNClassifier
@@TensorFlowDNNRegressor
@@TensorFlowEstimator
@@TensorFlowLinearClassifier
@@TensorFlowLinearRegressor
@@TensorFlowRNNClassifier
@@TensorFlowRNNRegressor
@@TensorFlowRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
| apache-2.0 |
xumi1993/seispy | seispy/sviewerui.py | 1 | 6007 | import sys
import os
import argparse
# matplotlib.use("Qt5Agg")
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, \
QSizePolicy, QWidget, QDesktopWidget, \
QPushButton, QHBoxLayout, QFileDialog, \
QAction, QShortcut
from os.path import exists, dirname, join
from seispy.setuplog import setuplog
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class SFigure(Figure):
    """Figure wrapper that pages through earthquake records for manual QC.

    Keeps the table of events, the index of the currently shown record and
    the list of record indices marked for dropping.
    NOTE(review): although this subclasses Figure, it never calls the base
    __init__ and instead builds its own figure via plt.subplots(); the Qt
    canvas draws self.fig, not self -- confirm before refactoring.
    """
    def __init__(self, eqs, para, width=21, height=11, dpi=100):
        # eqs: table of events; each row is expected to provide 'date', 'dis',
        # 'bazi', 'mag' and a 'data' object with an .rf stream of 3 traces
        # (see set_properties/plot) -- TODO confirm against caller.
        self.eqs = eqs
        self.row_num = eqs.shape[0]
        self.para = para
        self.log = setuplog()
        self.idx = 0  # index of the record currently displayed
        self.init_figure(width=width, height=height, dpi=dpi)
        self.plot()
        self.drop_lst = []  # eqs-index labels of records marked as dropped
        self.drop_color = 'lightgray'  # trace color used for dropped records
    def init_figure(self, width=21, height=11, dpi=100):
        # Three stacked axes sharing the x (time) axis: one per component.
        self.fig, self.axs = plt.subplots(3, 1, sharex=True, figsize=(width, height), dpi=dpi, tight_layout=True)
    def set_properties(self):
        """Refresh the suptitle and axis limits for the current record."""
        date = self.eqs.iloc[self.idx]['date'].strftime("%Y.%m.%dT%H:%M:%S")
        dis = self.eqs.iloc[self.idx]['dis']
        bazi = self.eqs.iloc[self.idx]['bazi']
        mag = self.eqs.iloc[self.idx]['mag']
        title = '({}/{}) {}, Baz: {:.2f}$^{{\circ}}$, Distance:{:.2f} km, Mw: {:.1f}'.format(self.idx+1, self.row_num, date, bazi, dis, mag)
        self.fig.suptitle(title)
        for ax in self.axs:
            ax.set(xlim=[-self.para.time_before, self.para.time_after])
            ax.minorticks_on()
        self.axs[2].set_xlabel('Time (s)')
    def clear(self):
        # Wipe all three axes before drawing the next record.
        for ax in self.axs:
            ax.cla()
    def plot(self, lc='tab:blue'):
        """Plot the three traces of the current record with line color lc."""
        st = self.eqs.iloc[self.idx]['data'].rf
        self.time_axis = st[0].times()-self.para.time_before
        for i in range(3):
            self.axs[i].plot(self.time_axis, st[i].data, color=lc)
            # Red dashed line marks the reference (zero) time.
            self.axs[i].axvline(x=0, lw=1, ls='--', color='r')
            self.axs[i].set_ylabel(st[i].stats.channel)
        self.set_properties()
    def next_action(self):
        """Advance to the next record, wrapping to 0 past the end."""
        self.clear()
        self.idx += 1
        if self.idx >= self.row_num:
            self.idx = 0
        if self.eqs.index[self.idx] in self.drop_lst:
            self.plot(lc=self.drop_color)
        else:
            self.plot()
    def back_action(self):
        """Step back to the previous record (clamped at 0; no wrap)."""
        self.clear()
        self.idx -= 1
        if self.idx < 0 :
            self.idx = 0
        if self.eqs.index[self.idx] in self.drop_lst:
            self.plot(lc=self.drop_color)
        else:
            self.plot()
    def drop(self):
        """Mark the current record as dropped and grey it out (idempotent)."""
        if self.eqs.index[self.idx] in self.drop_lst:
            return
        self.drop_lst.append(self.eqs.index[self.idx])
        self.plot(lc=self.drop_color)
    def cancel(self):
        """Un-mark the current record and redraw it in the normal color."""
        if self.eqs.index[self.idx] not in self.drop_lst:
            return
        self.drop_lst.remove(self.eqs.index[self.idx])
        self.plot()
    def finish(self):
        """Remove all dropped records from the event table in place."""
        self.eqs.drop(self.drop_lst, inplace=True)
class MyMplCanvas(FigureCanvas):
    """Qt canvas hosting an SFigure so it can be embedded in a Qt layout."""
    def __init__(self, parent=None, eqs=None, para=None, width=21, height=11, dpi=100):
        # Render minus signs as plain hyphens (avoids missing-glyph boxes).
        plt.rcParams['axes.unicode_minus'] = False
        self.sfig = SFigure(eqs, para, width=width, height=height, dpi=dpi)
        # Hand the underlying matplotlib figure to the Qt canvas base class.
        FigureCanvas.__init__(self, self.sfig.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
class MatplotlibWidget(QMainWindow):
    """Main window embedding the canvas and wiring keyboard shortcuts.

    Keys: c = next record, z = previous, d = drop, a = cancel drop,
    Return = apply drops and quit.
    """
    def __init__(self, eqs, para, parent=None):
        super(MatplotlibWidget, self).__init__(parent)
        self.initUi(eqs, para)
    def initUi(self, eqs, para):
        """Build the central widget, layout, shortcuts and window chrome."""
        self.mpl = MyMplCanvas(self, eqs=eqs, para=para, width=21, height=11, dpi=100)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.mpl, 2)
        main_frame = QWidget()
        self.setCentralWidget(main_frame)
        main_frame.setLayout(self.layout)
        self._set_geom_center()
        self._define_global_shortcuts()
        self.setWindowTitle('PickSPhease')
        self.setWindowIcon(QIcon(join(dirname(__file__), 'data', 'seispy.png')))
    def exit_app(self):
        self.close()
    def next_connect(self):
        # Page forward and repaint the canvas.
        self.mpl.sfig.next_action()
        self.mpl.draw()
    def back_connect(self):
        # Page backward and repaint the canvas.
        self.mpl.sfig.back_action()
        self.mpl.draw()
    def drop_connect(self):
        # Mark the current record as dropped and repaint.
        self.mpl.sfig.drop()
        self.mpl.draw()
    def cancel_connect(self):
        # Undo a drop mark and repaint.
        self.mpl.sfig.cancel()
        self.mpl.draw()
    def finish_connect(self):
        # Apply all drops to the table, then terminate the Qt event loop.
        self.mpl.sfig.finish()
        QApplication.quit()
    def _define_global_shortcuts(self):
        """Register the application-wide single-key shortcuts listed above."""
        self.key_c = QShortcut(QKeySequence('c'), self)
        self.key_c.activated.connect(self.next_connect)
        self.key_z = QShortcut(QKeySequence('z'), self)
        self.key_z.activated.connect(self.back_connect)
        self.key_d = QShortcut(QKeySequence('d'), self)
        self.key_d.activated.connect(self.drop_connect)
        self.key_a = QShortcut(QKeySequence('a'), self)
        self.key_a.activated.connect(self.cancel_connect)
        self.key_enter = QShortcut(QKeySequence('Return'), self)
        self.key_enter.activated.connect(self.finish_connect)
    def _set_geom_center(self, height=0.7, width=1):
        """Size the window to the given screen fractions and center it."""
        screen_resolution = QDesktopWidget().screenGeometry()
        screen_height = screen_resolution.height()
        screen_width = screen_resolution.width()
        frame_height = int(screen_height * height)
        frame_width = int(screen_width * width)
        self.setGeometry(0, 0, frame_width, frame_height)
        # NOTE(review): under Python 3 these / divisions yield floats;
        # PyQt5's move() appears to accept them -- confirm on target Qt.
        self.move((screen_width / 2) - (self.frameSize().width() / 2),
                  (screen_height / 2) - (self.frameSize().height() / 2))
rrohan/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)

# Author: Mathieu Blondel <mathieu@mblondel.org>
#         Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#         Balazs Kegl <balazs.kegl@gmail.com>
#         Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split


n_samples = 50000
n_bins = 3  # use 3 bins for calibration_curve as we have 3 clusters here

# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
                  centers=centers, shuffle=False, random_state=42)

# Relabel: first half of the (unshuffled) samples is class 0, second half 1.
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])

# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
    train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)

# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train)  # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]

# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]

# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]

print("Brier scores: (the smaller the better)")

# Lower Brier score == better-calibrated probabilities.
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)

clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)

clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)

###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
    this_X = X_train[y_train == this_y]
    this_sw = sw_train[y_train == this_y]
    plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
                label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")

plt.figure()
# Sort the samples by their uncalibrated probability so the curves read
# left-to-right from least to most confident.
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
         label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
         label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
         y_test[order].reshape(25, -1).mean(1),
         'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
           "(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
bikong2/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Check that y_true and y_pred belong to the same classification task

    This converts multiclass or binary types to a common shape, and raises a
    ValueError for a mix of multilabel and multiclass targets, a mix of
    multilabel formats, for the presence of continuous-valued or multioutput
    targets, or for targets of different lengths.

    Column vectors are squeezed to 1d, while multilabel formats are returned
    as CSR sparse label indicators.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``

    y_true : array or indicator matrix

    y_pred : array or indicator matrix
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)

    y_type = set([type_true, type_pred])
    # A binary/multiclass mix is fine: treat both as multiclass.
    if y_type == set(["binary", "multiclass"]):
        y_type = set(["multiclass"])

    if len(y_type) > 1:
        raise ValueError("Can't handle mix of {0} and {1}"
                         "".format(type_true, type_pred))

    # We can't have more than one value on y_type => The set is no more needed
    y_type = y_type.pop()

    # No metrics support "multiclass-multioutput" format
    if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
        raise ValueError("{0} is not supported".format(y_type))

    if y_type in ["binary", "multiclass"]:
        # Squeeze column vectors to 1d.
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)

    if y_type.startswith('multilabel'):
        # Normalize any multilabel representation to CSR indicator matrices.
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'

    return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.

    In multilabel classification, this function computes subset accuracy:
    the set of labels predicted for a sample must *exactly* match the
    corresponding set of labels in y_true.

    Read more in the :ref:`User Guide <accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        If ``normalize == True``, return the correctly classified samples
        (float), else it returns the number of correctly classified samples
        (int).

        The best performance is 1 with ``normalize == True`` and the number
        of samples with ``normalize == False``.

    See also
    --------
    jaccard_similarity_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equal
    to the ``jaccard_similarity_score`` function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import accuracy_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> accuracy_score(y_true, y_pred)
    0.5
    >>> accuracy_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:
    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """

    # Compute accuracy for each possible representation
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type.startswith('multilabel'):
        # Subset accuracy: a sample scores 1 only if every label matches,
        # i.e. the row-wise difference of the indicator matrices is all zero.
        differing_labels = count_nonzero(y_true - y_pred, axis=1)
        score = differing_labels == 0
    else:
        score = y_true == y_pred

    return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
    """Compute confusion matrix to evaluate the accuracy of a classification

    By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
    is equal to the number of observations known to be in group :math:`i` but
    predicted to be in group :math:`j`.

    Read more in the :ref:`User Guide <confusion_matrix>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder
        or select a subset of labels.
        If none is given, those that appear at least once
        in ``y_true`` or ``y_pred`` are used in sorted order.

    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <http://en.wikipedia.org/wiki/Confusion_matrix>`_

    Examples
    --------
    >>> from sklearn.metrics import confusion_matrix
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> confusion_matrix(y_true, y_pred)
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))
    # convert yt, yp into index; unknown labels map to the out-of-range
    # sentinel n_labels + 1 so they can be filtered out below.
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]

    # Accumulate counts via a sparse COO matrix: duplicate (i, j) entries
    # are summed on conversion to a dense array.
    # NOTE(review): np.int is deprecated in modern NumPy (alias of int).
    CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
                    shape=(n_labels, n_labels)
                    ).toarray()

    return CM
def cohen_kappa_score(y1, y2, labels=None):
    r"""Cohen's kappa: a statistic that measures inter-annotator agreement.

    This function computes Cohen's kappa [1], a score that expresses the level
    of agreement between two annotators on a classification problem. It is
    defined as

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement on the label
    assigned to any sample (the observed agreement ratio), and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly.
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels [2].

    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.

    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The kappa statistic is
        symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to select a
        subset of labels. If None, all labels that appear at least once in
        ``y1`` or ``y2`` are used.

    Returns
    -------
    kappa : float
        The kappa statistic, which is a number between -1 and 1. The maximum
        value means complete agreement; zero or lower means chance agreement.

    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistic 34(4):555-596.
    """
    # Joint label distribution of the two annotators.
    confusion = confusion_matrix(y1, y2, labels=labels)
    P = confusion / float(confusion.sum())
    p_observed = np.trace(P)  # probability mass on the diagonal (agreement)
    # Chance agreement from the two annotators' marginal distributions.
    p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
    return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score.

    The Jaccard index (intersection size divided by union size) compares
    each sample's set of predicted labels against the corresponding set
    of true labels. In binary and multiclass classification this is
    equivalent to ``accuracy_score``; it differs only for multilabel
    input.

    Read more in the :ref:`User Guide <jaccard_similarity_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the sum of the per-sample Jaccard
        coefficients; otherwise return their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        Average Jaccard coefficient when ``normalize == True`` (best
        value 1), otherwise the sum over the sample set (best value is
        the number of samples).

    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <http://en.wikipedia.org/wiki/Jaccard_index>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import jaccard_similarity_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> jaccard_similarity_score(y_true, y_pred)
    0.5
    >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
    2
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if not y_type.startswith('multilabel'):
        # Single-label targets: per-sample exact match, as in accuracy.
        per_sample = y_true == y_pred
    else:
        with np.errstate(divide='ignore', invalid='ignore'):
            # oddly, we may get an "invalid" rather than a "divide" error here
            union = count_nonzero(y_true + y_pred, axis=1)
            intersection = count_nonzero(y_true.multiply(y_pred), axis=1)
            per_sample = intersection / union
            # An empty union yields NaN; define the Jaccard there as 1,
            # following lim_{x->0} x/x = 1.
            per_sample[union == 0.0] = 1.0
    return _weighted_sum(per_sample, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
    """Compute the Matthews correlation coefficient (MCC) for binary classes.

    The MCC is a balanced quality measure for two-class predictions that
    accounts for true/false positives and negatives, usable even with
    very imbalanced classes. It is a correlation coefficient in
    [-1, +1]: +1 is perfect prediction, 0 an average random prediction
    and -1 an inverse prediction. Also known as the phi coefficient.

    Only binary targets are supported; a ``ValueError`` is raised for
    any other target type.

    Read more in the :ref:`User Guide <matthews_corrcoef>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    Returns
    -------
    mcc : float
        The Matthews correlation coefficient.

    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing
           the accuracy of prediction algorithms for classification: an
           overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
           <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_

    Examples
    --------
    >>> from sklearn.metrics import matthews_corrcoef
    >>> y_true = [+1, +1, +1, -1]
    >>> y_pred = [+1, -1, +1, +1]
    >>> matthews_corrcoef(y_true, y_pred)  # doctest: +ELLIPSIS
    -0.33...
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type != "binary":
        raise ValueError("%s is not supported" % y_type)

    # Map the two class labels onto {0, 1} so Pearson correlation of the
    # encoded vectors equals the MCC.
    encoder = LabelEncoder()
    encoder.fit(np.hstack([y_true, y_pred]))
    encoded_true = encoder.transform(y_true)
    encoded_pred = encoder.transform(y_pred)

    with np.errstate(invalid='ignore'):
        coef = np.corrcoef(encoded_true, encoded_pred)[0, 1]

    # Constant vectors produce NaN (zero variance); report 0 instead.
    return 0. if np.isnan(coef) else coef
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.

    If ``normalize`` is ``True``, return the fraction of
    misclassifications (float); otherwise return the number of
    misclassifications (int). The best performance is 0.

    In multilabel classification this is the subset zero-one loss: the
    entire label set of a sample must be predicted correctly, otherwise
    that sample's loss is one.

    Read more in the :ref:`User Guide <zero_one_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of misclassifications.
        Otherwise, return the fraction of misclassifications.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int
        Fraction (``normalize == True``) or count of misclassifications.

    See also
    --------
    accuracy_score, hamming_loss, jaccard_similarity_score

    Examples
    --------
    >>> from sklearn.metrics import zero_one_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> zero_one_loss(y_true, y_pred)
    0.25
    >>> zero_one_loss(y_true, y_pred, normalize=False)
    1
    """
    score = accuracy_score(y_true, y_pred,
                           normalize=normalize,
                           sample_weight=sample_weight)

    if normalize:
        # accuracy_score returned a fraction; complement it.
        return 1 - score

    # Un-normalized: subtract the (weighted) number of correct samples
    # from the (weighted) total.
    if sample_weight is None:
        total = _num_samples(y_true)
    else:
        total = np.sum(sample_weight)
    return total - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure.

    The F1 score is the harmonic mean of precision and recall, best at 1
    and worst at 0::

        F1 = 2 * (precision * recall) / (precision + recall)

    In the multi-class and multi-label case this is an average of the
    per-class F1 scores, controlled by ``average``.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``. Labels present in the data
        can be excluded; labels absent from the data yield 0 components
        in a macro average. For multilabel targets, labels are column
        indices. By default, all labels in ``y_true`` and ``y_pred`` are
        used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18
        set ``pos_label=None`` to use another averaging method over
        binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets. ``None`` returns the
        per-class scores; ``'binary'`` reports only ``pos_label``;
        ``'micro'`` counts total true/false positives and false
        negatives globally; ``'macro'`` is the unweighted mean over
        labels; ``'weighted'`` weights the mean by support; ``'samples'``
        averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or the
        averaged F1 scores for the multiclass task.

    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import f1_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> f1_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.26...
    """
    # F1 is exactly the beta=1 special case of the F-beta score.
    return fbeta_score(y_true, y_pred, beta=1, labels=labels,
                       pos_label=pos_label, average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score.

    The F-beta score is the weighted harmonic mean of precision and
    recall, optimal at 1 and worst at 0. ``beta`` sets the trade-off:
    ``beta < 1`` lends more weight to precision, ``beta > 1`` favors
    recall (``beta -> 0`` considers only precision, ``beta -> inf`` only
    recall).

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float
        Weight of precision in harmonic mean.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``. Labels present in the data
        can be excluded; labels absent from the data yield 0 components
        in a macro average. For multilabel targets, labels are column
        indices. By default, all labels in ``y_true`` and ``y_pred`` are
        used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18
        set ``pos_label=None`` to use another averaging method over
        binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets. ``None`` returns the
        per-class scores; ``'binary'`` reports only ``pos_label``;
        ``'micro'`` counts total true/false positives and false
        negatives globally; ``'macro'`` is the unweighted mean over
        labels; ``'weighted'`` weights the mean by support; ``'samples'``
        averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, \
                  shape = [n_unique_labels]
        F-beta score of the positive class in binary classification or
        the averaged F-beta scores for the multiclass task.

    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.
    .. [2] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    """
    # Delegate all the label/average handling; warn only for the F-score
    # since precision/recall are not reported here.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             beta=beta,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('f-score',),
                                             sample_weight=sample_weight)
    # (precision, recall, fbeta, support) -- keep only the F-beta part.
    return scores[2]
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.

    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.

    The support is the number of occurrences of each class in ``y_true``.

    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18 it is
        necessary to set ``pos_label=None`` if seeking to use another averaging
        method over binary targets.

    average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

        Note that if ``pos_label`` is given in binary classification with
        `average != 'binary'`, only that positive class is reported. This
        behavior is deprecated and will change in version 0.18.

    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision: float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    recall: float (if average is not None) or array of float, , shape =\
        [n_unique_labels]

    fbeta_score: float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    support: int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.

    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <http://en.wikipedia.org/wiki/Precision_and_recall>`_
    .. [2] `Wikipedia entry for the F1-score
           <http://en.wikipedia.org/wiki/F1_score>`_
    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`

    Examples
    --------
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    """
    # Validate the averaging mode and beta before doing any work.
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' +
                         str(average_options))
    if beta <= 0:
        raise ValueError("beta should be >0 in the F-beta score")

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)

    # Deprecation path: 'binary' requested but the target is not truly
    # binary (or pos_label is None) -- fall back to the historical
    # 'weighted' behaviour and warn.
    if average == 'binary' and (y_type != 'binary' or pos_label is None):
        warnings.warn('The default `weighted` averaging is deprecated, '
                      'and from version 0.18, use of precision, recall or '
                      'F-score with multiclass or multilabel data or '
                      'pos_label=None will result in an exception. '
                      'Please set an explicit value for `average`, one of '
                      '%s. In cross validation use, for instance, '
                      'scoring="f1_weighted" instead of scoring="f1".'
                      % str(average_options), DeprecationWarning, stacklevel=2)
        average = 'weighted'

    if y_type == 'binary' and pos_label is not None and average is not None:
        # Binary special case: restrict reporting to the positive class.
        if average != 'binary':
            # Averaging over binary input still reports only the positive
            # class until 0.18 -- deprecated behaviour.
            warnings.warn('From version 0.18, binary input will not be '
                          'handled specially when using averaged '
                          'precision/recall/F-score. '
                          'Please use average=\'binary\' to report only the '
                          'positive class performance.', DeprecationWarning)
        if labels is None or len(labels) <= 2:
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # Only negative labels
                    return (0., 0., 0., 0)
                else:
                    raise ValueError("pos_label=%r is not a valid label: %r" %
                                     (pos_label, present_labels))
            labels = [pos_label]
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        # Requested labels first, then any remaining present labels; only
        # the first n_labels entries are reported at the end.
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])

    ### Calculate tp_sum, pred_sum, true_sum ###

    if y_type.startswith('multilabel'):
        # Per-sample sums for 'samples' averaging, per-label otherwise.
        sum_axis = 1 if average == 'samples' else 0

        # All labels are index integers for multilabel.
        # Select labels:
        if not np.all(labels == present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d < 0' % np.min(labels))
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]

        # calculate weighted counts
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)

    elif average == 'samples':
        raise ValueError("Sample-based precision, recall, fscore is "
                         "not meaningful outside multilabel "
                         "classification. See the accuracy_score instead.")
    else:
        # Multiclass path: encode labels as 0..n-1 so counts can be
        # accumulated with bincount.
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_

        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None

        if len(tp_bins):
            tp_sum = bincount(tp_bins, weights=tp_bins_weights,
                              minlength=len(labels))
        else:
            # Pathological case: no sample was predicted correctly.
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = bincount(y_pred, weights=sample_weight,
                                minlength=len(labels))
        if len(y_true):
            true_sum = bincount(y_true, weights=sample_weight,
                                minlength=len(labels))

        # Retain only selected labels (in the caller-requested order).
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]

    if average == 'micro':
        # Global counting: collapse the per-label statistics to one bin.
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])

    ### Finally, we have all our sufficient statistics. Divide! ###

    beta2 = beta ** 2
    with np.errstate(divide='ignore', invalid='ignore'):
        # Divide, and on zero-division, set scores to 0 and warn:
        # oddly, we may get an "invalid" rather than a "divide" error here.
        precision = _prf_divide(tp_sum, pred_sum,
                                'precision', 'predicted', average, warn_for)
        recall = _prf_divide(tp_sum, true_sum,
                             'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where pos
        # and true are nonzero, in which case, F is well-defined and zero
        f_score = ((1 + beta2) * precision * recall /
                   (beta2 * precision + recall))
        f_score[tp_sum == 0] = 0.0

    ## Average the results ##

    if average == 'weighted':
        # Weight each label's score by its support in y_true.
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None

    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support

    return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the
    number of true positives and ``fp`` the number of false positives:
    intuitively, the classifier's ability not to label a negative sample
    as positive. Best value 1, worst value 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``. Labels present in the data
        can be excluded; labels absent from the data yield 0 components
        in a macro average. For multilabel targets, labels are column
        indices. By default, all labels in ``y_true`` and ``y_pred`` are
        used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18
        set ``pos_label=None`` to use another averaging method over
        binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets. ``None`` returns the
        per-class scores; ``'binary'`` reports only ``pos_label``;
        ``'micro'`` counts total true/false positives and false
        negatives globally; ``'macro'`` is the unweighted mean over
        labels; ``'weighted'`` weights the mean by support; ``'samples'``
        averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, \
                shape = [n_unique_labels]
        Precision of the positive class in binary classification or the
        averaged precision for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import precision_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> precision_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.22...
    """
    # Warn only about ill-defined precision; recall/F are not reported.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('precision',),
                                             sample_weight=sample_weight)
    # (precision, recall, fbeta, support) -- keep only precision.
    return scores[0]
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the
    number of true positives and ``fn`` the number of false negatives:
    intuitively, the classifier's ability to find all the positive
    samples. Best value 1, worst value 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``. Labels present in the data
        can be excluded; labels absent from the data yield 0 components
        in a macro average. For multilabel targets, labels are column
        indices. By default, all labels in ``y_true`` and ``y_pred`` are
        used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``. Until version 0.18
        set ``pos_label=None`` to use another averaging method over
        binary targets.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets. ``None`` returns the
        per-class scores; ``'binary'`` reports only ``pos_label``;
        ``'micro'`` counts total true/false positives and false
        negatives globally; ``'macro'`` is the unweighted mean over
        labels; ``'weighted'`` weights the mean by support; ``'samples'``
        averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, \
             shape = [n_unique_labels]
        Recall of the positive class in binary classification or the
        averaged recall for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import recall_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> recall_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.33...
    """
    # Warn only about ill-defined recall; precision/F are not reported.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('recall',),
                                             sample_weight=sample_weight)
    # (precision, recall, fbeta, support) -- keep only recall.
    return scores[1]
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics.

    Read more in the :ref:`User Guide <classification_report>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.
    target_names : list of strings
        Optional display names matching the labels (same order).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    digits : int
        Number of digits for formatting output floating point values.

    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class,
        followed by a support-weighted average row.

    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
    precision recall f1-score support
    <BLANKLINE>
    class 0 0.50 1.00 0.67 1
    class 1 0.00 0.00 0.00 1
    class 2 1.00 0.67 0.80 3
    <BLANKLINE>
    avg / total 0.70 0.60 0.61 5
    <BLANKLINE>
    """
    # Default to the set of labels actually observed in the data.
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    last_line_heading = 'avg / total'
    # First-column width: widest class name, never narrower than the
    # trailing average-row heading.
    if target_names is None:
        width = len(last_line_heading)
        target_names = ['%s' % l for l in labels]
    else:
        width = max(len(cn) for cn in target_names)
        width = max(width, len(last_line_heading), digits)
    headers = ["precision", "recall", "f1-score", "support"]
    # Build a single printf-style row format reused for header, per-class
    # rows and the averages row ('%% %ds' expands to e.g. '% 12s').
    fmt = '%% %ds' % width  # first column: class name
    fmt += ' '
    fmt += ' '.join(['% 9s' for _ in headers])
    fmt += '\n'
    headers = [""] + headers
    report = fmt % tuple(headers)
    report += '\n'
    # Per-label metrics (average=None keeps one value per label).
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)
    for i, label in enumerate(labels):
        values = [target_names[i]]
        for v in (p[i], r[i], f1[i]):
            values += ["{0:0.{1}f}".format(v, digits)]
        values += ["{0}".format(s[i])]
        report += fmt % tuple(values)
    report += '\n'
    # compute averages (weighted by per-class support)
    values = [last_line_heading]
    for v in (np.average(p, weights=s),
              np.average(r, weights=s),
              np.average(f1, weights=s)):
        values += ["{0:0.{1}f}".format(v, digits)]
    values += ['{0}'.format(np.sum(s))]
    report += fmt % tuple(values)
    return report
def hamming_loss(y_true, y_pred, classes=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.  For multiclass targets it coincides with the subset
    ``zero_one_loss``; for multilabel targets it penalises individual
    label errors and is therefore upper-bounded by the subset zero-one
    loss (and lies in [0, 1] once normalised over samples).

    Read more in the :ref:`User Guide <hamming_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    classes : array, shape = [n_labels], optional
        Integer array of labels.

    Returns
    -------
    loss : float or int
        Average Hamming loss between elements of ``y_true`` and
        ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss

    References
    ----------
    .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label
           Classification: An Overview. International Journal of Data
           Warehousing & Mining, 3(3), 1-13, July-September 2007.
    .. [2] `Wikipedia entry on the Hamming distance
           <http://en.wikipedia.org/wiki/Hamming_distance>`_

    Examples
    --------
    >>> from sklearn.metrics import hamming_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> hamming_loss(y_true, y_pred)
    0.25
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    classes = (unique_labels(y_true, y_pred) if classes is None
               else np.asarray(classes))
    if y_type.startswith('multilabel'):
        # One error per differing label entry, normalised by the total
        # number of (sample, label) assignments.
        n_differences = count_nonzero(y_true - y_pred)
        return n_differences / (y_true.shape[0] * len(classes))
    if y_type in ("binary", "multiclass"):
        return sp_hamming(y_true, y_pred)
    raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
    """Log loss, aka logistic loss or cross-entropy loss.

    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks: the negative
    log-likelihood of the true labels given a probabilistic classifier's
    predictions.  For a single sample with true label yt in {0,1} and
    estimated probability yp that yt = 1,
    ``-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))``.

    Read more in the :ref:`User Guide <log_loss>`.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.
    y_pred : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.
    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).
    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    Examples
    --------
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    binarizer = LabelBinarizer()
    transformed_labels = binarizer.fit_transform(y_true)
    # Binary targets come back as a single column; expand to two columns
    # (P(class 0), P(class 1)).
    if transformed_labels.shape[1] == 1:
        transformed_labels = np.append(1 - transformed_labels,
                                       transformed_labels, axis=1)
    # Clip predicted probabilities away from 0 and 1 so the log is finite.
    predictions = np.clip(y_pred, eps, 1 - eps)
    # np.clip passes through unchanged e.g. when y_pred elements are str.
    if not isinstance(predictions, np.ndarray):
        raise ValueError("y_pred should be an array of floats.")
    # A 1-d y_pred is interpreted as P(positive class) for binary targets.
    if predictions.ndim == 1:
        predictions = predictions[:, np.newaxis]
    if predictions.shape[1] == 1:
        predictions = np.append(1 - predictions, predictions, axis=1)
    # Check if dimensions are consistent.
    check_consistent_length(transformed_labels, predictions)
    transformed_labels = check_array(transformed_labels)
    predictions = check_array(predictions)
    if transformed_labels.shape[1] != predictions.shape[1]:
        raise ValueError("y_true and y_pred have different number of classes "
                         "%d, %d" % (transformed_labels.shape[1],
                                     predictions.shape[1]))
    # Renormalize each row so the clipped probabilities sum to one.
    predictions /= predictions.sum(axis=1)[:, np.newaxis]
    loss = -(transformed_labels * np.log(predictions)).sum(axis=1)
    return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized).

    In the binary case, assuming the labels in ``y_true`` are encoded as
    +1 and -1, a prediction mistake makes ``margin = y_true *
    pred_decision`` negative, so ``1 - margin`` exceeds 1; the cumulated
    hinge loss is therefore an upper bound on the number of mistakes.

    In the multiclass case the function expects that either all labels
    appear in ``y_true`` or that a ``labels`` argument lists them all.
    The multiclass margin is computed following Crammer & Singer; as in
    the binary case the cumulated hinge loss upper-bounds the number of
    mistakes made by the classifier.

    Read more in the :ref:`User Guide <hinge_loss>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive
        label must be greater than the negative label.
    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).
    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass
        hinge loss.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <http://en.wikipedia.org/wiki/Hinge_loss>`_
    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic Implementation
           of Multiclass Kernel-based Vector Machines. JMLR 2, (2001),
           265-292.
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    observed_labels = np.unique(y_true)
    if observed_labels.size > 2:
        # --- Multiclass: Crammer-Singer margin -------------------------
        if (labels is None and pred_decision.ndim > 1 and
                np.size(observed_labels) != pred_decision.shape[1]):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = observed_labels
        encoder = LabelEncoder()
        encoder.fit(labels)
        y_true = encoder.transform(y_true)
        # Mask is True everywhere except, per sample, the true-class
        # column; margin = score(true class) - max(score(other classes)).
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # --- Binary: labels mapped to -1/+1, margin = y * f(x) ---------
        pred_decision = np.ravel(column_or_1d(pred_decision))
        binarizer = LabelBinarizer(neg_label=-1)
        y_true = binarizer.fit_transform(y_true)[:, 0]
        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")
    losses = 1 - margin
    # The hinge loss does not penalize good-enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Check that y_true is binary and y_prob contains valid probabilities.

    Returns ``y_true`` binarized to {0, 1} as a 1-d array; raises
    ``ValueError`` when the targets are not binary or the probabilities
    fall outside [0, 1].
    """
    check_consistent_length(y_true, y_prob)
    labels = np.unique(y_true)
    if labels.size != 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % labels)
    if np.max(y_prob) > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if np.min(y_prob) < 0:
        raise ValueError("y_prob contains values less than 0.")
    return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.

    The Brier score measures the mean squared difference between the
    predicted probability assigned to the positive class and the actual
    binary outcome; smaller is better (hence "loss"), and the value
    always lies in [0, 1].  It is appropriate for binary outcomes but
    not for ordinal variables with three or more values, since it treats
    all outcomes as equally distant from one another.  Which label
    counts as positive is controlled via ``pos_label`` (default: the
    maximum label).

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.
    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    pos_label : int (default: None)
        Label of the positive class. If None, the maximum label is used
        as positive class.

    Returns
    -------
    score : float
        Brier score.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)  # doctest: +ELLIPSIS
    0.037...

    References
    ----------
    http://en.wikipedia.org/wiki/Brier_score
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    if pos_label is None:
        pos_label = y_true.max()
    # Recode targets as 1 for the positive class, 0 otherwise.
    y_true = np.array(y_true == pos_label, int)
    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
    squared_error = (y_true - y_prob) ** 2
    return np.average(squared_error, weights=sample_weight)
| bsd-3-clause |
daeilkim/refinery | refinery/bnpy/bnpy-dev/bnpy/__init__.py | 1 | 2170 | ''' bnpy module __init__ file
'''
# bnpy package initializer: pulls sub-packages into the top-level
# namespace and configures data/output paths from environment variables.
# NOTE(review): Python 2 implicit-relative imports and print statements;
# this module is not Python 3 compatible as written.
import data
import distr
import util
import suffstats
import allocmodel
import obsmodel
from HModel import HModel
import ioutil
# Convenience aliases so callers can use bnpy.load_model / bnpy.save_model.
load_model = ioutil.ModelReader.load_model
save_model = ioutil.ModelWriter.save_model
import init
import learnalg
import Run
from Run import run
import os
import sys
# The following triple-quoted block is disabled code (BNPYOUTDIR
# validation), kept verbatim as dead text.
'''
########################################################### Configure save
########################################################### location
hasWriteableOutdir = False
if 'BNPYOUTDIR' in os.environ:
outdir = os.environ['BNPYOUTDIR']
if os.path.exists(outdir):
try:
with open(os.path.join(outdir, 'bnpytest'), 'w') as f:
pass
except IOError:
sys.exit('BNPYOUTDIR not writeable: %s' % (outdir))
hasWriteableOutdir = True
if not hasWriteableOutdir:
raise ValueError('Environment variable BNPYOUTDIR not specified. Cannot save results to disk')
'''
########################################################### Configure data
########################################################### location
# Locate the repository root (two levels above this file) and expose the
# bundled demo datasets plus any user-specified data directory on sys.path.
root = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
sys.path.append(os.path.join(root, 'demodata/'))
if 'BNPYDATADIR' in os.environ:
    if os.path.exists(os.environ['BNPYDATADIR']):
        sys.path.append(os.environ['BNPYDATADIR'])
    else:
        print "Warning: Environment variable BNPYDATADIR not a valid directory"
########################################################### Optional: viz
########################################################### package for plots
# Plotting support is disabled below; canPlot stays False.
canPlot = False
'''
try:
from matplotlib import pylab
canPlot = True
except ImportError:
print "Error importing matplotlib. Plotting disabled."
print "Fix by making sure this produces a figure window on your system"
print " >>> from matplotlib import pylab; pylab.figure(); pylab.show();"
if canPlot:
import viz
__all__ = ['run', 'Run', 'learn', 'allocmodel','obsmodel', 'suffstats',
'HModel', 'init', 'util','ioutil','viz','distr', 'mergeutil']
'''
# Public API of the package (no 'viz' since plotting is disabled above).
__all__ = ['run', 'Run', 'learn', 'allocmodel','obsmodel', 'suffstats',
           'HModel', 'init', 'util','ioutil','distr', 'mergeutil']
| mit |
pylayers/pylayers | pylayers/location/geometric/Scenario_base.py | 2 | 39343 | # -*- coding:Utf-8 -*-
import numpy as np
import scipy as sp
import time
import pdb
import os
import sys
import pickle as pk
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection # scenario CDF mode 3D
from matplotlib.colors import colorConverter # scenario CDF mode 3D
from pylayers.location.geometric.util.boxn import *
from pylayers.location.geometric.util.model import *
import pylayers.location.geometric.util.geomview as g
import pylayers.location.geometric.util.cdf2
from pylayers.location.algebraic.rss import *
from pylayers.location.algebraic.toa import *
from pylayers.location.algebraic.tdoa import *
from pylayers.location.algebraic.hdf import *
from pylayers.location.algebraic.PyGraphTool import *
from pylayers.location.algebraic.PyMathTool import *
from pylayers.location.geometric.constraints.exclude import *
from pylayers.location.geometric.constraints.rss import *
from pylayers.location.geometric.constraints.toa import *
from pylayers.location.geometric.constraints.tdoa import *
from pylayers.location.geometric.constraints.cla import *
class Scenario(object):
"""
Class Scenario : definition of a static localization scenario
Attributes
----------
an : Anchor nodes coordinates
bn : Blind nodes coordinates
CDF : list
parmsc : parameters dictionary
scenar_type : 'simulated' | 'monte-carlo'
err_type : 'homogene' | 'hybrid'
Constrain_Type : 'TOA' | 'TDOA' | 'RSS' | 'MIX'
Algebraic_method : 'TLS' | 'WLS'
rss_mode : 'mean'
'rss0' : -34.7 dB
'd0' : 1 m
'pn' : 2.64
'std_dev_range' : arange(1,5,.5)
'dec_mode' : 'monodec'
'vcw' : validity constraint width : 3
'sigma_max' : 3
'an_nb' : Number of used anchor nodes
'std_dev_arange' : arange(1,5,.5)
'eval_pos' : True
'algebraic' : True
## parmsc_dis : display parameters dictionary
##
## METHODS
##
## info : Display information about the scenario
## show3(ibn) : 3D Geomview display ibn : index blind node
"""
def __init__(self,an,bn,parmsc={},parmsh={}):
    """Initialize a static localization scenario.

    Parameters
    ----------
    an : array
        Anchor node coordinates.
    bn : array
        Blind node coordinates.
    parmsc : dict, optional
        Scenario parameters; when empty, a full default set is built.
    parmsh : dict, optional
        Display parameters; when empty, defaults are used.

    NOTE(review): mutable default arguments ({}) are shared across calls;
    also `parmsc['exclude_out']` below writes into the *argument* dict,
    not `self.parmsc` — presumably unintended, verify against callers.
    """
    self.an = an
    self.bn = bn
    # Per-anchor measurement standard deviations (filled in later).
    self.std_v = np.zeros(len(an))
    self.CDF = []
    self.std_dev_arr_compt=0
    self.vcw_arr_compt=0
    self.CDF={}  # NOTE(review): rebinds the CDF list above to a dict
    if len(parmsc)==0:
        # parameter of scenario (defaults)
        self.parmsc={}
        self.parmsc['scenar_type']='simulated'#'monte_carlo' # choose the type of scenario : Simulated : give some AN and BN.
        self.parmsc['err_type']='homogene' # homogene/hybrid = error applied on AN
        parmsc['exclude_out']= np.array((np.min(self.an,axis=0),np.max(self.an,axis=0))) # look for estimated position inside the area created by the AN
        self.parmsc['Constrain_Type']='TOA' # choose the type of constrain to compute between BN:TOA/TDOA/RSS/MIX
        ### TDOA parmsc
        self.parmsc['Algebraic_method']='TLS' # if ['Constrain_Type']='TDOA' chose the TDOA method WLS/TLS
        ## RSS parmsc
        self.parmsc['rss_mode']='mean'
        self.parmsc['rss0']=-34.7
        self.parmsc['d0']=1.0
        self.parmsc['pn']=2.64
        self.parmsc['dec_mode']='monodec' # choose the type of constrain to compute between BN:TOA/TDOA/RSS/MIX
        self.parmsc['vcw'] = 3.0 # validity constrain width
        self.parmsc['sigma_max']=7.0 # choose random standard deviation to apply to measures
        self.parmsc['sigma_max_RSS'] =5.0
        self.parmsc['sigma_max_TOA'] =3.0
        self.parmsc['sigma_max_TDOA'] =3.0
        self.parmsc['an_nb']=len(self.an) # choose the number of AN for performing positionning
        self.parmsc['std_dev_arange']=np.arange(1.,5.,.5) # standart deviation range for monte carlo computing
        # monte carlo 3D parameters
        self.parmsc['vcw_arange']=np.arange(1.,4.,0.5) # validity constraint width (fourchette)
        self.parmsc['eval_pos']=True # if True , the position of the last evaluated layer is estimated
        self.parmsc['algebraic']=True # compute algebraic method
    else:
        self.parmsc = parmsc
    if len(parmsh) == 0:
        # NOTE(review): self.parmsh is written here before being created
        # as a dict — would raise AttributeError; confirm intended usage.
        self.parmsh['display']=False # launch geomview interactively
        self.parmsh['scene']=True # display whole scene
        self.parmsh['boxes']=True # display constraint box
        self.parmsh['constr_boxes']=False # display constraint box
        self.parmsh['estimated']=True # display estimated point
    else :
        self.parmsh=parmsh
    # display parameters
    self.parmsc_dis={}
    self.parmsc_dis['CDF_bound']=np.arange(0,5.0,0.01)
    self.parmsc_dis['CDF_bound_auto_adapt']=True
    self.parmsc_dis['room']=[]
    # Interactive display for simulated runs; figure saving for monte-carlo.
    if self.parmsc['scenar_type']=='simulated':
        self.parmsc_dis['display']=True
        self.parmsc_dis['save_fig']=False
    elif self.parmsc['scenar_type']=='monte_carlo':
        self.parmsc_dis['display']=False
        self.parmsc_dis['save_fig']=True
    else :
        self.parmsc_dis['display']=True
        self.parmsc_dis['save_fig']=False
    # Larger fonts/line widths for figures that are saved rather than shown.
    if self.parmsc_dis['display']==False :
        plt.rcParams['xtick.labelsize']='x-large'
        plt.rcParams['ytick.labelsize']='x-large'
        plt.rcParams['axes.labelsize']= 'large'
        plt.rcParams['font.weight']='normal'
        plt.rcParams['xtick.minor.size']=2
        plt.rcParams['legend.fontsize']= 'xx-large'
        plt.rcParams['font.size']=20
        plt.rcParams['grid.linewidth']=3.5
        plt.rcParams['xtick.major.pad']=20
    if self.parmsc_dis['save_fig']:
        self.fig = plt.figure(figsize=(28, 20))
        self.ax = self.fig.add_subplot(111)
    else:
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
    # Line styles used when plotting CDFs (dotted/dashed/solid per series).
    self.marker=[[':r',':g',':b',':k',':m',':c'],['--r','--g','--b','--k','--m','--c'],['-r','-g','-b','-k','--m','--c']]
    # For TDOA, pre-build the list of anchor pairs (first anchor vs each other).
    if self.parmsc['Constrain_Type']=='TDOA':
        self.dan=[]
        for i in range(len(self.an)-1):
            self.dan.append(vstack((an[0],an[i+1])))
def info(self):
    """
    info : display information about current scenario

    Prints scenario type, constraint type, node counts and — depending on
    ``parmsc['scenar_type']`` — simulation parameters or per-computation
    monte-carlo CDF settings.  Output only; no return value.
    """
    print "-------------------------------"
    print " SCENARIO INFORMATION"
    print "-------------------------------"
    print "scenario type : ",self.parmsc['scenar_type']
    print "constraints type : ",self.parmsc['Constrain_Type']
    #### NOEUDS (nodes)
    if self.parmsc['Constrain_Type']=='TDOA':
        print "nb of AN couple : ", len(self.dan)
    else :
        print "nb AN : ", len(self.an)
    print "nb BN : ", len(self.bn)
    print 'limit of BNs positions',self.parmsc_dis['room'],'m'
    if self.parmsc['scenar_type']=='simulated':
        ##### geometric computaton
        print 'decimation method : ',self.parmsc['dec_mode']
        if self.parmsc['Constrain_Type']=='TOA':
            print 'validity constraint width :',self.parmsc['vcw'],'m'
        print "Error type : ",self.parmsc['err_type']
        print "std dev : ",self.parmsc['sigma_max'],'ns'
        print "std dev : ",self.parmsc['sigma_max_RSS'],'ns'
        print "std dev : ",self.parmsc['sigma_max_TOA'],'ns'
        print "std dev : ",self.parmsc['sigma_max_TDOA'],'ns'
        print "std vect : ",self.std_v,'ns'
        ########## algebraic
        print "algebraic compute : ",self.parmsc['algebraic']
        if self.parmsc['algebraic']:
            if self.parmsc['Constrain_Type']=='TDOA':
                print 'algebraic TDOA method :',self.parmsc['Algebraic_method'] # if ['Constrain_Type']='TDOA' chose the TDOA method WLS/TLS
        ####### RSS
        if self.parmsc['Constrain_Type']=='RSS':
            print 'rss mode :',self.parmsc['rss_mode']
            print 'RSS0 :',self.parmsc['rss0']
            print 'd0 : ',self.parmsc['d0']
            print 'np : ',self.parmsc['pn']
    #############" MONTE CARLO
    if self.parmsc['scenar_type']=='monte_carlo':
        lcdf=len(self.CDF)
        print '\n'
        for i in range(lcdf):
            print 'computation nb',i+1
            if self.CDF[i]['mode']=='algebraic':
                print 'algebraic'
                print '\n'
            else :
                print 'err_type :',self.CDF[i]['err_type']
                print 'vcw :',self.CDF[i]['vcw'],'m'
                print 'decimation :',self.CDF[i]['dec']
                print 'sigma max :',self.CDF[i]['sigma_max'],self.std_v,'ns'
                print '\n'
def show3(self,amb=True,sc='all'):
    """
    show3(param) : Geomview 3D vizualization of the scenario
    The scene is stored in the file scene.list in the geom directory
    param :
        display : True
        R : Sphere radius (meter)
    """
    # Delegates to the constraint-layer-array viewer; self.cla is only
    # created by run(), so call run() first.
    self.cla.show3(amb=amb,sc=sc)
def run(self):
"""
run : run simulation of the scenario
"""
self.time_compute={}
self.time_compute['RGPA']=[]
self.time_compute['algebraic']=[]
self.CRB=[]
Nbn = len(self.bn)
Nan = len(self.an)
if self.parmsc['save_pe']:
self.pe = []
self.p_LS = []
if self.parmsc['save_CLA']:
self.CLA = []
self.err1 = np.array([])
#self.err2 = np.array([])
self.errx = np.array([])
self.erry = np.array([])
self.errz = np.array([])
self.errLS = np.array([])
self.errxLS = np.array([])
self.erryLS = np.array([])
self.errzLS = np.array([])
# list for algebraic
atoabn = []
astdbn = []
P = []
if self.parmsc['Constrain_Type']=='TDOA':
lbcl=Nan-1
else :
lbcl=Nan
tdoa_idx = nonzero(np.array(l_connect)=='TDOA')[0]
if self.parmsc['Constrain_Type']=='hybrid' or self.parmsc['Constrain_Type']=='test':
algehyb=1
else :
algehyb=0
if len(tdoa_idx) != 0:
lbcl=Nan-1
#self.dan=[]
#for i in range(len(tdoa_idx)-1):
# self.dan.append(vstack((self.an[tdoa_idx[0]],self.an[i+1])))
self.rss_idx = nonzero(np.array(l_connect)=='RSS')[0]
self.toa_idx = nonzero(np.array(l_connect)=='TOA')[0]
self.tdoa_idx = nonzero(np.array(l_connect)=='TDOA')[0]
self.ERRSAVE=[]
for ibn in range(Nbn): # for each blind node
self.errRSS=zeros((1))
self.errTOA=zeros((1))
self.errTDOA=zeros((1))
self.ibn=ibn
errli = []
Constraint.C_Id = 0 # reset constraint Id for each BN
print " Blind Node N",ibn+1,"/",Nbn
atv=[]
pbn = self.bn[ibn]
#print "Blind Node N ",ibn,pbn
self.tic_ensemblist=time.time()
cla = CLA(self.parmsh)
cla.bn = self.bn[ibn]
clarss = CLA(self.parmsh)
clatoa = CLA(self.parmsh)
clatdoa = CLA(self.parmsh)
#cla.C_Id=0
if parmsc['exclude_out'] != None :
E = Exclude(nodes=parmsc['exclude_out'])
cla.append(E)
for ian in range(lbcl): # for each AN or couple of AN (TDOA)
#print "Anchor Node N ",ian
try :
self.parmsc['Constrain_Type'] = self.parmsc['l_connect'][ian]
except :
pass
# pdb.set_trace()
rgpatimea=time.time()
if self.parmsc['Constrain_Type']=='TOA':
pan = self.an[ian]
# tvalue : true value (ns)
tvalue = np.sqrt(np.dot(pan-pbn,pan-pbn))/3e8
# err (ns)
err = (self.std_v[ibn,ian]*sp.randn())
while err + tvalue < 0:
err = (self.std_v[ibn,ian]*sp.randn())
self.errTOA=np.vstack((self.errTOA,err))
# value (toa : ns )
value = max(0,tvalue+err)
C = TOA(value=value*1e9,
std=self.std_v[ibn,ian]*1e9,
vcw=self.parmsc['vcw'],
p=pan)
cla.append(C)
clatoa.append(C)
if self.parmsc['Constrain_Type']=='TDOA':
pan = vstack((self.an[tdoa_idx[0]],self.an[ian+1]))
# dan : delay between 2 AN (ns)
dan = np.sqrt(dot(pan[0]-pan[1],pan[0]-pan[1]))/3e8
minvalue = -dan
maxvalue = dan
toa1 = np.sqrt(np.dot(pan[0]-pbn,pan[0]-pbn))/3e8
toa2 = np.sqrt(np.dot(pan[1]-pbn,pan[1]-pbn))/3e8
tvalue = toa2-toa1
err = self.std_v[ibn,ian]*sp.randn()
while ((tvalue + err) < minvalue) or ((tvalue + err) > maxvalue):
err = self.std_v[ibn,ian]*sp.randn()
self.errTDOA=np.vstack((self.errTDOA,err))
# tvalue : true value (ns)
# value = max(minvalue,tvalue + err)
# value = min(maxvalue,value)
# value (ns)
value = tvalue+err
C = TDOA(value=-value*1e9,
std=(self.std_v[ibn,ian]*1e9),
vcw=self.parmsc['vcw'],
p = pan)
cla.append(C)
#C = TDOA(p=pan,value=value,std=2)
clatdoa.append(C)
if self.parmsc['Constrain_Type']=='RSS':
pan = self.an[ian]
######################################## MODEL RSS NICO
# dr = max(0, (np.sqrt(np.dot(pan-pbn,pan-pbn))))
# M = Model()
# err = (self.std_v[ibn,ian]*1e-9*sp.randn())
# value = M.OneSlope(max(0,dr + err))
# value = min(500,value)
# value = max(-500,value)
######################################## MOHAMED
d0 = self.parmsc['d0']
RSSnp = vstack((self.parmsc['pn'],self.parmsc['pn']))
PL0= vstack((self.parmsc['rss0'],self.parmsc['rss0']))
RSSStd= vstack((self.parmsc['sigma_max_RSS'],self.parmsc['sigma_max_RSS']))
PP= vstack((self.bn[ibn],self.bn[ibn]))
PA= vstack((pan,pan))
rssloc = RSSLocation(PP)
value = (rssloc.getPLmean(PA.T, PP.T, PL0, d0, RSSnp))
err = (RSSStd*randn(shape(value)[0],shape(value)[1]))[0][0]
self.errRSS=np.vstack((self.errRSS,err))
value = value[0] + err
# value = rssloc.getPL(PA.T, PP.T, PL0, d0, RSSnp, RSSStd)
value = value
# value = self.parmsc['rss0']-10*self.parmsc['pn']*log10(dr/self.parmsc['d0'])+self.err_v[ibn,ian]
self.Model = {}
self.Model['PL0'] =self.parmsc['rss0']
self.Model['d0'] = self.parmsc['d0']
self.Model['RSSnp'] = self.parmsc['pn']
self.Model['RSSStd'] = self.parmsc['sigma_max_RSS']
self.Model['Rest'] = 'mode'
C = RSS(value=value,
std=self.std_v[ibn,ian],
vcw=self.parmsc['vcw'],
model=self.Model,
p=pan )
cla.append(C)
clarss.append(C)
if self.parmsc['algebraic']:
atv.append(value)
if len(self.rss_idx) != 0:
self.errRSS = delete(self.errRSS,0,0)
if len(self.toa_idx) != 0:
self.errTOA = delete(self.errTOA,0,0)
if len(self.tdoa_idx) != 0:
self.errTDOA = delete(self.errTDOA,0,0)
# version boite recursive
#
######################### CLA TOTALE
cla.merge2()
cla.refine(cla.Nc)
self.cla = cla
### DoubleListRefine version
cla.estpos2()
pe1=cla.pe
rgpatimeb=time.time()
self.time_compute['RGPA'].append(rgpatimeb-rgpatimea)
self.pe.append(pe1)
errli.append(np.sqrt(np.dot(pe1[:2]-pbn[:2],pe1[:2]-pbn[:2])))
#print len(parmsc['l_connect'])
if len(parmsc['l_connect']) > 4 : # pour ne pas calculer 2fois les cas non hybrides
if len(nonzero(np.array(parmsc['l_connect'])=='RSS')[0]) != 0:
for i in range(4):
clarss.c[i].Id=i
clarss.merge2()
clarss.refine(clarss.Nc)
clarss.estpos2()
clarss.bn=bn[ibn]
self.clarss=clarss
self.perss=clarss.pe
errli.append(np.sqrt(np.dot(self.perss[:2]-pbn[:2],self.perss[:2]-pbn[:2])))
if len(nonzero(np.array(parmsc['l_connect'])=='TOA')[0]) != 0:
for i in range(4):
clatoa.c[i].Id=i
clatoa.merge2()
clatoa.refine(clatoa.Nc)
clatoa.estpos2()
clatoa .bn=bn[ibn]
self.clatoa=clatoa
self.petoa=clatoa.pe
errli.append(np.sqrt(np.dot(self.petoa[:2]-pbn[:2],self.petoa[:2]-pbn[:2])))
if len(nonzero(np.array(parmsc['l_connect'])=='TDOA')[0]) != 0:
for i in range(3):
clatdoa.c[i].Id=i
clatdoa.merge2()
clatdoa.refine(clatdoa.Nc)
clatdoa.estpos2()
clatdoa.bn=bn[ibn]
self.clatdoa=clatdoa
self.petdoa=clatdoa.pe
errli.append(np.sqrt(np.dot(self.petdoa[:2]-pbn[:2],self.petdoa[:2]-pbn[:2])))
#print errli
err1=min(errli)
#print err1
self.err1 = np.hstack((self.err1,err1))
#self.err2 = np.hstack((self.err2,err2))
#if err >3:
# pdb.set_trace()
if self.parmsc['algebraic']:
if algehyb==1:
self.parmsc['Constrain_Type']='hybrid'
algetimea=time.time()
p_LS = self.algebraic_compute(atv)
algetimeb=time.time()
self.time_compute['algebraic'].append(algetimeb-algetimea)
if self.parmsc['save_pe']:
self.p_LS.append(p_LS)
if self.parmsc['Constrain_Type'] != 'hybrid'
errLS = np.sqrt(np.dot(p_LS[:2]-pbn[:2],p_LS[:2]-pbn[:2]))
#elif self.parmsc['Algebraic_method'] == 'CRB':
# errLS = np.sqrt(self.CRB)
else :
errLS = np.sqrt(np.dot(p_LS[:2]-pbn[:2],p_LS[:2]-pbn[:2]))
self.errLS = np.hstack((self.errLS,errLS))
if errli > errLS:
print 'NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNnn\n',errli,'\n',errLS
def algebraic_compute(self,atv):
    """Estimate the blind-node position from the current observation vector.

    Splits the observations `atv` into RSS / TOA / TDOA groups (using the
    index lists computed elsewhere on self), builds the per-technology anchor
    matrices, then dispatches to the algebraic estimator selected by
    self.parmsc['Algebraic_method'] ('TWLS', 'ML' or 'SDP'; 'LS'/'TLS'/'WLS'
    are placeholders).  Optionally appends the Cramer-Rao bound for the
    active technology mix to self.CRB.

    Parameters
    ----------
    atv : indexable of observed values (one entry per anchor link).

    Returns
    -------
    2-element array: the estimated (x, y) position (first column of P).

    NOTE(review): the many `X == None` / `X != None` comparisons below are
    used as None-tests on objects that may be numpy arrays; on modern numpy
    that comparison is elementwise and truth-testing it raises -- this code
    relies on old-numpy behaviour.  `is None` would be the safe form.
    """
    rss_idx = self.rss_idx
    toa_idx = self.toa_idx
    tdoa_idx = self.tdoa_idx
    P=[]
    #
    # print 'RSS_idx',rss_idx
    # print 'TOA_idx',toa_idx
    # print 'TDOA_idx',tdoa_idx
    ############### RSS ##############
    # Seed rows of zeros are stacked on then deleted below; path-loss model
    # parameters (d0, exponent pn, reference power rss0) come from parmsc.
    RN_RSS=zeros(3)
    Rss=zeros(1)
    RSSStd=zeros(1)
    d0 = self.parmsc['d0']
    RSSnp = self.parmsc['pn']*ones((len(self.an[rss_idx]),1))
    PL0=self.parmsc['rss0']*ones((len(self.an[rss_idx]),1))
    RSSStd= self.parmsc['sigma_max_RSS']*ones((len(self.an[rss_idx]),1))
    if len(rss_idx) != 0:
        for i in rss_idx:
            aa=np.array(self.an[i])
            ss=np.array(atv[i])
            RN_RSS=np.vstack((RN_RSS,aa))
            Rss=np.vstack((Rss,ss))
        # Drop the zero seed row; anchors end up as columns, 2-D (x,y) only.
        RN_RSS=np.delete(RN_RSS,0,0).T
        RN_RSS = RN_RSS[:2]
        Rss=np.delete(Rss,0,0)
    else :
        RN_RSS=None
    ############### TOA ##############
    RN_TOA=zeros(3)
    ToA=zeros(1)
    ToAStd=zeros(1)
    if len(toa_idx) != 0:
        for i in toa_idx:
            aa=np.array(self.an[i])
            ss=np.array(atv[i])
            # Per-link noise std for the current blind node self.ibn.
            tt=np.array(self.std_v[self.ibn,i])
            RN_TOA=np.vstack((RN_TOA,aa))
            ToA=np.vstack((ToA,ss))
            ToAStd=np.vstack((ToAStd,tt))
        RN_TOA=np.delete(RN_TOA,0,0).T
        RN_TOA = RN_TOA[:2]
        ToA=np.delete(ToA,0,0)
        ToAStd=np.delete(ToAStd,0,0)
    else :
        RN_TOA=None
    ############### TDOA ##############
    RN_TDOA=zeros(3)
    RN_TDOA_ref=zeros(3)
    TDoA=zeros(1)
    TDoAStd=zeros(1)
    if len(tdoa_idx) != 0:
        #RN_TDOA=zeros(3)
        #for i in tdoa_idx:
        #    aa=np.array(self.an[i])
        #    RN_TDOA=np.vstack((RN_TDOA,aa))
        # First TDOA anchor is the common reference; remaining ones are the
        # measurement anchors (columns after transpose).
        RN_TDOA=(self.an[tdoa_idx[1:]]).T
        # NOTE(review): this broadcast ( (3,) * ones(shape(RN_TDOA)) ) only
        # lines up when there are exactly 4 TDOA anchors (3 columns) -- TODO
        # confirm before using a different anchor count.
        RN_TDOA_ref=(self.an[tdoa_idx[0]]*ones(np.shape(RN_TDOA))).T
        # NOTE(review): iterates tdoa_idx[0:-1] (drops the LAST index) while
        # the anchors above drop the FIRST -- looks like an off-by-one in
        # which links are paired; verify against how atv is filled.
        for i in tdoa_idx[0:-1]:
            ss=np.array(atv[i])
            tt=np.array(self.std_v[self.ibn,i])
            TDoA=np.vstack((TDoA,ss))
            TDoAStd=np.vstack((TDoAStd,tt))
        TDoA=np.delete(TDoA,0,0)
        TDoAStd=np.delete(TDoAStd,0,0)
        RN_TDOA = RN_TDOA[:2]
        RN_TDOA_ref = RN_TDOA_ref[:2]
    else :
        RN_TDOA=None
        RN_TDOA_ref=None
    # if RN_RSS != None :
    #     print '############### RSS ##################'
    #     print 'RNRSS\n',RN_RSS
    #     print 'PL0\n',PL0
    #     print 'd0\n', d0
    #     print 'RSS\n', Rss
    #     print 'RSSnp\n', RSSnp
    #     print 'RSSSTD\n',RSSStd
    # if RN_TOA != None :
    #     print '############## TOA ##################'
    #     print 'RNTOA\n', RN_TOA
    #     print 'ToA\n',ToA
    #     print 'ToAStd\n', ToAStd
    # if RN_TDOA != None :
    #     print '############### TDOA ##################'
    #     print 'RNTDOA\n', RN_TDOA
    #     print 'RNTDOA_ref\n', RN_TDOA_ref
    #     print 'TDOA\n', TDoA
    #     print 'TDOASTD\n', TDoAStd
    self.tic_algebric=time.time()
    # One solver object per technology mix (hybrid, RSS-only, TOA-only,
    # TDOA-only); only the one matching the available data is actually used.
    S1=HDFLocation(RN_RSS, RN_TOA, RN_TDOA)
    S2=RSSLocation(RN_RSS)
    S3=ToALocation(RN_TOA)
    S4=TDoALocation(RN_TDOA)
    if self.parmsc['Algebraic_method'] == 'LS':
        print 'to be implemented'
    elif self.parmsc['Algebraic_method'] == 'TLS':
        print 'to be implemented'
    elif self.parmsc['Algebraic_method'] == 'WLS':
        print 'to be implemented'
    elif self.parmsc['Algebraic_method'] == 'TWLS':
        if RN_RSS ==None and RN_TOA==None:
            P=S4.TWLSTDoALocate(RN_TDOA,RN_TDOA_ref, TDoA, TDoAStd)
        elif RN_RSS==None and RN_TDOA==None:
            P=S3.TWLSToALocate(RN_TOA,ToA, ToAStd)
        elif RN_TOA==None and RN_TDOA==None:
            P=S2.TWLSRSSLocate(RN_RSS, PL0, d0, Rss, RSSnp, RSSStd, 'mode')
        else:
            P=S1.TWLSHDFLocate(RN_RSS, RN_TOA, RN_TDOA,RN_TDOA_ref, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, Rss, RSSnp, RSSStd, 'mode')
    elif self.parmsc['Algebraic_method'] == 'ML':
        # Random start points inside the LxL area; L is a module global.
        PP=L*rand(2,1) # for 3D replace by L*rand(3,1)
        P0=L*rand(2,1) # for 3D replace by L*rand(3,1)
        # P0[2]=0.0 # for 3D uncomment
        if RN_RSS ==None and RN_TOA==None:
            P=S4.MLTDoALocate(PP, P0, RN_TDOA,RN_TDOA_ref, TDoA, TDoAStd)
        elif RN_RSS==None and RN_TDOA==None:
            P=S3.MLToALocate(PP, P0, RN_TOA,ToA, ToAStd)
        elif RN_TOA==None and RN_TDOA==None:
            P=S2.MLDRSSLocate(PP, P0, RN_RSS, PL0, d0, Rss, RSSnp, RSSStd)
        else:
            P=S1.MLHDFLocate(PP, P0, RN_RSS, RN_TOA, RN_TDOA,RN_TDOA_ref, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, Rss, RSSnp, RSSStd, 'mode')
    elif self.parmsc['Algebraic_method'] == 'SDP':
        if RN_RSS ==None and RN_TOA==None:
            P=S4.SDPTDoALocate(RN_TDOA,RN_TDOA_ref, TDoA, TDoAStd)
        elif RN_RSS==None and RN_TDOA==None:
            P=S3.SDPToALocate(RN_TOA,ToA, ToAStd)
        elif RN_TOA==None and RN_TDOA==None:
            # NOTE(review): SDP variant receives -Rss where the others use
            # Rss -- presumably a sign-convention difference; confirm.
            P=S2.SDPRSSLocate(RN_RSS, PL0, d0, -Rss, RSSnp, RSSStd, 'mode')
        else:
            P=S1.SDPHDFLocate(RN_RSS, RN_TOA, RN_TDOA,RN_TDOA_ref, ToA, ToAStd, TDoA, TDoAStd, PL0, d0, Rss, RSSnp, RSSStd, 'mode')
    else :
        print "You have to chose the self.parmsc['Algebraic_method'] between those choices :\n LS,TLS,WLS,TWLS,ML,SDP "
    if self.parmsc['CRB'] :
        # Cramer-Rao bound for the active technology combination; the std
        # vectors come from the per-technology error accumulators on self.
        CRBL=CRBLocation(None)
        if len(rss_idx) != 0:
            RSSStdX = self.errRSS#[rss_idx]#*ones(len(self.an[rss_idx]))
        if len(toa_idx) != 0:
            TOAStdX =self.errTOA#[toa_idx]#*ones((len(self.an[toa_idx]),1))
        if len(tdoa_idx) != 0:
            TDOAStdX =self.errTDOA#[tdoa_idx]#*ones((len(self.an[tdoa_idx])-1,1))
        PP=self.bn[self.ibn,:2]
        ###################################### RSS PUR
        if RN_TOA==None and RN_TDOA==None:
            print 'RSS'
            self.CRB.append(sqrt(CRBL.CRB_RSS_fim(PP, RN_RSS, RSSnp,RSSStdX)))
        ###################################### TOA PUR
        elif RN_RSS==None and RN_TDOA==None: # TOA
            print 'TOA CRB'
            self.CRB.append(sqrt(CRBL.CRB_TOA_fim(PP, RN_TOA,TOAStdX)))
        ###################################### TDOA PUR
        elif RN_RSS ==None and RN_TOA==None : # TDOA
            print 'TDOA'
            self.CRB.append(sqrt(CRBL.CRB_TDOA_fim(PP, RN_TDOA,RN_TDOA_ref,TDOAStdX)))
        elif RN_TOA==None and RN_TDOA!= None:
            ###################################### TDOA
            if RN_RSS==None:
                # NOTE(review): this branch is unreachable (the pure-TDOA
                # case is caught above) and its argument list looks like a
                # copy/paste from the RSS call -- flagging, not changing.
                print 'TDOA2'
                self.CRB.append(sqrt(CRBL.CRB_TDOA_fim(PP, RN_RSS, PL0, RSSStdX)) )
            ###################################### RSS + TDOA
            else :
                print 'RSS+TDOA'
                self.CRB.append(sqrt(CRBL.CRB_RSS_TDOA_fim(PP, RN_RSS, RN_TDOA,RN_TDOA_ref,RSSnp, RSSStdX, TDOAStdX )))
        elif RN_TOA!=None and RN_TDOA== None:
            ##################################### TOA
            if RN_RSS==None:
                print 'TOA'
                self.CRB.append(sqrt(CRBL.CRB_TOA_fim(PP, RN_TOA,TOAStdX)))
            ##################################### RSS + TOA
            else :
                print 'RSS + TOA'
                self.CRB.append(sqrt(CRBL.CRB_RSS_TOA_fim(PP, RN_RSS,RN_TOA, RSSnp, RSSStdX, TOAStdX)))
        elif RN_TOA!=None and RN_TDOA!= None:
            ##################################### TOA+TDOA
            if RN_RSS==None :
                print 'TOA + TDOA'
                self.CRB.append(sqrt(CRBL.CRB_TOA_TDOA_fim(PP, RN_TOA, RN_TDOA, RN_TDOA_ref,TOAStdX,TDOAStdX)))
            ##################################### RSS+TOA+TDOA
            else :
                print 'RSS + TOA + TDOA'
                self.CRB.append(sqrt(CRBL.CRB_RSS_TOA_TDOA_fim(PP, RN_RSS, RN_TOA, RN_TDOA, RN_TDOA_ref, RSSnp, RSSStdX, TOAStdX, TDOAStdX)))
    P=array(P)
    # Only the first column (the position estimate) is returned.
    return P[:,0]
def CDF_figure_gen(self,in_cdf,c_nb):
    """Plot one CDF curve on self.ax and return the resulting line artist.

    `in_cdf` carries the curve data ('cdf' or 'cdf_alg' depending on its
    'mode' key) plus a marker-group counter 'cmpt'; `c_nb` selects the marker
    within that group.  If nothing is plotted (non-simulated scenario) the
    placeholder label string is returned instead.
    """
    bnd = self.parmsc_dis['CDF_bound']
    curve_vals = in_cdf['cdf_alg'] if in_cdf['mode'] == 'algebraic' else in_cdf['cdf']
    #size ko
    room = self.parmsc_dis['room']
    n_pts = len(bnd)
    cmpt = in_cdf['cmpt']
    handle = 'c' + str(c_nb)
    if self.parmsc['scenar_type'] == 'simulated':
        handle = self.ax.plot(bnd, curve_vals[:n_pts], self.marker[cmpt][c_nb], linewidth=3)
    return handle
def CDFdisplay(self):
    """Assemble the CDF descriptors for the geometric and algebraic errors
    and dump the raw curves (values, legends, axis bound) as .npy files
    under ./cdf/<filename>/ for later plotting.

    NOTE(review): relies on the module-level global `filename` built in the
    __main__ section -- confirm before reusing this method elsewhere.
    """
    title = 'CDF : ' + self.parmsc['Constrain_Type'] + '\n'+'for '+str(len(self.bn)) +' random BNs positions ' +'\n' +r' $\sigma_{max}(RSS)$=' +str(self.parmsc['sigma_max_RSS']) +'m $\sigma_{max}(TOA)$=' +str(self.parmsc['sigma_max_TOA']) +' m ' +'$\sigma_{max}(TDOA)$=' +str(self.parmsc['sigma_max_TDOA']) +' m '
    leg_alg = 'Algebraic methode :' +self.parmsc['Algebraic_method']
    leg2 = 'Geometric Box' +r' $\sigma_{max} RSS$=' +str(self.parmsc['sigma_max_RSS']) +'TOA' +str(self.parmsc['sigma_max_TOA']) +'TDOA' +str(self.parmsc['sigma_max_TDOA']) +' ns ' +str(self.std_v) +' ' +' vcw:' +str(self.parmsc['vcw'])
    LEG = 'Hybrid ensemblist'
    ld = []
    """
    d0 = {}
    d0['values'] = self.err2
    d0['bound'] = np.linspace(0,max(self.err2),100)
    d0['xlabel'] = 'distance (m)'
    d0['ylabel'] = 'Cummulative density function'
    d0['legend'] = 'Method Grid'
    d0['title'] = title
    d0['marker'] = 'r-'
    d0['linewidth'] = 3
    d0['filename'] = 'essai.png'
    ld.append(d0)
    """
    # Geometric (ensemblist) error curve descriptor.
    d2 = {}
    d2['values'] = self.err1
    d2['bound'] = np.linspace(0,20,100)#np.linspace(0,max(self.err1),100)
    d2['xlabel'] = 'distance (m)'
    d2['ylabel'] = 'Cummulative density function'
    d2['legend'] = LEG#'Method Box'
    d2['title'] = title
    d2['marker'] = 'g-'
    d2['linewidth'] = 3
    d2['filename'] = 'essai.png'
    ld.append(d2)
    if self.parmsc['algebraic']:
        # Algebraic estimator error curve descriptor.
        d1 = {}
        d1['values'] = self.errLS
        d1['bound'] = np.linspace(0,20,100)#np.linspace(0,max(self.errLS),100)
        d1['xlabel'] = 'distance (m)'
        d1['ylabel'] = 'Cummulative density function'
        d1['legend'] = leg_alg
        # BUG FIX: was the literal string 'title'; d2 above uses the
        # `title` variable, so d1 clearly should too.
        d1['title'] = title
        d1['marker'] = 'b-'
        d1['linewidth'] = 3
        d1['filename'] = 'essai.png'
        ld.append(d1)
    # c1 = CDF.CDF(ld)
    # c1.show()
    fname=filename
    self.CDF[fname]={}
    self.CDF[fname]['L']=[]
    self.CDF[fname]['L'].append(self.err1)
    self.CDF[fname]['L'].append(self.errLS)
    self.CDF[fname]['L'].append(self.CRB)
    self.CDF[fname]['leg']=['Geometric', 'Algebraic','CRB']
    self.CDF[fname]['limit']=max(max(self.err1),max(self.errLS))
    # NOTE(review): exit status 512 from `cd` is used as a "directory does
    # not exist" probe before creating it -- shell/OS dependent.
    if os.system('cd ./cdf/'+fname) == 512:
        os.system('mkdir ./cdf/'+fname)
    np.save('./cdf/' +fname +'/L',self.CDF[fname]['L'])
    np.save('./cdf/' +fname +'/leg',self.CDF[fname]['leg'])
    np.save('./cdf/' +fname +'/bound',self.CDF[fname]['limit'])
    # cdf(self.err1,"g-","Geometric method",1)
    #
    # cdf(self.err1,"g-","RGPA",2)
    # cdf(self.errLS,"b-",self.parmsc['Algebraic_method'],2)
    # cdf(self.CRB,"g-.",r"$\sqrt{CRLB}$",2)
    # plt.legend(loc=4,numpoints=1)
    # plt.axis([0,20,0,1])
    # plt.grid('on')
    # plt.xlabel("Positioning error (m)")
    # plt.ylabel("Cumulative probability")
    # plt.savefig(filename, format="pdf")
    # plt.close()
    # file.write("PA "+str(median(self.err1))+"\n")
    # file1.write("Geo "+str(mean(self.err1))+"\n")
    # file1.write("ML "+str(mean(self.errLS))+"\n")
    # file1.write("CRB "+str(mean(self.CRB))+"\n")
def compute(self):
    """
    compute : start the simulation of the current scenario

    Builds the per-link noise matrix self.std_v (one row per blind node,
    one column per anchor) according to parmsc['err_type'] and
    parmsc['Constrain_Type'], then runs the scenario and renders the CDFs.
    """
    self.std_v_save=[]
    if self.parmsc['scenar_type']=='simulated':
        if self.parmsc['err_type']=='homogene':
            if self.parmsc['Constrain_Type'] != 'hybrid':
                # BUG FIX: ones() takes a single shape *tuple*; the original
                # passed two positional ints, which makes the second one a
                # dtype argument and raises (the 'hybrid' err_type branch
                # below already used the tuple form).
                self.std_v=self.parmsc['sigma_max']*sp.ones((len(self.bn),len(self.an)))
                self.std_v_save.append(self.std_v)
            else :
                # One constant sigma per technology, spread over the anchor
                # columns belonging to that technology.
                self.std_v=zeros((len(self.bn),len(self.an)))
                pstdv=nonzero(array(self.parmsc['l_connect'])=='RSS')[0]
                self.std_v[:,pstdv]=self.parmsc['sigma_max_RSS']*sp.ones((len(self.bn),len(self.an[pstdv])))
                pstdv=nonzero(array(self.parmsc['l_connect'])=='TOA')[0]
                self.std_v[:,pstdv]=self.parmsc['sigma_max_TOA']*sp.ones((len(self.bn),len(self.an[pstdv])))
                pstdv=nonzero(array(self.parmsc['l_connect'])=='TDOA')[0]
                self.std_v[:,pstdv]=self.parmsc['sigma_max_TDOA']*sp.ones((len(self.bn),len(self.an[pstdv])))
        elif self.parmsc['err_type']=='hybrid':
            if self.parmsc['Constrain_Type'] != 'hybrid':
                # rand(d0, d1) legitimately takes separate dims.
                self.std_v=self.parmsc['sigma_max']*sp.rand(len(self.bn),len(self.an))
                self.std_v_save.append(self.std_v)
            else :
                self.std_v=zeros((len(self.bn),len(self.an)))
                pstdv=nonzero(array(self.parmsc['l_connect'])=='RSS')[0]
                self.std_v[:,pstdv]=(self.parmsc['sigma_max_RSS'])*sp.ones((len(self.bn),len(self.an[pstdv])))
                pstdv=nonzero(array(self.parmsc['l_connect'])=='TOA')[0]
                # NOTE(review): TOA/TDOA sigmas are converted from metres to
                # seconds via /3e8 here but not in the 'homogene' branch --
                # confirm which unit self.std_v is expected to carry.
                self.std_v[:,pstdv]=(self.parmsc['sigma_max_TOA']/3e8)*sp.ones((len(self.bn),len(self.an[pstdv])))
                pstdv=nonzero(array(self.parmsc['l_connect'])=='TDOA')[0]
                self.std_v[:,pstdv]=(self.parmsc['sigma_max_TDOA']/3e8)*sp.ones((len(self.bn),len(self.an[pstdv])))
        elif self.parmsc['err_type']=='test':
            # Fixed blind-node position for reproducible debugging.
            self.bn =np.array([ 7.88602567, 17.46029539, 0. ])
            self.bn = vstack((self.bn,self.bn))
        else :
            # Parenthesised print is valid (and identical) in Python 2 and 3.
            print('compute() : non-valid err_type')
    cdfbound = max([self.parmsc['sigma_max_RSS'],self.parmsc['sigma_max_TOA'],self.parmsc['sigma_max_TDOA']])
    # self.parmsc_dis['CDF_bound']=np.arange(0,cdfbound+3.,0.01)
    self.parmsc_dis['CDF_bound']=np.arange(0,20,0.01)
    self.run()
    self.CDFdisplay()
if __name__=="__main__":
    # ----------------------------------------------------------------------
    # Scenario setup: an LxL metre room, Nbn random blind nodes, and anchor
    # nodes grouped per ranging technology (RSS / TOA / TDOA) via `connect`.
    # ----------------------------------------------------------------------
    L = 20
    H1 = 0.0
    H2 = H1
    H3 = H1
    Nbn = 1000
    save_time={}
    np.random.seed(0)
    connect = {}
    connect['RSS'] = 0
    connect['TOA'] = 0
    connect['TDOA'] = 1
    filename = ''
    sp.random.seed(0)
    an =zeros(3)
    # node involved in localization
    l_connect=[]
    if connect['RSS']:
        BS1 = np.array([2*L/3.0,0.0,H1])
        l_connect.append('RSS')
        BS2 = np.array([L,2*L/3.0,H2])
        l_connect.append('RSS')
        BS3 = np.array([L/3.0,L,H3])
        l_connect.append('RSS')
        BS4 = np.array([0.0,L/3.0,H1])
        l_connect.append('RSS')
        an = vstack((an,BS1))
        an = vstack((an,BS2))
        an = vstack((an,BS3))
        an = vstack((an,BS4))
        filename = filename + '_RSSI'
    if connect['TOA']:
        MS1 = np.array([L/3.0,0.0,H1])
        l_connect.append('TOA')
        MS2 = np.array([L,L/3.0,H2])
        l_connect.append('TOA')
        MS3 = np.array([2*L/3.0,L,H2])
        l_connect.append('TOA')
        MS4 = np.array([0.0,2*L/3.0,H3])
        l_connect.append('TOA')
        an = vstack((an,MS1))
        an = vstack((an,MS2))
        an = vstack((an,MS3))
        an = vstack((an,MS4))
        filename = filename + '_TOA'
    if connect['TDOA']:
        # TDOA anchors sit at the four room corners; the first one acts as
        # the reference anchor in algebraic_compute.
        Ap1 = np.array([0.0,0.0,H1])
        l_connect.append('TDOA')
        Ap2 = np.array([L,0.0,H2])
        l_connect.append('TDOA')
        Ap3 = np.array([0.0,L,H3])
        l_connect.append('TDOA')
        Ap4 = np.array([L,L,H1])
        l_connect.append('TDOA')
        an = vstack((an,Ap1))
        an = vstack((an,Ap2))
        an = vstack((an,Ap3))
        an = vstack((an,Ap4))
        filename = filename + '_TDOA'
    # Drop the zeros(3) seed row used to start the vstack chain.
    an=np.delete(an,0,0)
    # filename = filename + '.pdf'
    ##### LIMITE POUR CONTRAINTE EXCLUDE
    BOUND1 = np.array([0,0,-0.5])
    BOUND2 = np.array([L,L,0.5])
    ##### RANDOM BLIND NODES
    bn = L*rand(Nbn,3)
    bn[:,2] = 0.0
    ############ SCENARION PARAMETERS
    parmsc = {}
    parmsc['room']=vstack((np.min(bn[:,:2],axis=0),np.max(bn[:,:2],axis=0)))
    parmsc['algebraic'] = True
    parmsc['CRB'] = True
    parmsc['exclude_out'] = np.array((BOUND1 ,BOUND2)) # contraint boundary or None
    parmsc['vcw'] = 1.0
    parmsc['save_pe'] = True
    parmsc['save_CLA'] = False
    parmsc['sigma_max_RSS'] = 3.98#4.34 # en (dB)
    parmsc['sigma_max_TOA'] = 1.142#2.97 # en (m)
    parmsc['sigma_max_TDOA'] = 1.85#3.55 # en (m)
    parmsc['std_dev_arange'] = np.array([3]) # arange(1.,4.,1.)
    parmsc['scenar_type'] ='simulated' # 'monte_carlo' # choose the type of scenario : Simulated : give some AN and BN.
    parmsc['err_type'] ='hybrid' # homogene/hybrid = error applied on AN
    parmsc['Constrain_Type'] ='hybrid' # 'TOA', 'RSS'
    parmsc['l_connect'] = l_connect
    ## TDOA parmsc
    parmsc['Algebraic_method'] ='ML' # 'TLS'
    ## RSS parmsc
    parmsc['rss_mode'] = 'mode'
    parmsc['rss0'] = 36.029#-34.7
    parmsc['d0'] = 1.0
    parmsc['pn'] = 2.386#2.64
    parmsc['an_nb'] = len(an) # choose the number of AN for performing positionning
    # Monte Carlo simulation parameters
    parmsc['eval_pos'] = True # if True , the position of the last evaluated layer is estimated
    parmsc['ceval'] = True # if True continuous evaluation
    ######################" CLA.SHOW3 PARAMETERS
    parmsh = {}
    parmsh['display']=False # launch geomview interactively
    parmsh['scene']=True # display whole scene
    parmsh['boxes']=True # display constraint box
    parmsh['constr_boxes']=True # display constraint box
    parmsh['estimated']=True # display estimated point
    #
    # Create the scenario
    #
    S = Scenario(an,bn,parmsc,parmsh)
    S.compute()
    # save_time[filename]=S.time_compute
    # file=open("save_time.pck", "w")
    # pk.dump(save_time,file)
| mit |
hsiaoyi0504/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets

# NOTE(review): datasets.samples_generator was deprecated in later
# scikit-learn releases; there make_swiss_roll is imported directly from
# sklearn.datasets.
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)

print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
print("Done. Reconstruction error: %g" % err)

#----------------------------------------------------------------------
# Plot result: the original 3-D roll on top, the 2-D unrolled embedding
# below, both coloured by position along the roll.
fig = plt.figure()

try:
    # compatibility matplotlib < 1.0
    ax = fig.add_subplot(211, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
    ax = fig.add_subplot(211)
    ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
josherick/bokeh | bokeh/charts/builder/horizon_builder.py | 43 | 12508 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Horizon class which lets you build your Horizon charts just
passing the arguments to the Chart class and calling the proper functions.
"""
from __future__ import absolute_import, division
import math
from six import string_types
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, Range1d, DataRange1d, FactorRange, GlyphRenderer, CategoricalAxis
from ...models.glyphs import Patches
from ...properties import Any, Color, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Horizon(values, index=None, num_folds=3, pos_color='#006400',
            neg_color='#6495ed', xscale='datetime', xgrid=False, ygrid=False,
            **kws):
    """ Create a Horizon chart using :class:`HorizonBuilder <bokeh.charts.builder.horizon_builder.HorizonBuilder>`
    render the geometry from values, index and num_folds.
    Args:
        values (iterable): iterable 2d representing the data series
            values matrix.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as an **1d iterable** of any sort that will be used as
            series common index or a **string** that corresponds to the key of the
            mapping to be used as index (and not as data series) if
            area.values is a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)
        num_folds (int, optional): The number of folds stacked on top
            of each other. (default: 3)
        pos_color (color, optional): The color of the positive folds.
            (default: "#006400")
        neg_color (color, optional): The color of the negative folds.
            (default: "#6495ed")
    In addition the the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`
    Examples:
    .. bokeh-plot::
        :source-position: above
        import datetime
        from collections import OrderedDict
        from bokeh.charts import Horizon, output_file, show
        now = datetime.datetime.now()
        dts = [now+datetime.timedelta(seconds=i) for i in range(10)]
        xyvalues = OrderedDict({'Date': dts})
        y_python = xyvalues['python'] = [2, 3, 7, 5, 26, 27, 27, 28, 26, 20]
        y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126, 122, 95, 90, 110, 112]
        y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26, 25, 26, 45, 26, 30]
        hz = Horizon(xyvalues, index='Date', title="Horizon Example", ylabel='Sample Data', xlabel='')
        output_file('horizon.html')
        show(hz)
    """
    # Panning/zooming is disabled: the folded rendering only makes sense at
    # the fixed, full extent of the data.
    tools = kws.get('tools', True)
    if tools == True:
        tools = "save,resize,reset"
    elif isinstance(tools, string_types):
        tools = tools.replace('pan', '')
        tools = tools.replace('wheel_zoom', '')
        tools = tools.replace('box_zoom', '')
        tools = tools.replace(',,', ',')
    kws['tools'] = tools
    chart = create_and_build(
        HorizonBuilder, values, index=index, num_folds=num_folds, pos_color=pos_color,
        neg_color=neg_color, xscale=xscale, xgrid=xgrid, ygrid=ygrid, **kws
    )
    # Hide numerical axis
    chart.left[0].visible = False
    # Add the series names to the y axis
    chart.extra_y_ranges = {"series": FactorRange(factors=chart._builders[0]._series)}
    chart.add_layout(CategoricalAxis(y_range_name="series"), 'left')
    return chart
class HorizonBuilder(Builder):
    """This is the Horizon class and it is in charge of plotting
    Horizon charts in an easy and intuitive way.
    Essentially, we provide a way to ingest the data, separate the data into
    a number of folds which stack on top of each others.
    We additionally make calculations for the ranges.
    And finally add the needed lines taking the references from the source.
    """
    # Declarative chart options (bokeh property system).
    index = Any(help="""
    An index to be used for all data series as follows:
    - A 1d iterable of any sort that will be used as
        series common index
    - As a string that corresponds to the key of the
        mapping to be used as index (and not as data
        series) if area.values is a mapping (like a dict,
        an OrderedDict or a pandas DataFrame)
    """)
    neg_color = Color("#6495ed", help="""
    The color of the negative folds. (default: "#6495ed")
    """)
    num_folds = Int(3, help="""
    The number of folds stacked on top of each other. (default: 3)
    """)
    pos_color = Color("#006400", help="""
    The color of the positive folds. (default: "#006400")
    """)

    def __init__(self, values, **kws):
        """
        Args:
            values (iterable): iterable 2d representing the data series
                values matrix.
            index (str|1d iterable, optional): can be used to specify a
                common custom index for all data series as follows:
                - As a 1d iterable of any sort (of datetime values)
                    that will be used as series common index
                - As a string that corresponds to the key of the
                    mapping to be used as index (and not as data
                    series) if area.values is a mapping (like a dict,
                    an OrderedDict or a pandas DataFrame). The values
                    must be datetime values.
            legend (str, optional): the legend of your chart. The legend
                content is inferred from incoming input.It can be
                ``top_left``, ``top_right``, ``bottom_left``,
                ``bottom_right``. ``top_right`` is set if you set it
                as True. Defaults to None.
            palette(list, optional): a list containing the colormap as
                hex values.
            num_folds (int, optional):
            pos_color (hex color string, optional): t
            neg_color (hex color string, optional): the color of
                the negative folds
                (default: #6495ed)
        Attributes:
            source (obj): datasource object for your plot,
                initialized as a dummy None.
            x_range (obj): x-associated datarange object for you plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for you plot,
                initialized as a dummy None.
            groups (list): to be filled with the incoming groups of data.
                Useful for legend construction.
            data (dict): to be filled with the incoming data and be passed
                to the ColumnDataSource in each chart inherited class.
                Needed for _set_And_get method.
            attr (list): to be filled with the new attributes created after
                loading the data dict.
                Needed for _set_And_get method.
        """
        super(HorizonBuilder, self).__init__(values, **kws)
        # Per-build state, filled in by _process_data.
        self._fold_names = []
        self._source = None
        self._series = []
        self._fold_height = {}
        self._max_y = 0

    def fold_coordinates(self, y, fold_no, fold_height, y_origin=0, graph_ratio=1):
        """ Function that calculate the coordinates for a value given a fold

        Returns the (bottom, top) pair of y coordinates of `y` rendered in
        fold layer `fold_no`, shifted by `y_origin` (the series' band) and
        scaled by `graph_ratio`.
        """
        height = fold_no * fold_height
        quotient, remainder = divmod(abs(y), float(height))
        v = fold_height
        # quotient would be 0 if the coordinate is represented in this fold
        # layer
        if math.floor(quotient) == 0:
            v = 0
            if remainder >= height - fold_height:
                v = remainder - height + fold_height
        v = v * graph_ratio
        # Return tuple of the positive and negative relevant position of
        # the coordinate against the provided fold layer
        if y > 0:
            return (v + y_origin, fold_height * graph_ratio + y_origin)
        else:
            return (y_origin, fold_height * graph_ratio - v + y_origin)

    def pad_list(self, l, padded_value=None):
        """ Function that insert padded values at the start and end of
        the list (l). If padded_value not provided, then duplicate the
        values next to each end of the list
        """
        if len(l) > 0:
            l.insert(0, l[0] if padded_value is None else padded_value)
            l.append(l[-1] if padded_value is None else padded_value)
        return l

    def _process_data(self):
        """Use x/y data from the horizon values.
        It calculates the chart properties accordingly. Then build a dict
        containing references to all the points to be used by
        the multiple area glyphes inside the ``_yield_renderers`` method.
        """
        # First pass: collect the series names, the global maximum and the
        # (padded) x vector per series.
        for col in self._values.keys():
            if isinstance(self.index, string_types) and col == self.index:
                continue
            self._series.append(col)
            self._max_y = max(max(self._values[col]), self._max_y)
            v_index = [x for x in self._values_index]
            self.set_and_get("x_", col, self.pad_list(v_index))
        self._fold_height = self._max_y / self.num_folds
        self._graph_ratio = self.num_folds / len(self._series)
        fill_alpha = []
        fill_color = []
        # Second pass: one positive and one negative patch per fold layer
        # per series, with alpha growing with the fold number.
        for serie_no, serie in enumerate(self._series):
            self.set_and_get('y_', serie, self._values[serie])
            y_origin = serie_no * self._max_y / len(self._series)
            for fold_itr in range(1, self.num_folds + 1):
                layers_datapoints = [self.fold_coordinates(
                    x, fold_itr, self._fold_height, y_origin, self._graph_ratio) for x in self._values[serie]]
                pos_points, neg_points = map(list, zip(*(layers_datapoints)))
                alpha = 1.0 * (abs(fold_itr)) / self.num_folds
                # Y coordinates above 0
                pos_points = self.pad_list(pos_points, y_origin)
                self.set_and_get("y_fold%s_" % fold_itr, serie, pos_points)
                self._fold_names.append("y_fold%s_%s" % (fold_itr, serie))
                fill_color.append(self.pos_color)
                fill_alpha.append(alpha)
                # Y coordinates below 0
                neg_points = self.pad_list(
                    neg_points, self._fold_height * self._graph_ratio + y_origin)
                self.set_and_get("y_fold-%s_" % fold_itr, serie, neg_points)
                self._fold_names.append("y_fold-%s_%s" % (fold_itr, serie))
                fill_color.append(self.neg_color)
                fill_alpha.append(alpha)
                # Groups shown in the legend will only appear once
                if serie_no == 0:
                    self._groups.append(str(self._fold_height * fold_itr))
                    self._groups.append(str(self._fold_height * -fold_itr))
        self.set_and_get('fill_', 'alpha', fill_alpha)
        self.set_and_get('fill_', 'color', fill_color)
        # Flattened x/y lists: the x vector of each series is repeated once
        # per fold patch (num_folds positive + num_folds negative).
        self.set_and_get('x_', 'all', [self._data[
            'x_%s' % serie] for serie in self._series for y in range(self.num_folds * 2)])
        self.set_and_get(
            'y_', 'all', [self._data[f_name] for f_name in self._fold_names])

    def _set_sources(self):
        """Push the Horizon data into the ColumnDataSource and
        calculate the proper ranges.
        """
        self._source = ColumnDataSource(self._data)
        self.x_range = DataRange1d(range_padding=0)
        self.y_range = Range1d(start=0, end=self._max_y)

    def _yield_renderers(self):
        """Use the patch glyphs to connect the xy points in the time series.
        It requires the positive and negative layers
        Takes reference points from the data loaded at the ColumnDataSource.
        """
        patches = Patches(
            fill_color='fill_color', fill_alpha='fill_alpha', xs='x_all', ys='y_all')
        renderer = GlyphRenderer(data_source=self._source, glyph=patches)
        # self._legends.append((self._groups[i-1], [renderer]))
        yield renderer

# TODO: Add the tooltips to display the dates and all absolute y values for each series
# at any vertical places on the plot
# TODO: Add the legend to display the fold ranges based on the color of
# the fold
| bsd-3-clause |
emhuff/regularizedInversion | reweightDES.py | 1 | 8538 | #!/usr/bin/env python
import matplotlib as mpl
mpl.use('Agg')
import argparse
import matplotlib.pyplot as plt
import cfunc
import mapfunc
import sys
import numpy as np
import healpy as hp
import esutil
import atpy
import sklearn
import numpy.lib.recfunctions as rf
from sklearn.neighbors import NearestNeighbors as NN
def modestify(data):
    """Append a 'modtype' column classifying each object with the DES
    "modest" star/galaxy separator: 1 = galaxy, 3 = star, 5 = neither.

    The cuts combine SExtractor flags, CLASS_STAR, SPREAD_MODEL and its
    error, and the i-band magnitudes, following the modest-classification
    scheme.

    NOTE(review): the unary `-` on boolean arrays below acts as logical NOT
    only on old NumPy; modern NumPy raises TypeError and requires `~`.
    NOTE(review): rf.append_fields may return a masked array -- confirm
    downstream code tolerates that.
    """
    modest = np.zeros(len(data), dtype=np.int32)
    # Galaxies: clean detections that fail all of the star criteria.
    galcut = (data['flags_i'] <=3) & -( ((data['class_star_i'] > 0.3) & (data['mag_auto_i'] < 18.0)) | ((data['spread_model_i'] + 3*data['spreaderr_model_i']) < 0.003) | ((data['mag_psf_i'] > 30.0) & (data['mag_auto_i'] < 21.0)))
    modest[galcut] = 1
    # Stars: bright high-CLASS_STAR objects, or point-like SPREAD_MODEL.
    starcut = (data['flags_i'] <=3) & ((data['class_star_i'] > 0.3) & (data['mag_auto_i'] < 18.0) & (data['mag_psf_i'] < 30.0) | (((data['spread_model_i'] + 3*data['spreaderr_model_i']) < 0.003) & ((data['spread_model_i'] +3*data['spreaderr_model_i']) > -0.003)))
    modest[starcut] = 3
    neither = -(galcut | starcut)
    modest[neither] = 5
    data = rf.append_fields(data, 'modtype', modest)
    print len(data), np.sum(galcut), np.sum(starcut), np.sum(neither)
    return data
def reweightMatch(rwt_tags = None, truthSample= None, matchSample = None, N_nearest = 50):
    """Compute per-object weights that reweight `matchSample` toward the
    distribution of `truthSample` in the space spanned by the `rwt_tags`
    columns, using nearest-neighbour density ratios (NP/NT).  Objects with
    a single truth neighbour get weight zero.
    """
    if rwt_tags is None:
        rwt_tags = ['', '']
    n_dim = len(rwt_tags)
    truth_arr = np.zeros((len(truthSample), n_dim))
    match_arr = np.zeros((len(matchSample), n_dim))
    for col, tag in enumerate(rwt_tags):
        truth_arr[:, col] = truthSample[tag]
        match_arr[:, col] = matchSample[tag]
    # Truth-sample density (NP) vs. match-sample density (NT) around each
    # match object, within the radius of its N_nearest match neighbours.
    NP = calcNN(N_nearest, truth_arr, match_arr)
    NT = calcNN(N_nearest, match_arr, match_arr)
    poorly_sampled = (NP == 1)
    wts = NP * 1. / NT
    wts[poorly_sampled] = 0.
    return wts
def calcNN(Nnei, magP, magT):
    """For every point of magT, count the magP points lying within the
    radius that encloses its Nnei nearest neighbours in magT itself
    (excluding the point itself from the count).
    """
    from sklearn.neighbors import NearestNeighbors as NN
    # Radius per training point: distance to its Nnei-th training
    # neighbour (Nnei+1 requested because index 0 is the point itself).
    train_nbrs = NN(n_neighbors=Nnei + 1).fit(magT)
    train_dist, _train_idx = train_nbrs.kneighbors(magT, n_neighbors=Nnei + 1)
    radii = train_dist[:, Nnei]
    # Count photometric neighbours inside that same radius.
    photo_nbrs = NN(radius=radii).fit(magP)
    photo_dist, _photo_idx = photo_nbrs.radius_neighbors(magT, radius=radii)
    # Subtract one so the central point does not count itself.
    return np.asarray([len(neigh) - 1 for neigh in photo_dist])
def perturb(cat, orig_size, tags = None, sigma = 0.1):
    """Bootstrap-resample `cat` to `orig_size` rows, then add Gaussian noise
    of width `sigma` to each column named in `tags`.  Returns the new array.
    """
    resampled = np.random.choice(cat, size=orig_size)
    for tag in tags:
        resampled[tag] = resampled[tag] + sigma * np.random.randn(orig_size)
    return resampled
def kdEst(val1,val2,x,y):
    """Gaussian-KDE estimate of the joint density of (val1, val2), evaluated
    on the grid spanned by x and y.  Returns (density, xx, yy) where xx/yy
    are the meshgrid coordinate arrays (shape (len(y), len(x))).
    """
    import scipy.stats as st
    grid_x, grid_y = np.meshgrid(x, y)
    sample = np.vstack([val1, val2])
    kde = st.gaussian_kde(sample)
    eval_points = np.vstack([grid_x.ravel(), grid_y.ravel()])
    density = np.reshape(kde(eval_points).T, grid_x.shape)
    return density, grid_x, grid_y
def getDES():
    """Load the DES i-griz catalogue, classify it with modestify(), and
    return the star-classified subset (modtype == 3).
    """
    import pyfits  # NOTE(review): unused in the original body; kept as-is.
    path = '../../Data/GOODS/'
    desAll = esutil.io.read(path+"des_i-griz.fits")
    desAll = modestify(desAll)
    desStars = desAll[desAll['modtype'] == 3]
    # BUG FIX: the original fell off the end and implicitly returned None,
    # discarding the selected stars.
    return desStars
def getTruthStars():
    """Read the GOODS training-star catalogue and return the spectroscopically
    confirmed stars (TRUE_CLASS == 1) lying in the usable fields.
    """
    path = '../../Data/GOODS/training-stars.fits'
    data = esutil.io.read(path, ext=1)
    data = data[data["TRUE_CLASS"] == 1]
    #data = data[data['MAG_MODEL_I'] < 21]
    # Earlier field selections tried here were [10,11,12,15,19] and
    # [11,12,13,14,16,17,18]; the full range below is the one in effect.
    usable_fields = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    return np.array([row for row in data if row['FIELD'] in usable_fields])
def getConfidenceLevels(hist, sigma=None):
    """Return the likelihood levels of `hist` enclosing each confidence
    fraction in `sigma` (default [0.68, 0.95]).

    For each fraction, cells are accumulated from the highest likelihood
    downwards until the requested fraction of the total is enclosed; the
    likelihood of the last cell added is the contour level.

    Fixes vs. the original:
    - mutable default argument replaced by the None sentinel;
    - the accumulator is reset per sigma, so the result no longer depends
      on the sigma list being in ascending order;
    - print made Python-2/3 compatible.
    """
    if sigma is None:
        sigma = [0.68, 0.95]
    lik = hist.reshape(hist.size)
    lsort = np.sort(lik)          # ascending
    dTotal = np.sum(lsort)
    clevels = []
    for thisSigma in sigma:
        dSum = 0.
        nIndex = lsort.size
        # Walk down from the highest-likelihood cell.
        while (dSum < dTotal * thisSigma):
            nIndex -= 1
            dSum += lsort[nIndex]
        clevels.append(lsort[nIndex])
    print(clevels)
    return clevels
def main(argv):
    """Reweight the Balrog star catalogue to match the colour distribution of
    DES confirmed stars, save the reweighted subset, and plot colour-colour
    diagnostics (contours of the reweighted locus vs. the DES stars).

    NOTE(review): dtype uses np.float, which was removed in NumPy >= 1.24;
    the builtin `float` is the modern equivalent.
    """
    path = '../../Data/GOODS/'
    #desStars = np.random.choice(desStars,size=10000)
    balrogStars = esutil.io.read(path+"matched_i-griz.fits")
    #balrogStars = np.random.choice(balrogStars,size = 10000)
    desStars = getTruthStars()
    # Keep only objects detected in all four bands (99/sentinel mags are >50).
    desKeep =( (desStars['MAG_AUTO_G'] < 50) &
               (desStars['MAG_AUTO_R'] < 50) &
               (desStars['MAG_AUTO_I'] < 50) &
               (desStars['MAG_AUTO_Z'] < 50) )
    # Colour/magnitude working arrays for the two samples.
    des = np.empty(desStars.size, dtype = [('g-r',np.float),('r-i',np.float),('i-z',np.float),('i',np.float),('r',np.float),('g',np.float),('z',np.float)])
    balrog = np.empty(balrogStars.size, dtype = [('g-r',np.float),('r-i',np.float),('i-z',np.float),('i',np.float)])
    des['g-r'] = desStars['MAG_AUTO_G'] - desStars['MAG_AUTO_R']
    des['r-i'] = desStars['MAG_AUTO_R'] - desStars['MAG_AUTO_I']
    des['i-z'] = desStars['MAG_AUTO_I'] - desStars['MAG_AUTO_Z']
    des['i'] = desStars['MAG_AUTO_I']
    des['g'] = desStars['MAG_AUTO_G']
    des['r'] = desStars['MAG_AUTO_R']
    des['z'] = desStars['MAG_AUTO_Z']
    des = des[desKeep]
    bright = (des['i'] < 22.) & (des['r'] < 22.) & ( des['g'] < 22. ) & ( des['z'] < 22. )
    balrog['g-r'] = balrogStars['mag_auto_g'] - balrogStars['mag_auto_r']
    balrog['r-i'] = balrogStars['mag_auto_r'] - balrogStars['mag_auto_i']
    balrog['i-z'] = balrogStars['mag_auto_i'] - balrogStars['mag_auto_z']
    balrog['i'] = balrogStars['mag_auto_i']
    # Density-ratio weights in colour space, then accept/reject sampling.
    wts = reweightMatch(truthSample = des, matchSample = balrog, rwt_tags = ['g-r','r-i','i-z'])
    keep = np.random.random(balrog.size) * np.max(wts) <= wts
    balrog_rwt = balrog[keep]
    balrog_out = balrogStars[keep]
    esutil.io.write('balrog-des-reweighted.fits',balrog_out,clobber=True)
    # ---- Diagnostic plots: KDE contours of the reweighted locus ----
    fig,((ax1,ax2),(ax3,ax4)) = plt.subplots(nrows=2,ncols=2,figsize=(14,14))
    from matplotlib.colors import LogNorm, Normalize
    x_b = np.linspace(-1,3,100)
    y_b = np.linspace(-1,3,100)
    bContours, xx_b, yy_b = kdEst(balrog_out['mag_r']-balrog_out['mag_i'],balrog_out['mag_g'] - balrog_out['mag_r'],x_b,y_b)
    bLevels = getConfidenceLevels(bContours)
    cfset = ax1.contour(xx_b, yy_b, bContours, bLevels, zorder=2)
    # For labeling:
    import matplotlib.patches as mpatches
    red_patch = mpatches.Patch(color='red', label='DES confirmed stars')
    blue_patch = mpatches.Patch(color='blue', label='deconvolved locus')
    ax1.plot(des['r-i'],des['g-r'],',',markersize=0.5,color='green')
    ax1.plot(des[bright]['r-i'],des[bright]['g-r'],'.',lw=0,markersize=5,color='red',alpha=0.5)
    ax1.set_xlim(-1,2)
    ax1.set_ylim(-1,3)
    ax1.legend(loc='best',handles=[red_patch,blue_patch])
    ax1.set_xlabel("r-i")
    ax1.set_ylabel("g-r")
    x_r = np.linspace(-1,3,100)
    y_r = np.linspace(-2,3,100)
    rContours, xx_r, yy_r = kdEst(balrog_out['mag_i'] - balrog_out['mag_z'],balrog_out['mag_r'] - balrog_out['mag_i'],x_r,y_r)
    rLevels = getConfidenceLevels(rContours)
    cfset = ax2.contour(xx_r, yy_r, rContours, rLevels, zorder=2)
    ax2.plot(des['i-z'],des['r-i'],'.',lw=0,markersize=.5,color='green')
    ax2.plot(des[bright]['i-z'],des[bright]['r-i'],'.',lw=0,markersize=5,color='red',alpha=0.5)
    ax2.set_xlim(-1,2)
    ax2.set_ylim(-1,3)
    ax2.set_ylabel("r-i")
    ax2.set_xlabel("i-z")
    # Before (blue) vs. after (red) reweighting.
    ax3.plot(balrogStars['mag_r']-balrogStars['mag_i'],balrogStars['mag_g'] - balrogStars['mag_r'],',',color='blue')
    ax3.plot(balrog_out['mag_r']-balrog_out['mag_i'],balrog_out['mag_g'] - balrog_out['mag_r'],'.',color='red',alpha=0.75)
    ax3.set_xlabel("r-i")
    ax3.set_ylabel("g-r")
    ax3.set_xlim(-1,2)
    ax3.set_ylim(-1,3)
    ax4.plot(balrogStars['mag_i'] - balrogStars['mag_z'],balrogStars['mag_r'] - balrogStars['mag_i'],',',color='blue')
    ax4.plot(balrog_out['mag_i'] - balrog_out['mag_z'],balrog_out['mag_r'] - balrog_out['mag_i'],'.',color='red',alpha=0.75)
    ax4.set_ylabel("r-i")
    ax4.set_xlabel("i-z")
    ax4.set_xlim(-1,2)
    ax4.set_ylim(-1,3)
    fig.savefig("des_deconvolved_locus.png")
    print "(iter 1) fraction of original sample kept: ",balrog_out.size * 1./balrog.size
    # NOTE(review): `stop` is undefined -- apparently a deliberate NameError
    # breakpoint (the __main__ wrapper drops into pdb on any exception).
    stop
    #plt.show()
if __name__ == "__main__":
    import pdb, traceback
    try:
        main(sys.argv)
    except Exception:
        # Debug harness: print the traceback and drop into the post-mortem
        # debugger at the point of failure.  Catching Exception (instead of
        # the old bare `except:`) lets SystemExit and KeyboardInterrupt
        # propagate normally instead of opening a debugger session.
        tb = sys.exc_info()[2]
        traceback.print_exc()
        pdb.post_mortem(tb)
| mit |
EDUlib/eTracesX | Translation_software/edx_to_MOOCdb/extractor.py | 1 | 6316 | #import MySQLdb
import csv
import os
import pickle
import json
import pandas as pd
import config as cfg
import edxTrackLogJSONParser
import gzip
def get_events():
    """Return an iterator of EdxTrackEvent records for the configured source.

    The backend is selected by ``cfg.INPUT_SOURCE``:

    - ``'csv'``   -> CSVExtractor over the datastage CSV export
    - ``'mysql'`` -> MySQLExtractor
    - ``'json'``  -> JSONExtractor over the raw JSON tracking log

    Raises a ValueError for any other value; previously an unknown source
    fell through and silently returned None, which crashed callers later.
    """
    src = cfg.INPUT_SOURCE
    if src == 'csv':
        return CSVExtractor(cfg.EDX_TRACK_EVENT)
    elif src == 'mysql':
        # NOTE(review): MySQLExtractor is not defined in this module as
        # shown -- confirm it is provided elsewhere before using 'mysql'.
        return MySQLExtractor()
    elif src == 'json':
        return JSONExtractor(cfg.EDX_TRACK_EVENT_LOG)
    raise ValueError("Unsupported INPUT_SOURCE: %r" % (src,))
class CSVExtractor(object):
    """
    Loads data from CSV export of Stanford datastage tables.

    Iterating over an instance yields one EdxTrackEvent row at a time as a
    dict, augmented (via an in-Python join) with the relevant columns of the
    Answer and CorrectMap tables, which are held in memory as pandas
    DataFrames indexed by their primary keys.
    """
    # CSV Fields: column order of each exported table; the first entry of
    # each list is that table's primary key.
    ANSWER_FIELDNAMES = ['answer_id','problem_id','answer','course_id']
    # NOTE(review): 'queustate' looks like a typo for 'queuestate' -- it only
    # matters if downstream code reads that key; confirm against consumers.
    CORRECT_MAP_FIELDNAMES = ['correct_map_id','answer_identifier', 'correctness','npoints','msg','hint','hintmode','queustate']
    EDX_TRACK_EVENT_FIELDNAMES = ['_id','event_id','agent','event_source','event_type','ip','page','session','time','anon_screen_name','downtime_for','student_id','instructor_id','course_id','course_display_name','resource_display_name','organization','sequence_id','goto_from','goto_dest','problem_id','problem_choice','question_location','submission_id','attempts','long_answer','student_file','can_upload_file','feedback','feedback_response_selected','transcript_id','transcript_code','rubric_selection','rubric_category','video_id','video_code','video_current_time','video_speed','video_old_time','video_new_time','video_seek_type','video_new_speed','video_old_speed','book_interaction_type','success','answer_id','hint','hintmode','msg','npoints','queuestate','orig_score','new_score','orig_total','new_total','event_name','group_user','group_action','position','badly_formatted','correctMap_fk','answer_fk','state_fk','load_info_fk']

    def __init__(self, edx_track_event, answer=cfg.ANSWER, correct_map=cfg.CORRECT_MAP):
        """Open the EdxTrackEvent CSV and preload the two lookup tables.

        edx_track_event : path to the EdxTrackEvent CSV export.
        answer, correct_map : paths to the Answer / CorrectMap CSV exports
            (default to the locations configured in ``config``).
        """
        # Create a streaming CSV reader for the (potentially large)
        # EdxTrackEvent table; rows are consumed lazily in next().
        try:
            events = open(edx_track_event)
            self.edx_track_event = csv.DictReader( events,
                                                   fieldnames=self.EDX_TRACK_EVENT_FIELDNAMES,
                                                   delimiter=',',
                                                   quotechar=cfg.QUOTECHAR,
                                                   escapechar='\\')
        except IOError as e:
            # NOTE(review): the message reports cfg.EDX_TRACK_EVENT rather
            # than the `edx_track_event` argument actually opened -- these
            # can differ; confirm which path should be shown.
            print 'Unable to open EdxTrackEvent file : %s'% cfg.EDX_TRACK_EVENT
            # NOTE(review): bare `exit` is a no-op expression statement (it
            # names the builtin without calling it), so execution continues
            # with self.edx_track_event unset; sys.exit(1) or re-raising was
            # probably intended.
            exit
        # Load Answer and CorrectMap tables fully into pandas DataFrames,
        # indexed by the table's primary key, for O(1) joins in next().
        try:
            self.answer = pd.read_csv(answer, delimiter=',', quotechar=cfg.QUOTECHAR, escapechar='\\', na_filter=False, index_col=0, names=self.ANSWER_FIELDNAMES, dtype='string')
            self.correct_map = pd.read_csv(correct_map, delimiter=',', quotechar=cfg.QUOTECHAR, escapechar='\\', na_filter=False, index_col=0, names=self.CORRECT_MAP_FIELDNAMES,dtype='string')
        except Exception as e:
            print 'Pandas unable to load CSV :'
            print str(e)
            # NOTE(review): same no-op `exit` as above.
            exit

    def __iter__(self):
        # Python 2 iterator protocol: the object is its own iterator.
        return self

    def next(self):
        """Return the next event dict, joined with its foreign-table values.

        Raises StopIteration when the underlying CSV is exhausted.
        """
        event = self.edx_track_event.next()
        # Pull the answer text and the correctness info from the side tables.
        self.get_foreign_values(event, 'answer_fk', ['answer'], self.answer)
        self.get_foreign_values(event, 'correctMap_fk', ['answer_identifier', 'correctness'], self.correct_map)
        return event

    def new_reader(self, input_file, field_names, delim=',', qtchar='\'', escchar='\\'):
        # NOTE(review): dead code -- the try body returns immediately without
        # building a reader, so the IOError branch is unreachable.  Either
        # finish the implementation or delete this method.
        try:
            return
        except IOError:
            print '[CSVExtractor.new_reader] Could not open file : ' + input_file
            return

    def get_foreign_values(self, event, fkey_name, fval_names, dataframe):
        '''
        Add to the EdxTrackEvent row `event` the relevant fields fetched
        from a foreign table (the analog of a SQL join on `fkey_name`).

        event      : dict for one EdxTrackEvent row; modified in place.
        fkey_name  : name of the foreign-key column in `event`.
        fval_names : names of the foreign columns to copy into `event`.
        dataframe  : DataFrame of the foreign table, indexed by primary key.

        In case of conflict (foreign field holding same information and
        having same name as local field), the foreign value overwrites the
        local one.  If the foreign key is missing or empty, all requested
        fields are set to ''.
        '''
        fkey = event.get(fkey_name, None)
        if fkey:
            try:
                frow = dataframe.loc[fkey]
                for name in fval_names:
                    event[name] = frow.loc[name]
            except Exception as e:
                # Key present in the event but absent from the side table.
                print 'Broken foreign key : %s'%fkey
                print str(e)
                # NOTE(review): no-op `exit` again -- the event is returned
                # with some fields missing; decide whether to skip or abort.
                exit
        # If the foreign key is missing, set all foreign
        # fields to ''
        else:
            for name in fval_names:
                event[name]=''
class JSONExtractor(object):
    """Iterator over events parsed from an edX JSON tracking log.

    The log may be plain text or gzip-compressed (detected by a ``.gz``
    suffix).  Each line is handed to EdXTrackLogJSONParser, which may
    produce several event records per line; those are buffered and handed
    out one at a time.
    """

    def __init__(self, edx_track_event_log):
        """Open the tracking log and set up the JSON parser.

        edx_track_event_log : path to the log file (optionally .gz).
        """
        try:
            # Buffer of parsed-but-not-yet-returned events.
            self.logsToProcess = []
            self.jsonParserInstance = edxTrackLogJSONParser.EdXTrackLogJSONParser()
            # Transparently support gzip-compressed logs.
            if edx_track_event_log.endswith(".gz"):
                self.events_log = gzip.open(edx_track_event_log)
            else:
                self.events_log = open(edx_track_event_log)
        except IOError as e:
            print 'Unable to open EdxTrackEvent log file : %s'% edx_track_event_log
            # NOTE(review): bare `exit` is a no-op expression statement, so
            # execution continues with the instance half-initialised;
            # sys.exit(1) or re-raising was probably intended.
            exit

    def __iter__(self):
        # Python 2 iterator protocol: the object is its own iterator.
        return self

    def next(self):
        """Return the next parsed event, or None for an ill-formed line.

        StopIteration from the underlying file iterator propagates out and
        ends the iteration.  NOTE(review): callers must tolerate a None
        return for unparseable lines.
        """
        while True:
            # Drain previously parsed events before reading more input.
            if len(self.logsToProcess) > 0:
                return self.logsToProcess.pop()
            jsonStr = self.events_log.next()
            # Skip blank lines.
            if jsonStr == '\n' or len(jsonStr) == 0:
                continue
            try:
                self.logsToProcess = self.jsonParserInstance.processOneJSONObject(jsonStr)
                # self.logsToProcess = json.loads(str(jsonStr))
            except (ValueError, KeyError) as e:
                print 'Ill formed JSON: %s\n%s' % (e,jsonStr)
                # JSONToRelation.logger.warn('Line %s: bad JSON object: %s' % (self.makeFileCitation(), `e`))
                #***************
                # Uncomment to get stacktrace for the above caught errors:
                #import sys
                #import traceback
                #traceback.print_tb(sys.exc_info()[2])
                #***************
                return None
| agpl-3.0 |
abimannans/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
# Scorer names grouped by the kind of estimator they apply to; used by
# test_scorer_sample_weight to pick a sensibly fitted model per scorer.
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
                      'median_absolute_error']

CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]

# These scorers require multilabel-indicator targets.
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
    """Stub with no ``fit`` method at all, used to exercise check_scoring's
    rejection of non-estimators."""
    pass
class EstimatorWithFit(BaseEstimator):
    """Stub estimator exposing only a no-op ``fit``, for check_scoring tests."""

    def fit(self, X, y):
        # Nothing to learn; returning self honours the sklearn fit contract.
        return self
class EstimatorWithFitAndScore(object):
    """Stub estimator whose built-in ``score`` is constant, used to test the
    passthrough-scorer branch of check_scoring."""

    def fit(self, X, y):
        # No learning happens; return self per the estimator API.
        return self

    def score(self, X, y):
        # Constant score, so tests can assert an exact value.
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Stub estimator that memorises the training targets and echoes them
    back from ``predict`` (it has no ``score`` method)."""

    def fit(self, X, y):
        # Remember the targets so predict can return them verbatim.
        self.y = y
        return self

    def predict(self, X):
        return self.y
class DummyScorer(object):
    """Callable scorer stub: ignores every argument and returns 1."""

    def __call__(self, est, X, y):
        return 1
def test_check_scoring():
    # Test all branches of check_scoring using the stub estimators above.

    # An object without a fit method is rejected outright.
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should a be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)

    # No scoring given + estimator has its own score -> passthrough scorer.
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    # No scoring given and no score method -> TypeError.
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)

    # An explicit scoring string yields a predict-based scorer.
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))

    # allow_none=True returns None when no scoring is specified.
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.

    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))

    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))

    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
def test_make_scorer():
    # Sanity check on the make_scorer factory function.
    f = lambda *args: 0
    # needs_threshold and needs_proba are mutually exclusive options.
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    # Test classification scorers: each scorer-name string must reproduce a
    # direct call to the underlying metric for every averaging mode.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)

    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:

        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)

        # Bare scorer name defaults to the binary metric with pos_label=1.
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)

    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)

    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)

    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # Test regression scorers: the 'r2' scorer string must agree with
    # calling r2_score directly on the fitted model's predictions.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
def test_thresholded_scorers():
    # Test scorers that take thresholds (continuous decision values).
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    # For a binary classifier, roc_auc computed via the scorer, via
    # decision_function, and via the positive-class probability must agree.
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    # log_loss is a loss, so the scorer returns its negation.
    logscore = get_scorer('log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # NOTE(review): passing a generator to np.vstack is deprecated in newer
    # NumPy releases; wrap it in a list if the NumPy pin is ever raised.
    score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    # Fake a decision_function built from the per-output probabilities.
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]

    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    # The scorer must match adjusted_rand_score applied to the predicted
    # cluster ids versus the true labels.
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    # average=None yields one score per class, but cross-validation and grid
    # search need a scalar, so both must raise ValueError.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors

    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    # Zero out the first ten weights: scoring with this weight vector must
    # match scoring with those samples dropped entirely.
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    # Map every registered scorer name to an estimator fitted on the right
    # kind of target (regression / classification / multilabel).
    estimator = dict([(name, sensible_regr)
                      for name in REGRESSION_SCORERS] +
                     [(name, sensible_clf)
                      for name in CLF_SCORERS] +
                     [(name, sensible_ml_clf)
                      for name in MULTILABEL_ONLY_SCORERS])

    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                                 "called with sample weights: {1} vs "
                                 "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                        "ignoring samples and setting sample_weight to"
                                        " 0: {1} vs {2}".format(name, weighted,
                                                                ignored))

        except TypeError as e:
            # Scorers that do not accept sample_weight must fail with a
            # message that names the unsupported argument.
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
ym-bob/TensorFlowBook | Titanic/03_skflow.py | 2 | 1255 | import pandas as pd
import tensorflow.contrib.learn as skflow
from sklearn import metrics
from sklearn.model_selection import train_test_split
from data_processing import get_test_data, get_train_data
# Assemble the engineered feature matrix and the survival labels from the
# preprocessed training table.
train_data = get_train_data()
X = train_data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare', 'Child',
                'EmbarkedF', 'DeckF', 'TitleF', 'Honor']].as_matrix()
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 (.values /
# .to_numpy() are the replacements) -- confirm the pinned pandas version.
Y = train_data['Survived']

# split training data and validation set data (90/10, fixed seed for
# reproducibility)
X_train, X_val, Y_train, Y_val = (
    train_test_split(X, Y, test_size=0.1, random_state=42))

# skflow classifier: a plain linear model over the 11 numeric features,
# trained for 200 steps and evaluated on the held-out split.
feature_cols = skflow.infer_real_valued_columns_from_input(X_train)
classifier = skflow.LinearClassifier(
    feature_columns=feature_cols, n_classes=2)
classifier.fit(X_train, Y_train, steps=200)
score = metrics.accuracy_score(Y_val, classifier.predict(X_val))
print("Accuracy: %f" % score)

# predict on test dataset
test_data = get_test_data()
X = test_data[['Sex', 'Age', 'Pclass', 'SibSp', 'Parch', 'Fare', 'Child',
               'EmbarkedF', 'DeckF', 'TitleF', 'Honor']].as_matrix()
predictions = classifier.predict(X)
# Write the Kaggle submission file (PassengerId,Survived).
submission = pd.DataFrame({
    "PassengerId": test_data["PassengerId"],
    "Survived": predictions
})
submission.to_csv("titanic-submission.csv", index=False)
| apache-2.0 |
DSLituiev/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)

# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause

import numpy as np

from matplotlib import pyplot as plt

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
    import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata

# NOTE(review): fetch_mldata relied on the now-defunct mldata.org service and
# was removed from scikit-learn -- confirm the pinned version still has it.
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]  # time as the single feature (kept 2-D for sklearn)
y = data[:, 0]    # CO2 concentration target

# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0)  # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
    * ExpSineSquared(length_scale=1.3, periodicity=1.0)  # seasonal component
# medium term irregularity
k3 = 0.66**2 \
    * RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
    + WhiteKernel(noise_level=0.19**2)  # noise terms
kernel_gpml = k1 + k2 + k3 + k4

# optimizer=None keeps the book's hyperparameters fixed (no refitting).
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
                              optimizer=None, normalize_y=True)
gp.fit(X, y)

print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))

# Kernel with optimized parameters: same structure, looser starting values;
# this fit maximises the log-marginal-likelihood by gradient ascent.
k1 = 50.0**2 * RBF(length_scale=50.0)  # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
    * ExpSineSquared(length_scale=1.0, periodicity=1.0,
                     periodicity_bounds="fixed")  # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
    + WhiteKernel(noise_level=0.1**2,
                  noise_level_bounds=(1e-3, np.inf))  # noise terms
kernel = k1 + k2 + k3 + k4

gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
                              normalize_y=True)
gp.fit(X, y)

print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))

# Predict 30 units past the last observation to show the growing
# predictive uncertainty of the extrapolation.
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)

# Illustration: data, posterior mean, and a +/- 1 std band.
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
                 alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
raghavrv/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)

# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor

# Settings
n_repeat = 50  # Number of iterations for computing expectations
n_train = 50   # Size of the training set
n_test = 1000  # Size of the test set
noise = 0.1    # Standard deviation of the noise
np.random.seed(0)  # fixed seed so the decomposition is reproducible

# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
              ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]

n_estimators = len(estimators)
# Generate data
def f(x):
    """Noise-free target: a mixture of two Gaussian bumps centred at 0 and 2.

    Any input array is flattened first, so the return is always 1-D.
    """
    flat = x.ravel()
    bump_at_zero = np.exp(-flat ** 2)
    bump_at_two = np.exp(-(flat - 2) ** 2)
    return bump_at_zero + 1.5 * bump_at_two
def generate(n_samples, noise, n_repeat=1):
    """Draw one random problem instance.

    Returns a sorted design matrix of shape (n_samples, 1) with entries in
    [-5, 5), and noisy targets y = f(X) + N(0, noise): a 1-D array when
    n_repeat == 1, otherwise an (n_samples, n_repeat) array with one
    independent noise draw per column.
    """
    xs = np.sort(np.random.rand(n_samples) * 10 - 5)

    if n_repeat == 1:
        targets = f(xs) + np.random.normal(0.0, noise, n_samples)
    else:
        targets = np.zeros((n_samples, n_repeat))
        for rep in range(n_repeat):
            targets[:, rep] = f(xs) + np.random.normal(0.0, noise, n_samples)

    return xs.reshape((n_samples, 1)), targets
# Draw n_repeat independent training sets, plus a single shared test set
# that carries n_repeat noisy target draws per test point (one column each).
X_train = []
y_train = []

for i in range(n_repeat):
    X, y = generate(n_samples=n_train, noise=noise)
    X_train.append(X)
    y_train.append(y)

X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
    # Compute predictions: one fitted model (and prediction column) per
    # independent training set.
    y_predict = np.zeros((n_test, n_repeat))

    for i in range(n_repeat):
        estimator.fit(X_train[i], y_train[i])
        y_predict[:, i] = estimator.predict(X_test)

    # Bias^2 + Variance + Noise decomposition of the mean squared error:
    # average the squared error over all (noise draw, model) pairs.
    y_error = np.zeros(n_test)

    for i in range(n_repeat):
        for j in range(n_repeat):
            y_error += (y_test[:, j] - y_predict[:, i]) ** 2

    y_error /= (n_repeat * n_repeat)

    y_noise = np.var(y_test, axis=1)
    y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
    y_var = np.var(y_predict, axis=1)

    print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
          " + {3:.4f} (var) + {4:.4f} (noise)".format(name,
                                                      np.mean(y_error),
                                                      np.mean(y_bias),
                                                      np.mean(y_var),
                                                      np.mean(y_noise)))

    # Plot figures: top row shows predictions, bottom row the decomposition.
    plt.subplot(2, n_estimators, n + 1)
    plt.plot(X_test, f(X_test), "b", label="$f(x)$")
    plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")

    for i in range(n_repeat):
        if i == 0:
            plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
        else:
            plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)

    plt.plot(X_test, np.mean(y_predict, axis=1), "c",
             label="$\mathbb{E}_{LS} \^y(x)$")

    plt.xlim([-5, 5])
    plt.title(name)

    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})

    plt.subplot(2, n_estimators, n_estimators + n + 1)
    plt.plot(X_test, y_error, "r", label="$error(x)$")
    # NOTE(review): the trailing commas on the next two lines create
    # discarded tuples -- harmless, but presumably unintended.
    plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
    plt.plot(X_test, y_var, "g", label="$variance(x)$"),
    plt.plot(X_test, y_noise, "c", label="$noise(x)$")

    plt.xlim([-5, 5])
    plt.ylim([0, 0.1])

    if n == 0:
        plt.legend(loc="upper left", prop={"size": 11})

plt.show()
| bsd-3-clause |
bxlab/HiFive_Paper | Scripts/HiCLib/mirnylab-hiclib-460c3fbc0f72/src/hiclib/fragmentHiC.py | 2 | 93010 | # (c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: Maksim Imakaev (imakaev@mit.edu)
"""
This is a module class for fragment-level Hi-C data analysis.
The base class "HiCdataset" can load, save and merge Hi-C datasets,
perform certain filters, and save binned heatmaps.
Additional class HiCStatistics contains methods to analyze HiC data
on a fragment level.
This includes read statistics, scalings, etc.
Input data
----------
When used together with iterative mapping, this class can load
files from h5dicts created by iterative mapping.
This method can also input any dictionary-like structure, such as a dictionary,
np.savez, etc. The minimal subset of information are positions of two reads,
but providing strand information is advised.
If restriction fragment assignment is not provided,
it will be automatically recalculated.
.. warning ::
1-bp difference in positions of restriction sites will force certain
algorithms, such as scaling calculations, to throw an exception. It is
adviced to supply restriction site data only if it was generated by
iterative mapping code.
Concepts
--------
All read data is stored in a synchronized h5dict-based dictionary of arrays.
Each variable has a fixed name and type, as specified in the self.vectors
variable. Whenever the variable is accessed from the program, it is loaded from
the h5dict.
Whenever a set of reads needs to be excluded from the dataset, a
:py:func:`maskFilter <HiCdataset.maskFilter>` method is called,
that goes over all datasets and overrides them.
This method automatically rebuilds fragments.
Filtering the data
------------------
This class has many build-in methods for filtering the data.
However, one can easily construct another filter as presented in
multiple one-liner examples below
.. code-block:: python
>>> Dset = HiCdataset(**kwargs)
>>> Dset.fragmentFilter((Dset.ufragmentlen >1000) * \
(Dset.ufragmentlen < 4000))
#Keep reads from fragments between 1kb and 4kb long.
>>> Dset.maskFilter(Dset.chrms1 == Dset.chrms2) #keep only cis reads
>>> Dset.maskFilter((Dset.chrms1 !=14) + (Dset.chrms2 !=14))
#Exclude all reads from chromosome 15 (yes, chromosomes are zero-based!)
>>> Dset.maskFilter(Dset.dist1 + Dset.dist2 > 500)
#Keep only random breaks, if 500 is maximum molecule length
-------------------------------------------------------------------------------
API documentation
-----------------
"""
import warnings
import os
import traceback
from copy import copy
from mirnylib.genome import Genome
import numpy as np
import gc
from hiclib.hicShared import binarySearch, sliceableDataset, mydtype, h5dictBinarySearch, mydtypeSorter, searchsorted
import mirnylib.h5dict
from mirnylib import numutils
from mirnylib.numutils import arrayInArray, \
uniqueIndex, fasterBooleanIndexing, fillDiagonal, arraySearch, \
arraySumByArray, externalMergeSort, chunkedBincount
import time
from textwrap import dedent
USE_NUMEXPR = True
import numexpr
import logging
log = logging.getLogger(__name__)
class HiCdataset(object):
"""Base class to operate on HiC dataset.
This class stores all information about HiC reads on a hard drive.
Whenever a variable corresponding to any record is used,
it is loaded/saved from/to the HDD.
If you apply any filters to a dataset, it will actually modify
the content of the current working file.
Thus, to preserve the data, loading datasets is advised. """
def __init__(self, filename, genome, enzymeName="fromGenome", maximumMoleculeLength=500,
             inMemory=False, mode="a",tmpFolder = "/tmp", dictToStoreIDs="dict"):
    """
    __init__ method

    Initializes empty dataset by default; opens an existing one when the
    file exists and mode permits.

    Parameters
    ----------
    filename : string
        A filename to store HiC dataset in an HDF5 file.
    genome : folder with genome, or Genome object
        A folder with fastq files of the genome
        and gap table from Genome browser.
        Alternatively, mirnylib.genome.Genome object.
    enzymeName : str, optional
        Restriction enzyme name, or "fromGenome" to reuse the enzyme
        already set on the Genome object.
    maximumMoleculeLength : int, optional
        Maximum length of molecules in the HiC library,
        used as a cutoff for dangling ends filter
    inMemory : bool, optional
        Create dataset in memory. Filename is ignored then,
        but still needs to be specified.
    mode : str
        'r'  - Readonly, file must exist
        'r+' - Read/write, file must exist
        'w'  - Create file, overwrite if exists
        'w-' - Create file, fail if exists
        'a'  - Read/write if exists, create otherwise (default)
    tmpFolder : str, optional
        Directory for temporary files used by external sorting.
    dictToStoreIDs : dict-like or "dict" or "h5dict"
        A dictionary to store rsite IDs. If "dict", then store them in memory.
        If "h5dict", then creates default h5dict (in /tmp folder)
        If other object, uses it, whether it is an h5dict or a dictionary
    """
    # -->>> Important::: do not define any variables before vectors!!! <<<--
    # __setattr__ / __getattribute__ consult self.vectors, so it must be
    # the very first attribute assigned on the instance.
    # These are fields that will be kept on a hard drive
    # You can learn what variables mean from here too.
    self.vectors = {
        # chromosomes for each read.
        "strands1": "bool", "strands2": "bool",
        "chrms1": "int8", "chrms2": "int8",
        # IDs of fragments. fragIDmult * chromosome + location
        # distance to rsite
        "cuts1": "int32", "cuts2": "int32",
    }
    # vectors2: derived quantities, recomputed on the fly by _getVector
    self.vectors2 = {
        "fraglens1": "int32", "fraglens2": "int32",
        # fragment lengthes
        "fragids1": "int64", "fragids2": "int64",
        "mids1": "int32", "mids2": "int32",
        # midpoint of a fragment, determined as "(start+end)/2"
        "dists1": "int32", "dists2": "int32",
        # precise location of cut-site
        "distances": "int32",
        # distance between fragments. If -1, different chromosomes.
        # If -2, different arms.
    }
    # vectors3: restriction-fragment indexes, computed once and cached
    self.vectors3 = {"rfragAbsIdxs1": "int32", "rfragAbsIdxs2": "int32", }
    if dictToStoreIDs == "dict":
        self.rfragIDDict = {}
    elif dictToStoreIDs == "h5dict":
        self.rfragIDDict = mirnylib.h5dict.h5dict()
    else:
        self.rfragIDDict = dictToStoreIDs
    self.metadata = {}
    self.tmpDir = tmpFolder
    #-------Initialization of the genome and parameters-----
    self.mode = mode
    if type(genome) == str:
        self.genome = Genome(genomePath=genome, readChrms=["#", "X"])
    else:
        self.genome = genome
    assert isinstance(self.genome, Genome)  # bla
    if enzymeName == "fromGenome":
        if self.genome.hasEnzyme() == False:
            raise ValueError("Provide the genome with the enzyme or specify enzyme=...")
    else:
        self.genome.setEnzyme(enzymeName)
    self.chromosomeCount = self.genome.chrmCount
    self.fragIDmult = self.genome.fragIDmult  # used for building heatmaps
    self.rFragIDs = self.genome.rfragMidIds
    self.rFragLens = np.concatenate(self.genome.rfragLens)
    self.rFragMids = np.concatenate(self.genome.rfragMids)
    self.rsites = self.genome.rsiteIds
    # to speed up searchsorted we use positive-only numbers
    self.rsitesPositive = self.rsites + 2 * self.fragIDmult
    print "----> New dataset opened, genome %s, filename = %s" % (
        self.genome.folderName, filename)
    self.maximumMoleculeLength = maximumMoleculeLength
    # maximum length of a molecule for SS reads
    self.filename = os.path.abspath(os.path.expanduser(filename))  # File to save the data
    self.chunksize = 10000000
    # Chunk size for h5dict operation, external sorting, etc
    self.inMemory = inMemory
    #------Creating filenames, etc---------
    if os.path.exists(self.filename) and (mode in ['w', 'a']):
        print '----->!!!File already exists! It will be {0}\n'.format(
            {"w": "deleted", "a": "opened in the append mode"}[mode])
    if len(os.path.split(self.filename)[0]) != 0:
        if not os.path.exists(os.path.split(self.filename)[0]):
            warnings.warn("Folder in which you want to create file"
                          "do not exist: %s" % os.path.split(self.filename)[0])
            try:
                os.mkdir(os.path.split(self.filename)[0])
            except:
                raise IOError("Failed to create directory: %s" %
                              os.path.split(self.filename)[0])
    self.h5dict = mirnylib.h5dict.h5dict(self.filename, mode=mode, in_memory=inMemory)
    # If re-opening an existing dataset, restore length and metadata
    if "chrms1" in self.h5dict.keys():
        self.N = len(self.h5dict.get_dataset("chrms1"))
    if "metadata" in self.h5dict:
        self.metadata = self.h5dict["metadata"]
def _setData(self, name, data):
"an internal method to save numpy arrays to HDD quickly"
if name not in self.vectors.keys():
raise ValueError("Attept to save data not "
"specified in self.vectors")
dtype = np.dtype(self.vectors[name])
data = np.asarray(data, dtype=dtype)
self.h5dict[name] = data
def _getData(self, name):
"an internal method to load numpy arrays from HDD quickly"
if name not in self.vectors.keys():
raise ValueError("Attept to load data not "
"specified in self.vectors")
return self.h5dict[name]
def _isSorted(self):
    """Heuristic sortedness check: True iff the first <=10000 values of
    chrms1 are already in non-decreasing order."""
    head = self._getVector("chrms1", 0, min(self.N, 10000))
    return bool(np.sum(head != np.sort(head)) == 0)
def __getattribute__(self, x):
    """Intercept attribute reads so per-read vectors always come from
    the backing storage instead of the instance __dict__.

    "vectors" names are loaded from the h5dict; "vectors2"/"vectors3"
    names are derived (and possibly cached) by _getVector; anything
    else falls through to normal attribute lookup.
    """
    # The container dicts must be fetched via the default mechanism,
    # otherwise the membership tests below would recurse forever.
    if x in ["vectors", "vectors2", "vectors3"]:
        return object.__getattribute__(self, x)
    if x in self.vectors.keys():
        # raw vector: load the whole array from the h5dict
        a = self._getData(x)
        return a
    elif (x in self.vectors2) or (x in self.vectors3):
        # derived vector: computed on demand by _getVector
        return self._getVector(x)
    else:
        return object.__getattribute__(self, x)
def _getSliceableVector(self, name):
    # Lazily-sliceable view of a (possibly derived) vector: slicing the
    # returned object invokes _getVector(name, start, end) on demand.
    return sliceableDataset(self._getVector, name, self.N)
def _getVector(self, name, start=None, end=None):
    """Return the slice [start:end) of the vector *name*.

    Names in self.vectors are read straight from the h5dict.
    Names in self.vectors3 (restriction-fragment indexes) are computed
    once by _calculateRgragIDs and cached in self.rfragIDDict.
    Names in self.vectors2 are derived on the fly from the raw and
    cached vectors.

    Raises
    ------
    ValueError
        If *name* is not declared in any of the three groups, or if a
        raw vector is missing from the h5dict.
    """
    if name in self.vectors:
        if name in self.h5dict:
            return self.h5dict.get_dataset(name)[start:end]
        else:
            raise ValueError("name {0} not in h5dict {1}".format(name, self.h5dict.path))
    if name in self.vectors3:
        datas = self.rfragIDDict
        if name not in datas:
            # rfrag indexes are computed lazily, for both sides at once
            self._calculateRgragIDs()
        assert name in datas
        if hasattr(datas, "get_dataset"):
            dset = datas.get_dataset(name)
        else:
            dset = datas[name]
        return dset[start:end]
    if name in self.vectors2:
        if name == "fragids1":
            return self.genome.rfragMidIds[self._getVector("rfragAbsIdxs1", start, end)]
        elif name == "fragids2":
            return self.genome.rfragMidIds[self._getVector("rfragAbsIdxs2", start, end)]
        elif name == "fraglens1":
            fl1 = self.rFragLens[self._getVector("rfragAbsIdxs1", start, end)]
            # unmapped side (chrms == -1) gets zero fragment length
            fl1[self._getVector("chrms1", start, end) == -1] = 0
            return fl1
        elif name == "fraglens2":
            fl2 = self.rFragLens[self._getVector("rfragAbsIdxs2", start, end)]
            fl2[self._getVector("chrms2", start, end) == -1] = 0
            return fl2
        elif name == "dists1":
            # distance from the cut site to the restriction site the
            # read points to (strand selects upstream/downstream rsite)
            cutids1 = self._getVector("cuts1", start, end) + np.array(self._getVector("chrms1", start, end), dtype=np.int64) * self.fragIDmult
            d1 = np.abs(cutids1 - self.rsites[self._getVector("rfragAbsIdxs1", start, end) + self._getVector("strands1", start, end) - 1])
            d1[self._getVector("chrms1", start, end) == -1] = 0
            return d1
        elif name == "dists2":
            cutids2 = self._getVector("cuts2", start, end) + np.array(self._getVector("chrms2", start, end), dtype=np.int64) * self.fragIDmult
            d2 = np.abs(cutids2 - self.rsites[self._getVector("rfragAbsIdxs2", start, end) + self._getVector("strands2", start, end) - 1])
            d2[self._getVector("chrms2", start, end) == -1] = 0
            return d2
        elif name == "mids1":
            return self.rFragMids[self._getVector("rfragAbsIdxs1", start, end)]
        elif name == "mids2":
            return self.rFragMids[self._getVector("rfragAbsIdxs2", start, end)]
        elif name == "distances":
            dvec = np.abs(self._getVector("mids1", start, end) - self._getVector("mids2", start, end))
            # trans (inter-chromosomal) pairs are marked with -1
            dvec[self.chrms1[start:end] != self.chrms2[start:end]] = -1
            return dvec
    # BUG FIX: the original did `raise "unknown vector: ..."`; raising a
    # plain string is itself a TypeError in Python >= 2.6, masking the
    # real problem.  Raise a proper exception instead.
    raise ValueError("unknown vector: {0}".format(name))
def _calculateRgragIDs(self):
    """Compute, for both ends of every read, the absolute index of the
    restriction fragment it belongs to, and cache the two arrays
    ("rfragAbsIdxs1/2") in self.rfragIDDict."""
    log.debug("Started calculating rfrag IDs")
    # Drop any stale cached results first.
    # (safe in Python 2: dict.keys() returns a list, not a view)
    for i in self.rfragIDDict.keys():
        del self.rfragIDDict[i]
    if hasattr(self.rfragIDDict, "add_empty_dataset"):
        # h5dict-backed cache: allocate on-disk datasets
        self.rfragIDDict.add_empty_dataset("rfragAbsIdxs1", (self.N,), "int32")
        self.rfragIDDict.add_empty_dataset("rfragAbsIdxs2", (self.N,), "int32")
        d1 = self.rfragIDDict.get_dataset("rfragAbsIdxs1")
        d2 = self.rfragIDDict.get_dataset("rfragAbsIdxs2")
    else:
        # plain dict cache: allocate in-memory arrays
        self.rfragIDDict["rfragAbsIdxs1"] = np.empty(self.N, dtype=np.int32)
        self.rfragIDDict["rfragAbsIdxs2"] = np.empty(self.N, dtype=np.int32)
        d1 = self.rfragIDDict["rfragAbsIdxs1"]
        d2 = self.rfragIDDict["rfragAbsIdxs2"]
    # Namespace handed to self.evaluate; rsitesPositive keeps the
    # searchsorted keys positive-only (see __init__).
    constants = {"np":np, "binarySearch":binarySearch,
                 "rsites":self.rsitesPositive, "fragMult":self.fragIDmult,
                 "numexpr":numexpr}
    # Encode (chromosome, position, strand) into a single sortable key,
    # then locate the enclosing fragment by binary search over rsites.
    code1 = dedent("""
    id1 = numexpr.evaluate("(cuts1 + (chrms1+2) * fragMult + 7 * strands1 - 3)")
    del cuts1
    del chrms1
    res = binarySearch(id1 ,rsites)
    """)
    self.evaluate(code1, ["chrms1", "strands1", "cuts1"], outVariable=("res", d1),
                  constants=constants, chunkSize=150000000)
    code2 = dedent("""
    id2 = numexpr.evaluate("(cuts2 + (chrms2 + 2) * fragMult + 7 * strands2 - 3) * (chrms2 >=0)")
    del cuts2
    del chrms2
    res = binarySearch(id2 ,rsites)
    """)
    self.evaluate(code2, ["chrms2", "strands2", "cuts2"], outVariable=("res", d2),
                  constants=constants, chunkSize=150000000)
    log.debug("Finished calculating rfrag IDs")
def __setattr__(self, x, value):
"""a method that overrides set/get operation for self.vectors
so that they're always on HDD"""
if x in ["vectors", "vectors2"]:
return object.__setattr__(self, x, value)
if x in self.vectors.keys():
self._setData(x, value)
elif x in self.vectors2:
raise
else:
return object.__setattr__(self, x, value)
def _dumpMetadata(self):
    """Persist self.metadata into the backing h5dict.

    A no-op (with a warning) when the dataset was opened read-only;
    save failures are reported but never fatal (best-effort).
    """
    if self.mode in ["r"]:
        warnings.warn(RuntimeWarning("Cannot dump metadata in read mode"))
        return
    try:
        self.h5dict["metadata"] = self.metadata
    except Exception, err:
        # deliberately best-effort: report and continue
        print "-" * 20 + "Got Exception when saving metadata" + "-" * 20
        traceback.print_exc()
        print Exception, err
        print "-" * 60
        warnings.warn(RuntimeWarning("Got exception when saving metadata"))
def _checkConsistency(self):
    """
    Internal method to automatically check consistency with the genome
    Every time rebuildFragments is getting called

    Verifies that chromosome indices fit in the genome and that read
    positions do not run past chromosome ends; raises ValueError on a
    clear genome mismatch.
    """
    c1 = self.chrms1
    p1 = self.cuts1
    # For large datasets a 1% subsample is enough for this sanity check.
    if len(c1) > 1000000:
        c1 = c1[::100]
        p1 = p1[::100]
    if c1.max() >= self.genome.chrmCount:
        print 'Genome length', self.genome.chrmCount
        print "Maximum chromosome", c1.max()
        print "note that chromosomes are 0-based, so go",
        print "from 0 to {0}".format(self.genome.chrmCount)
        raise ValueError("Chromosomes do not fit in the genome")
    maxPos = self.genome.chrmLens[c1]
    dif = p1 - maxPos
    if dif.max() > 0:
        # Small overshoots are tolerated (warn only); a large one
        # triggers a per-chromosome check and a hard failure.
        print "Some reads map after chromosome end"
        print 'However, deviation of {0} is not big enough to call an error'.format(dif.max())
        warnings.warn("Reads map {0} bp after the end of chromosome".format(dif.max()))
        if dif.max() > 100:
            print "Possible genome mismatch found"
            print 'Maximum deviation is {0}'.format(dif.max())
            for chrom in range(self.genome.chrmCount):
                posmax = (p1[c1 == chrom]).max()
                chrLens = self.genome.chrmLens[chrom]
                if posmax > chrLens:
                    print "Maximum position for chr {0} is {1}".format(chrom, posmax)
                    print "Length of chr {0} is {1}".format(chrom, chrLens)
                    raise ValueError("Wrong chromosome lengths")
def _getChunks(self, chunkSize="default"):
if chunkSize == "default":
chunkSize = self.chunksize
if chunkSize > 0.5 * self.N:
return [(0, self.N)]
points = range(0, self.N - chunkSize / 2, chunkSize) + [self.N]
return zip(points[:-1], points[1:])
def _sortData(self):
    """
    Orders data such that chrms1 is always more than chrms2, and sorts it by chrms1, cuts1

    Uses an external merge sort through a temporary h5dict, so the whole
    dataset never needs to fit in memory.  Sets self.dataSorted on
    success; a no-op if that flag is already present.
    """
    log.debug("Starting sorting data: making the file")
    if not hasattr(self, "dataSorted"):
        # Scratch h5dict holding the packed records plus a merge buffer
        tmpFile = os.path.join(self.tmpDir, str(np.random.randint(0, 100000000)))
        mydict = mirnylib.h5dict.h5dict(tmpFile,'w')
        data = mydict.add_empty_dataset("sortedData", (self.N,), mydtype)
        tmp = mydict.add_empty_dataset("trash", (self.N,), mydtype)
        # Per-chunk: swap read sides so chrms1 <= chrms2, then pack all
        # six vectors into one structured record for sorting.
        code = dedent("""
        a = np.empty(len(chrms1), dtype = mydtype)
        mask = chrms1 > chrms2
        chrms2[mask],chrms1[mask] = chrms1[mask].copy(), chrms2[mask].copy()
        cuts1[mask],cuts2[mask] = cuts2[mask].copy(), cuts1[mask].copy()
        strands1[mask],strands2[mask] = strands2[mask].copy(),strands1[mask].copy()
        a["chrms1"] = chrms1
        a["pos1"] = cuts1
        a["chrms2"] = chrms2
        a["pos2"] = cuts2
        a["strands1"] = strands1
        a["strands2"] = strands2
        """)
        self.evaluate(expression=code, internalVariables = ["chrms1","chrms2","cuts1","cuts2","strands1","strands2"],
                      constants = {"np":np,"mydtype":mydtype}, outVariable = ("a",data))
        log.debug("Invoking sorter")
        externalMergeSort(data,tmp, sorter=mydtypeSorter,searchsorted=searchsorted)
        log.debug("Getting data back")
        # Unpack the sorted records back into the six h5dict vectors
        sdata = mydict.get_dataset("sortedData")
        c1 = self.h5dict.get_dataset("chrms1")
        c2 = self.h5dict.get_dataset("chrms2")
        p1 = self.h5dict.get_dataset("cuts1")
        p2 = self.h5dict.get_dataset("cuts2")
        s1 = self.h5dict.get_dataset("strands1")
        s2 = self.h5dict.get_dataset("strands2")
        for start,end in self._getChunks():
            data = sdata[start:end]
            c1[start:end] = data["chrms1"]
            c2[start:end] = data["chrms2"]
            p1[start:end] = data["pos1"]
            p2[start:end] = data["pos2"]
            s1[start:end] = data["strands1"]
            s2[start:end] = data["strands2"]
        self.dataSorted = True
        del mydict
        os.remove(tmpFile)
        gc.collect()
    log.debug("Finished")
def evaluate(self, expression, internalVariables, externalVariables={},
             constants={"np": np},
             outVariable="autodetect",
             chunkSize="default"):
    """
    Still experimental class to perform evaluation of
    any expression on hdf5 datasets
    Note that out_variable should be writable by slices.
    ---If one can provide autodetect of values for internal
    variables by parsing an expression, it would be great!---
    .. note ::
        See example of usage of this class in filterRsiteStart,
        parseInputData, etc.
    .. warning ::
        Please avoid passing internal variables
        as "self.cuts1" - use "cuts1"
    .. warning ::
        You have to pass all the modules and functions (e.g. np)
        in a "constants" dictionary.
    Parameters
    ----------
    expression : str
        Mathematical expression, single or multi line
    internal_variables : list of str
        List of variables ("chrms1", etc.), used in the expression
    external_variables : dict , optional
        Dict of {str:array}, where str indicates name of the variable,
        and array - value of the variable.
    constants : dict, optional
        Dictionary of constants to be used in the evaluation.
        Because evaluation happens without namespace,
        you should include numpy here if you use it (included by default)
    out_variable : str or tuple or None, optional
        Variable to output the data. Either internal variable, or tuple
        (name,value), where value is an array
    """
    # NOTE(review): {} / {"np": np} mutable default arguments are shared
    # across calls; safe only because they are never mutated here.
    if type(internalVariables) == str:
        internalVariables = [internalVariables]
    # detecting output variable automatically
    if outVariable == "autodetect":
        # the target of the last assignment in the expression
        outVariable = expression.split("\n")[-1].split("=")[0].strip()
        if outVariable not in self.vectors:
            # dtype unknown yet; resolved after the first chunk runs
            outVariable = (outVariable, "ToDefine")
    code = compile(expression, '<string>', 'exec')
    # compile because we're launching it many times
    for start, end in self._getChunks(chunkSize):
        variables = copy(constants)
        variables["start"] = start
        variables["end"] = end
        # dictionary to pass to the evaluator.
        # It's safer than to use the default locals()
        for name in internalVariables:
            variables[name] = self._getVector(name, start, end)
        for name, variable in externalVariables.items():
            variables[name] = variable[start:end]
        # actually execute the code in our own namespace
        exec code in variables
        # autodetecting output dtype on the first run if not specified
        if outVariable[1] == "ToDefine":
            dtype = variables[outVariable[0]].dtype
            outVariable = (outVariable[0], np.zeros(self.N, dtype))
        if type(outVariable) == str:
            # a raw vector name: write the chunk back into the h5dict
            self.h5dict.get_dataset(outVariable)[start:end] = variables[outVariable]
        elif len(outVariable) == 2:
            # (name, array) pair: fill the supplied array slice
            outVariable[1][start:end] = variables[outVariable[0]]
        elif outVariable is None:
            pass
        else:
            raise ValueError("Please provide str or (str,value)"
                             " for out variable")
    if type(outVariable) == tuple:
        return outVariable[1]
def merge(self, filenames):
"""combines data from multiple datasets
Parameters
----------
filenames : list of strings
List of folders to merge to current working folder
"""
log.debug("Starting merge; number of datasets = {0}".format(len(filenames)))
if self.filename in filenames:
raise StandardError("----> Cannot merge folder into itself! "
"Create a new folder")
for filename in filenames:
if not os.path.exists(filename):
raise IOError("\nCannot open file: %s" % filename)
log.debug("Getting h5dicts")
h5dicts = [mirnylib.h5dict.h5dict(i, mode='r') for i in filenames]
if all(["metadata" in i for i in h5dicts]):
metadatas = [mydict["metadata"] for mydict in h5dicts]
# print metadatas
newMetadata = metadatas.pop()
for oldData in metadatas:
for key, value in oldData.items():
if (key in newMetadata):
try:
newMetadata[key] += value
except:
print "Values {0} and {1} for key {2} cannot be added".format(metadatas[key], value, key)
warnings.warn("Cannot add metadatas")
else:
warnings.warn("key {0} not found in some files".format(key))
self.metadata = newMetadata
self.h5dict["metadata"] = self.metadata
log.debug("Calculating final length")
self.N = sum([len(i.get_dataset("strands1")) for i in h5dicts])
log.debug("Final length equals: {0}".format(self.N))
for name in self.vectors.keys():
log.debug("Processing vector {0}".format(name))
if name in self.h5dict:
del self.h5dict[name]
self.h5dict.add_empty_dataset(name, (self.N,), self.vectors[name])
dset = self.h5dict.get_dataset(name)
position = 0
for mydict in h5dicts:
cur = mydict[name]
dset[position:position + len(cur)] = cur
position += len(cur)
self.h5dict.flush()
time.sleep(0.2) # allow buffers to flush
log.debug("sorting data")
self._sortData()
log.debug("Finished merge")
def parseInputData(self, dictLike, zeroBaseChrom=True,
**kwargs):
"""
__NOT optimized for large datasets__
(use chunking as suggested in pipeline2015)
Inputs data from a dictionary-like object,
containing coordinates of the reads.
Performs filtering of the reads.
A good example of a dict-like object is a numpy.savez
.. warning::
Restriction fragments MUST be specified
exactly as in the Genome class.
.. warning::
Strand information is needed for proper scaling
calculations, but will be imitated if not provided
Parameters
----------
dictLike : dict or dictLike object, or string with h5dict filename
Input reads
dictLike["chrms1,2"] : array-like
Chromosomes of 2 sides of the read
dictLike["cuts1,2"] : array-like
Exact position of cuts
dictLike["strands1,2"], essential : array-like
Direction of the read
dictLike["rsites1,2"], optional : array-like
Position of rsite to which the read is pointing
dictLike["uprsites1,2"] , optional : array-like
rsite upstream (larger genomic coordinate) of the cut position
dictLike["downrsites1,2"] , optional : array-like
rsite downstream (smaller genomic coordinate) of the cut position
zeroBaseChrom : bool , optional
Use zero-base chromosome counting if True, one-base if False
enzymeToFillRsites : None or str, optional if rsites are specified
Enzyme name to use with Bio.restriction
removeSS : bool, optional
If set to True, removes SS reads from the library
noFiltering : bool, optional
If True then no filters are applied to the data. False by default.
Overrides removeSS. Experimental, do not use if you are not sure.
"""
if type(dictLike) == str:
if not os.path.exists(dictLike):
raise IOError("File not found: %s" % dictLike)
print " loading data from file %s (assuming h5dict)" % dictLike
dictLike = mirnylib.h5dict.h5dict(dictLike, 'r') # attempting to open h5dict
"---Filling in chromosomes and positions - mandatory objects---"
a = dictLike["chrms1"]
self.trackLen = len(a)
if zeroBaseChrom == True:
self.chrms1 = a
self.chrms2 = dictLike["chrms2"]
else:
self.chrms1 = a - 1
self.chrms2 = dictLike["chrms2"] - 1
self.N = len(self.chrms1)
del a
self.cuts1 = dictLike['cuts1']
self.cuts2 = dictLike['cuts2']
if not (("strands1" in dictLike.keys()) and
("strands2" in dictLike.keys())):
warnings.warn("No strand information provided,"
" assigning random strands.")
t = np.random.randint(0, 2, self.trackLen)
self.strands1 = t
self.strands2 = 1 - t
del t
noStrand = True
else:
self.strands1 = dictLike["strands1"]
self.strands2 = dictLike["strands2"]
noStrand = False # strand information filled in
self.metadata["100_TotalReads"] = self.trackLen
try:
dictLike["misc"]["genome"]["idx2label"]
self.updateGenome(self.genome, oldGenome=dictLike["misc"]["genome"]["idx2label"], putMetadata=True)
except KeyError:
assumedGenome = Genome(self.genome.genomePath)
self.updateGenome(self.genome, oldGenome=assumedGenome, putMetadata=True)
warnings.warn("\n Genome not found in mapped data. \n"
"Assuming genome comes from the same folder with all chromosomes")
self.metadata["152_removedUnusedChromosomes"] = self.trackLen - self.N
self.metadata["150_ReadsWithoutUnusedChromosomes"] = self.N
# Discard dangling ends and self-circles
DSmask = (self.chrms1 >= 0) * (self.chrms2 >= 0)
self.metadata["200_totalDSReads"] = DSmask.sum()
self.metadata["201_DS+SS"] = len(DSmask)
self.metadata["202_SSReadsRemoved"] = len(DSmask) - DSmask.sum()
sameFragMask = self.evaluate("a = (fragids1 == fragids2)",
["fragids1", "fragids2"]) * DSmask
cutDifs = self.cuts2[sameFragMask] > self.cuts1[sameFragMask]
s1 = self.strands1[sameFragMask]
s2 = self.strands2[sameFragMask]
SSDE = (s1 != s2)
SS = SSDE * (cutDifs == s2)
SS_N = SS.sum()
SSDE_N = SSDE.sum()
sameFrag_N = sameFragMask.sum()
self.metadata["210_sameFragmentReadsRemoved"] = sameFrag_N
self.metadata["212_Self-Circles"] = SS_N
self.metadata["214_DandlingEnds"] = SSDE_N - SS_N
self.metadata["216_error"] = sameFrag_N - SSDE_N
mask = DSmask * (-sameFragMask)
del DSmask, sameFragMask
noSameFrag = mask.sum()
# Discard unused chromosomes
if noStrand == True:
# Can't tell if reads point to each other.
dist = self.evaluate("a = np.abs(cuts1 - cuts2)",
["cuts1", "cuts2"])
else:
# distance between sites facing each other
dist = self.evaluate("a = numexpr.evaluate('- cuts1 * (2 * strands1 -1) - "
"cuts2 * (2 * strands2 - 1)')",
["cuts1", "cuts2", "strands1", "strands2"],
constants={"numexpr":numexpr})
readsMolecules = self.evaluate(
"a = numexpr.evaluate('(chrms1 == chrms2)&(strands1 != strands2) & (dist >=0) &"
" (dist <= maximumMoleculeLength)')",
internalVariables=["chrms1", "chrms2", "strands1", "strands2"],
externalVariables={"dist": dist},
constants={"maximumMoleculeLength": self.maximumMoleculeLength, "numexpr":numexpr})
mask *= (readsMolecules == False)
extraDE = mask.sum()
self.metadata["220_extraDandlingEndsRemoved"] = -extraDE + noSameFrag
if mask.sum() == 0:
raise Exception(
'No reads left after filtering. Please, check the input data')
del dist
del readsMolecules
if not kwargs.get('noFiltering', False):
self.maskFilter(mask)
self.metadata["300_ValidPairs"] = self.N
del dictLike
def printMetadata(self, saveTo=None):
    """Pretty-print self.metadata (and optionally write it to a file).

    Keys follow the "NNN_name" convention used throughout this class;
    the 2nd and 3rd digits of the numeric prefix select the indentation
    depth, so sub-counters nest under their parent counter.
    """
    self._dumpMetadata()
    for i in sorted(self.metadata):
        if i[2] != "0":
            print "\t\t",
        elif i[1] != "0":
            print "\t",
        print i, self.metadata[i]
    if saveTo != None:
        # same layout, written as "key: value" lines
        with open(saveTo, 'w') as myfile:
            for i in sorted(self.metadata):
                if i[2] != "0":
                    myfile.write("\t\t")
                elif i[1] != "0":
                    myfile.write("\t")
                myfile.write(str(i))
                myfile.write(": ")
                myfile.write(str(self.metadata[i]))
                myfile.write("\n")
def updateGenome(self, newGenome, oldGenome="current", putMetadata=False):
    """
    __partially optimized for large datasets__
    Updates dataset to a new genome, with a fewer number of chromosomes.
    Use it to delete chromosomes.
    By default, removes all DS reads with that chromosomes.
    Parameters
    ----------
    newGenome : Genome object
        Genome to replace the old genome, with fewer chromosomes
    removeSSreads : "trans"(default), "all" or "none"
        "trans": remove all reads from deleted chromosomes,
        ignore the rest.
        "all": remove all SS reads from all chromosomes
        "None": mark all trans reads as SS reads
    putMetadata : bool (optional)
        Writes metadata for M and Y reads
    oldGenome : Genome object or idx2label of old genome, optional
    """
    assert isinstance(newGenome, Genome)
    newN = newGenome.chrmCount
    if oldGenome == "current":
        oldGenome = self.genome
    # upgrade maps old chromosome indices to new ones (-1 = removed)
    upgrade = newGenome.upgradeMatrix(oldGenome)
    if isinstance(oldGenome, Genome):
        if oldGenome.hasEnzyme():
            newGenome.setEnzyme(oldGenome.enzymeName)
        # from here on only the {index: label} mapping is needed
        oldGenome = oldGenome.idx2label
    oldN = len(oldGenome.keys())
    label2idx = dict(zip(oldGenome.values(), oldGenome.keys()))
    chrms1 = np.array(self.chrms1, int)
    chrms2 = np.array(self.chrms2, int)
    SS = (chrms1 < 0) + (chrms2 < 0)
    metadata = {}
    # Mitochondrial-read statistics (only when chrM exists in old genome)
    if "M" in label2idx:
        Midx = label2idx["M"]
        M1 = chrms1 == Midx
        M2 = chrms2 == Midx
        mToM = (M1 * M2).sum()
        mToAny = (M1 + M2).sum()
        mToSS = ((M1 + M2) * SS).sum()
        metadata["102_mappedSide1"] = (chrms1 >= 0).sum()
        metadata["104_mappedSide2"] = (chrms2 >= 0).sum()
        metadata["112_M-to-M_reads"] = mToM
        metadata["114_M-to-Any_reads"] = mToAny
        metadata["116_M-to-SS_reads"] = mToSS
        metadata["118_M-to-DS_reads"] = mToAny - mToSS
    # Y-chromosome statistics (only when chrY exists in old genome)
    if "Y" in label2idx:
        Yidx = label2idx["Y"]
        Y1 = chrms1 == Yidx
        Y2 = chrms2 == Yidx
        yToY = (Y1 * Y2).sum()
        yToAny = (Y1 + Y2).sum()
        yToSS = ((Y1 + Y2) * SS).sum()
        metadata["122_Y-to-Y_reads"] = yToY
        metadata["124_Y-to-Any_reads"] = yToAny
        metadata["126_Y-to-SS_reads"] = yToSS
        metadata["128_Y-to-DS_reads"] = yToAny - yToSS
    if putMetadata:
        self.metadata.update(metadata)
    if oldN == newN:
        # same chromosome set: nothing to remap or filter
        return None
    if upgrade is not None:
        upgrade[upgrade == -1] = 9999  # to tell old SS reads from new SS reads
        chrms1 = upgrade[chrms1]
        self.chrms1 = chrms1
        del chrms1
        chrms2 = upgrade[chrms2]
        self.chrms2 = chrms2
    "Keeping only DS reads"
    # reads on removed chromosomes got index 9999 >= newN and are dropped
    mask = ((self.chrms1 < newN) * (self.chrms2 < newN))
    self.genome = newGenome
    self.maskFilter(mask)
def buildAllHeatmap(self, resolution, countDiagonalReads="Once",
                    useWeights=False):
    """
    __optimized for large datasets__
    Creates an all-by-all heatmap in accordance with mapping
    provided by 'genome' class

    Reads are binned chunk-by-chunk into a flat label
    (bin1 * numBins + bin2) and counted with np.bincount, then the
    matrix is symmetrized.

    Parameters
    ----------
    resolution : int or str
        Resolution of a heatmap. May be an int or 'fragment' for
        restriction fragment resolution.
    countDiagonalReads : "once" or "twice"
        How many times to count reads in the diagonal bin
    useWeights : bool
        If True, then take weights from 'weights' variable. False by default.
    """
    for start,end in self._getChunks(30000000):
        if type(resolution) == int:
            # 8 bytes per record + heatmap
            self.genome.setResolution(resolution)
            numBins = self.genome.numBins
            # flat bin index: (chromStartBin + mid//resolution) for side 1,
            # times numBins, plus the same for side 2
            label = self.genome.chrmStartsBinCont[self._getVector("chrms1", start, end)]
            label = np.asarray(label, dtype="int64")
            label += self._getVector("mids1",start,end) / resolution
            label *= numBins
            label += self.genome.chrmStartsBinCont[self._getVector("chrms2",start,end)]
            label += self._getVector("mids2",start,end) / resolution
        elif resolution == 'fragment':
            numBins = self.genome.numRfrags
            label = self._getVector("rfragAbsIdxs1",start,end)
            label *= numBins
            label += self._getVector("rfragAbsIdxs2",start,end)
        else:
            raise Exception('Unknown value for resolution: {0}'.format(
                resolution))
        if useWeights:
            if 'weights' not in self.vectors:
                raise Exception('Set read weights first!')
            # NOTE(review): self.fragmentWeights is passed whole, not
            # sliced to [start:end] like label is — verify for chunked runs
            counts = np.bincount(label, weights=self.fragmentWeights, minlength=numBins ** 2)
        else:
            counts = np.bincount(label, minlength=numBins ** 2)
        if len(counts) > numBins ** 2:
            raise StandardError("\nheatmap exceed length of the genome!!!"
                                " Check genome")
        counts.shape = (numBins, numBins)
        try:
            # accumulate over chunks; first chunk initializes heatmap
            heatmap += counts  # @UndefinedVariable
        except:
            heatmap = counts
    # Symmetrize: fold the lower triangle into the upper and mirror back
    for i in xrange(len(heatmap)):
        heatmap[i, i:] += heatmap[i:, i]
        heatmap[i:, i] = heatmap[i, i:]
    if countDiagonalReads.lower() == "once":
        # the fold above doubled the diagonal; halve it back
        diag = np.diag(heatmap)
        fillDiagonal(heatmap, diag / 2)
    elif countDiagonalReads.lower() == "twice":
        pass
    else:
        raise ValueError("Bad value for countDiagonalReads")
    return heatmap
def buildHeatmapWithOverlapCpp(self, resolution, countDiagonalReads="Twice",
                               maxBinSpawn=10):
    """
    __NOT optimized for large datasets__
    Creates an all-by-all heatmap in accordance with mapping
    provided by 'genome' class
    This method assigns fragments to all bins which
    the fragment overlaps, proportionally

    The proportional assignment is done by inlined C code via
    scipy.weave; requires a working weave/compiler setup (Python 2 era).

    Parameters
    ----------
    resolution : int or str
        Resolution of a heatmap. May be an int or 'fragment' for
        restriction fragment resolution.
    countDiagonalReads : "once" or "twice"
        How many times to count reads in the diagonal bin
    maxBinSpawn : int, optional, not more than 10
        Discard read if it spawns more than maxBinSpawn bins
    """
    if type(resolution) == int:
        # many bytes per record + heatmap
        self.genome.setResolution(resolution)
    N = self.N
    N = int(N)
    # Fractional bin coordinates of each fragment's start/end, for both
    # read sides: chromStartBin + (mid +/- fraglen/2) / resolution
    low1 = self.genome.chrmStartsBinCont[self.chrms1]
    low1 = np.asarray(low1, dtype="float32")
    low1 += (self.mids1 - self.fraglens1 / 2) / float(resolution)
    high1 = self.genome.chrmStartsBinCont[self.chrms1]
    high1 = np.asarray(high1, dtype="float32")
    high1 += (self.mids1 + self.fraglens1 / 2) / float(resolution)
    low2 = self.genome.chrmStartsBinCont[self.chrms2]
    low2 = np.asarray(low2, dtype="float32")
    low2 += (self.mids2 - self.fraglens2 / 2) / float(resolution)
    high2 = self.genome.chrmStartsBinCont[self.chrms2]
    high2 = np.asarray(high2, dtype="float32")
    high2 += (self.mids2 + self.fraglens2 / 2) / float(resolution)
    heatmap = np.zeros((self.genome.numBins, self.genome.numBins),
                       dtype="float64", order="C")
    heatmapSize = len(heatmap)  # @UnusedVariable
    from scipy import weave
    # C kernel: for each read, spread a unit of weight over the bins
    # overlapped by each fragment, proportionally to the overlap.
    code = """
    #line 1045 "fragmentHiC.py"
    double vector1[100];
    double vector2[100];
    for (int readNum = 0; readNum < N; readNum++)
    {
    for (int i=0; i<10; i++)
    {
    vector1[i] = 0;
    vector2[i] = 0;
    }
    double l1 = low1[readNum];
    double l2 = low2[readNum];
    double h1 = high1[readNum];
    double h2 = high2[readNum];
    if ((h1 - l1) > maxBinSpawn) continue;
    if ((h2 - l2) > maxBinSpawn) continue;
    int binNum1 = ceil(h1) - floor(l1);
    int binNum2 = ceil(h2) - floor(l2);
    double binLen1 = h1 - l1;
    double binLen2 = h2 - l2;
    int b1 = floor(l1);
    int b2 = floor(l2);
    if (binNum1 == 1)
    vector1[0] = 1.;
    else
    {
    vector1[0] = (ceil(l1 + 0.00001) - l1) / binLen1;
    for (int t = 1; t< binNum1 - 1; t++)
    {vector1[t] = 1. / binLen1;}
    vector1[binNum1 - 1] = (h1 - floor(h1)) / binLen1;
    }
    if (binNum2 == 1) vector2[0] = 1.;
    else
    {
    vector2[0] = (ceil(l2 + 0.0001) - l2) / binLen2;
    for (int t = 1; t< binNum2 - 1; t++)
    {vector2[t] = 1. / binLen2;}
    vector2[binNum2 - 1] = (h2 - floor(h2)) / binLen2;
    }
    for (int i = 0; i< binNum1; i++)
    {
    for (int j = 0; j < binNum2; j++)
    {
    heatmap[(b1 + i) * heatmapSize + b2 + j] += vector1[i] * vector2[j];
    }
    }
    }
    """
    weave.inline(code,
                 ['low1', "high1", "low2", "high2",
                  "N", "heatmap", "maxBinSpawn",
                  "heatmapSize",
                  ],
                 extra_compile_args=['-march=native -O3 '],
                 support_code=r"""
    #include <stdio.h>
    #include <math.h>""")
    counts = heatmap
    # Symmetrize: fold lower triangle into upper and mirror back
    for i in xrange(len(counts)):
        counts[i, i:] += counts[i:, i]
        counts[i:, i] = counts[i, i:]
    diag = np.diag(counts)
    if countDiagonalReads.lower() == "once":
        # the fold above doubled the diagonal; halve it back
        fillDiagonal(counts, diag / 2)
    elif countDiagonalReads.lower() == "twice":
        pass
    else:
        raise ValueError("Bad value for countDiagonalReads")
    return counts
    def getHiResHeatmapWithOverlaps(self, resolution, chromosome, start = 0, end = None, countDiagonalReads="Twice", maxBinSpawn=10):
        """
        Build a dense cis heatmap for one chromosome (or the [start, end)
        slice of it) at the given resolution, distributing each fragment
        over every bin it overlaps, proportionally to the overlap length.

        Parameters
        ----------
        resolution : int
            Bin size in bp.
        chromosome : int
            Zero-based chromosome index.
        start, end : int, optional
            Region boundaries in bp; end defaults to the chromosome length.
        countDiagonalReads : "once" or "twice"
            How many times reads in the diagonal bin are counted.
        maxBinSpawn : int
            Reads whose fragment spans more than this many bins are skipped.

        Returns
        -------
        Symmetrized (heatmapSize x heatmapSize) float64 numpy array.
        """
        c1 = self.h5dict.get_dataset("chrms1")
        p1 = self.h5dict.get_dataset("cuts1")
        print "getting heatmap", chromosome, start, end
        from scipy import weave
        if end == None:
            end = self.genome.chrmLens[chromosome]
        # Data is sorted by (chrms1, cuts1); binary search locates the
        # slice of reads whose first side falls into the region.
        low = h5dictBinarySearch(c1, p1, (chromosome, start), "left")
        high = h5dictBinarySearch(c1, p1, (chromosome, end), "right")
        c1 = self._getVector("chrms1", low, high)
        c2 = self._getVector("chrms2", low, high)
        mids1 = self._getVector("mids1", low, high)
        mids2 = self._getVector("mids2", low, high)
        fraglens1 = self._getVector("fraglens1", low, high)
        fraglens2 = self._getVector("fraglens2", low, high)
        # Keep cis reads whose second side also falls into [start, end).
        mask = (c1 == c2) * (mids2 >= start) * (mids2 < end)
        mids1 = mids1[mask]
        mids2 = mids2[mask]
        fraglens1 = fraglens1[mask]
        fraglens2 = fraglens2[mask]
        # Fragment extents, region-relative, then converted to fractional
        # bin coordinates.
        low1 = mids1 - fraglens1 / 2 - start
        high1 = low1 + fraglens1
        low2 = mids2 - fraglens2 / 2 - start
        high2 = low2 + fraglens2
        low1 = low1 / float(resolution)
        high1 = high1 / float(resolution)
        low2 = low2 / float(resolution)
        high2 = high2 / float(resolution)
        N = len(low1)  # @UnusedVariable -- used inside the weave kernel
        if chromosome == 1:
            pass
            # 0/0   (leftover debugging hook, intentionally disabled)
        heatmapSize = int(np.ceil((end - start) / float(resolution)))
        heatmap = np.zeros((heatmapSize, heatmapSize),
                           dtype="float64", order="C")
        # C kernel: for each read, build per-side overlap vectors (each
        # summing to 1) and add their outer product to the heatmap, so
        # every kept read contributes exactly 1 in total.
        code = r"""
        #line 1045 "fragmentHiC.py"
        double vector1[1000];
        double vector2[1000];
        for (int readNum = 0; readNum < N; readNum++)
        {
            for (int i=0; i<10; i++)
            {
                vector1[i] = 0;
                vector2[i] = 0;
            }
            double l1 = low1[readNum];
            double l2 = low2[readNum];
            double h1 = high1[readNum];
            double h2 = high2[readNum];
            if ((h1 - l1) > maxBinSpawn) continue;
            if ((h2 - l2) > maxBinSpawn) continue;
            int binNum1 = ceil(h1) - floor(l1);
            int binNum2 = ceil(h2) - floor(l2);
            double binLen1 = h1 - l1;
            double binLen2 = h2 - l2;
            int b1 = floor(l1);
            int b2 = floor(l2);
            if (binNum1 == 1)
                vector1[0] = 1.;
            else
            {
                vector1[0] = (ceil(l1+ 0.00000001) - l1) / binLen1;
                for (int t = 1; t< binNum1 - 1; t++)
                {vector1[t] = 1. / binLen1;}
                vector1[binNum1 - 1] = (h1 - floor(h1)) / binLen1;
            }
            if (binNum2 == 1) vector2[0] = 1.;
            else
            {
                vector2[0] = (ceil(l2 + 0.00000001) - l2) / binLen2;
                for (int t = 1; t< binNum2 - 1; t++)
                {vector2[t] = 1. / binLen2;}
                vector2[binNum2 - 1] = (h2 - floor(h2)) / binLen2;
            }
            if ((b1 + binNum1) >= heatmapSize) { continue;}
            if ((b2 + binNum2) >= heatmapSize) { continue;}
            if ((b1 < 0)) {continue;}
            if ((b2 < 0)) {continue;}
            double psum = 0;
            for (int i = 0; i< binNum1; i++)
            {
                for (int j = 0; j < binNum2; j++)
                {
                    heatmap[(b1 + i) * heatmapSize + b2 + j] += vector1[i] * vector2[j];
                    psum += vector1[i] * vector2[j];
                }
            }
            if (abs(psum-1) > 0.0000001)
            {
                printf("bins num 1 = %d \n",binNum1);
                printf("bins num 2 = %d \n",binNum2);
                printf("psum = %f \n", psum);
            }
        }
        """
        weave.inline(code,
                     ['low1', "high1", "low2", "high2",
                      "N", "heatmap", "maxBinSpawn",
                      "heatmapSize",
                      ],
                     extra_compile_args=['-march=native -O3 '],
                     support_code=r"""
                     #include <stdio.h>
                     #include <math.h>
                     """)
        # Symmetrize: each read was recorded once; mirror across the diagonal.
        for i in xrange(len(heatmap)):
            heatmap[i, i:] += heatmap[i:, i]
            heatmap[i:, i] = heatmap[i, i:]
        if countDiagonalReads.lower() == "once":
            diag = np.diag(heatmap).copy()
            fillDiagonal(heatmap, diag / 2)
            del diag
        elif countDiagonalReads.lower() == "twice":
            pass
        else:
            raise ValueError("Bad value for countDiagonalReads")
        weave.inline("")  # to release all buffers of weave.inline
        gc.collect()
        return heatmap
def saveHiResHeatmapWithOverlaps(self, filename, resolution, countDiagonalReads="Twice", maxBinSpawn=10, chromosomes="all"):
"""Creates within-chromosome heatmaps at very high resolution,
assigning each fragment to all the bins it overlaps with,
proportional to the area of overlaps.
Parameters
----------
resolution : int or str
Resolution of a heatmap.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
maxBinSpawn : int, optional, not more than 10
Discard read if it spawns more than maxBinSpawn bins
"""
if not self._isSorted():
print "Data is not sorted!!!"
self._sortData()
tosave = mirnylib.h5dict.h5dict(filename)
if chromosomes == "all":
chromosomes = range(self.genome.chrmCount)
for chrom in chromosomes:
heatmap = self.getHiResHeatmapWithOverlaps(resolution, chrom,
countDiagonalReads = countDiagonalReads, maxBinSpawn = maxBinSpawn)
tosave["{0} {0}".format(chrom)] = heatmap
del heatmap
gc.collect()
print "----> By chromosome Heatmap saved to '{0}' at {1} resolution".format(filename, resolution)
def saveSuperHighResMapWithOverlaps(self, filename, resolution, chunkSize = 20000000, chunkStep = 10000000, countDiagonalReads="Twice", maxBinSpawn=10, chromosomes="all"):
"""Creates within-chromosome heatmaps at very high resolution,
assigning each fragment to all the bins it overlaps with,
proportional to the area of overlaps.
Parameters
----------
resolution : int or str
Resolution of a heatmap.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
maxBinSpawn : int, optional, not more than 10
Discard read if it spawns more than maxBinSpawn bins
"""
tosave = mirnylib.h5dict.h5dict(filename)
if chromosomes == "all":
chromosomes = range(self.genome.chrmCount)
for chrom in chromosomes:
chrLen = self.genome.chrmLens[chrom]
chunks = [(i * chunkStep, min(i * chunkStep + chunkSize, chrLen)) for i in xrange(chrLen / chunkStep + 1)]
for chunk in chunks:
heatmap = self.getHiResHeatmapWithOverlaps(resolution, chrom,
start = chunk[0], end = chunk[1],
countDiagonalReads = countDiagonalReads, maxBinSpawn = maxBinSpawn)
tosave["{0}_{1}_{2}".format(chrom, chunk[0], chunk[1])] = heatmap
print "----> Super-high-resolution heatmap saved to '{0}' at {1} resolution".format(filename, resolution)
def fragmentFilter(self, fragments):
"""
__optimized for large datasets__
keeps only reads that originate from fragments in 'fragments'
variable, for DS - on both sides
Parameters
----------
fragments : numpy.array of fragment IDs or bools
List of fragments to keep, or their indexes in self.rFragIDs
"""
if fragments.dtype == np.bool:
fragments = self.rFragIDs[fragments]
m1 = arrayInArray(self._getSliceableVector("fragids1"), fragments, chunkSize=self.chunksize)
m2 = arrayInArray(self._getSliceableVector("fragids2"), fragments, chunkSize=self.chunksize)
mask = np.logical_and(m1, m2)
self.maskFilter(mask)
    def maskFilter(self, mask):
        """
        __optimized for large datasets__
        keeps only reads designated by mask

        Parameters
        ----------
        mask : array of bools
            True for reads to keep; length must equal self.N.
        """
        # Uses 16 bytes per read
        # Cached per-rfrag lookups are invalidated: the read set changes.
        for i in self.rfragIDDict.keys():
            del self.rfragIDDict[i]
        length = 0
        ms = mask.sum()  # number of surviving reads
        assert mask.dtype == np.bool
        self.N = ms
        for name in self.vectors:
            data = self._getData(name)
            ld = len(data)
            # All per-read vectors must have the same length; the first
            # vector seen defines the reference length.
            if length == 0:
                length = ld
            else:
                if ld != length:
                    # NOTE(review): on inconsistent vector lengths the
                    # working file is deleted but execution continues --
                    # presumably self.delete() aborts; confirm.
                    self.delete()
            newdata = fasterBooleanIndexing(data, mask, outLen=ms,
                                            bounds=False)  # see mirnylib.numutils
            del data
            self._setData(name, newdata)
            del newdata
        del mask
def filterExtreme(self, cutH=0.005, cutL=0):
"""
__optimized for large datasets__
removes fragments with most and/or least # counts
Parameters
----------
cutH : float, 0<=cutH < 1, optional
Fraction of the most-counts fragments to be removed
cutL : float, 0<=cutL<1, optional
Fraction of the least-counts fragments to be removed
"""
print "----->Extreme fragments filter: remove top %lf, "\
"bottom %lf fragments" % (cutH, cutL)
s = self.fragmentSum()
ss = np.sort(s)
valueL, valueH = np.percentile(ss[ss > 0], [100. * cutL, 100 * (1. - cutH)])
news = (s >= valueL) * (s <= valueH)
N1 = self.N
self.fragmentFilter(self.rFragIDs[news])
self.metadata["350_removedFromExtremeFragments"] = N1 - self.N
self._dumpMetadata()
print " #Top fragments are: ", ss[-10:]
print " # Cutoff for low # counts is (counts): ", valueL,
print "; cutoff for large # counts is: ", valueH, "\n"
def filterLarge(self, cutlarge=100000, cutsmall=100):
"""
__optimized for large datasets__
removes very large and small fragments
Parameters
----------
cutlarge : int
remove fragments larger than it
cutsmall : int
remove fragments smaller than it
"""
print "----->Small/large fragments filter: keep strictly less"\
"than %d,strictly more than %d bp" % (cutlarge, cutsmall)
p = (self.rFragLens < (cutlarge)) * (self.rFragLens > cutsmall)
N1 = self.N
self.fragmentFilter(self.rFragIDs[p])
N2 = self.N
self.metadata["340_removedLargeSmallFragments"] = N1 - N2
self._dumpMetadata()
    def filterRsiteStart(self, offset=5):
        """
        __optimized for large datasets__
        Removes reads that start within x bp near rsite

        Parameters
        ----------
        offset : int
            Number of bp to exclude next to rsite, not including offset
        """
        # TODO:(MI) fix this so that it agrees with the definition.
        print "----->Semi-dangling end filter: remove guys who start %d"\
            " bp near the rsite" % offset
        # Chunked numexpr evaluation: keep reads where, on BOTH sides, the
        # distance from the cut to the fragment end is at least `offset`.
        expression = 'mask = numexpr.evaluate("(abs(dists1 - fraglens1) >= offset) & '\
            '((abs(dists2 - fraglens2) >= offset))")'
        mask = self.evaluate(expression,
                             internalVariables=["dists1", "fraglens1",
                                                "dists2", "fraglens2"],
                             constants={"offset": offset, "np": np, "numexpr": numexpr},
                             outVariable=("mask", np.zeros(self.N, bool)))
        self.metadata["310_startNearRsiteRemoved"] = len(mask) - mask.sum()
        self.maskFilter(mask)
    def filterDuplicates(self, mode="hdd", tmpDir="default", chunkSize = 100000000):
        """
        __optimized for large datasets__
        removes duplicate molecules

        mode : "ram" or "hdd"
            "ram" builds the dedup keys fully in memory; "hdd" streams
            them through a temporary h5dict and an external merge sort.
        """
        # Uses a lot!
        print "----->Filtering duplicates in DS reads: "
        if tmpDir == "default":
            tmpDir = self.tmpDir
        # an array to determine unique rows. Eats 1 byte per DS record
        if mode == "ram":
            # Encode each read as an unordered pair of int64 keys
            # (chrm * fragIDmult + cut), view the pair as a 16-byte string
            # and keep one read per unique string.
            log.debug("Filtering duplicates in RAM")
            dups = np.zeros((self.N, 2), dtype="int64", order="C")
            dups[:, 0] = self.chrms1
            dups[:, 0] *= self.fragIDmult
            dups[:, 0] += self.cuts1
            dups[:, 1] = self.chrms2
            dups[:, 1] *= self.fragIDmult
            dups[:, 1] += self.cuts2
            dups.sort(axis=1)  # make the pair orientation-independent
            dups.shape = (self.N * 2)
            strings = dups.view("|S16")
            # Converting two indices to a single string to run unique
            uids = uniqueIndex(strings)
            del strings, dups
            stay = np.zeros(self.N, bool)
            stay[uids] = True  # indexes of unique DS elements
            del uids
        elif mode == "hdd":
            # Out-of-core: write (key1, key2, originalIndex) triples as
            # 24-byte strings into a temporary h5dict, externally sort,
            # then scan sorted chunks marking one read per equal-key run.
            tmpFile = os.path.join(tmpDir, str(np.random.randint(0, 100000000)))
            a = mirnylib.h5dict.h5dict(tmpFile)
            a.add_empty_dataset("duplicates", (self.N,), dtype="|S24")
            a.add_empty_dataset("temp", (self.N,), dtype="|S24")
            dset = a.get_dataset("duplicates")
            tempdset = a.get_dataset("temp")
            code = dedent("""
            tmp = np.array(chrms1, dtype=np.int64) * fragIDmult + cuts1
            tmp2 = np.array(chrms2, dtype=np.int64) * fragIDmult + cuts2
            newarray = np.zeros((len(tmp),3), dtype = np.int64)
            newarray[:,0] = tmp
            newarray[:,1] = tmp2
            newarray[:,:2].sort(axis=1)
            newarray[:,2] = np.arange(start, end, dtype=np.int64)
            newarray.shape = (3*len(tmp))
            a = np.array(newarray.view("|S24"))
            assert len(a) == len(chrms1)
            """)
            self.evaluate(code, ["chrms1", "cuts1", "chrms2", "cuts2"],
                          constants={"np": np, "fragIDmult": self.fragIDmult},
                          outVariable=("a", dset))
            stay = np.zeros(self.N, bool)
            numutils.externalMergeSort(dset, tempdset, chunkSize=chunkSize)
            # Chunks overlap by one record (end + 1) so runs straddling a
            # chunk boundary are still compared.
            bins = range(0, self.N - 1000, self.chunksize) + [self.N - 1]
            for start, end in zip(bins[:-1], bins[1:]):
                curset = dset[start:end + 1]
                curset = curset.view(dtype=np.int64)
                curset.shape = (len(curset) / 3, 3)
                # A record is kept if it differs from its successor in
                # either key; column 2 is the original read index.
                unique = (curset[:-1, 0] != curset[1:, 0]) + (curset[:-1, 1] != curset[1:, 1])
                stay[curset[:, 2][unique]] = True
                if end == self.N - 1:
                    stay[curset[-1, 2]] = True  # last record has no successor
            del a
            del tmpFile
        self.metadata["320_duplicatesRemoved"] = len(stay) - stay.sum()
        self.maskFilter(stay)
def filterByCisToTotal(self, cutH=0.0, cutL=0.01):
"""
__NOT optimized for large datasets__
Remove fragments with too low or too high cis-to-total ratio.
Parameters
----------
cutH : float, 0<=cutH < 1, optional
Fraction of the fragments with largest cis-to-total ratio
to be removed.
cutL : float, 0<=cutL<1, optional
Fraction of the fragments with lowest cis-to-total ratio
to be removed.
"""
concRfragAbsIdxs = np.r_[self.rfragAbsIdxs1, self.rfragAbsIdxs2]
concCis = np.r_[self.chrms1 == self.chrms2, self.chrms1 == self.chrms2]
cis = np.bincount(concRfragAbsIdxs[concCis])
total = np.bincount(concRfragAbsIdxs)
cistototal = np.nan_to_num(cis / total.astype('float'))
numEmptyFrags = (cistototal == 0).sum()
cutLFrags = int(np.ceil((len(cistototal) - numEmptyFrags) * cutL))
cutHFrags = int(np.ceil((len(cistototal) - numEmptyFrags) * cutH))
sortedCistotot = np.sort(cistototal)
lCutoff = sortedCistotot[cutLFrags + numEmptyFrags]
hCutoff = sortedCistotot[len(cistototal) - 1 - cutHFrags]
fragsToFilter = np.where((cistototal < lCutoff) + (cistototal > hCutoff))[0]
print ('Keep fragments with cis-to-total ratio in range ({0},{1}), '
'discard {2} fragments').format(lCutoff, hCutoff, cutLFrags + cutHFrags)
mask = (arrayInArray(self.rfragAbsIdxs1, fragsToFilter) +
arrayInArray(self.rfragAbsIdxs2, fragsToFilter))
self.metadata["330_removedByCisToTotal"] = mask.sum()
self.maskFilter(-mask)
def filterTooClose(self, minRsitesDist=2):
"""
__NOT optimized for large datasets__
Remove fragment pairs separated by less then `minRsitesDist`
restriction sites within the same chromosome.
"""
mask = (np.abs(self.rfragAbsIdxs1 - self.rfragAbsIdxs2) < minRsitesDist) * (self.chrms1 == self.chrms2)
self.metadata["360_closeFragmentsRemoved"] = mask.sum()
print '360_closeFragmentsRemoved: ', mask.sum()
self.maskFilter(-mask)
def filterOrientation(self):
"__NOT optimized for large datasets__"
# Keep only --> --> or <-- <-- pairs, discard --> <-- and <-- -->
mask = (self.strands1 == self.strands2)
self.metadata["370_differentOrientationReadsRemoved"] = mask.sum()
print '370_differentOrientationReadsRemoved: ', mask.sum()
self.maskFilter(-mask)
def writeFilteringStats(self):
self.metadata["400_readsAfterFiltering"] = self.N
sameChrom = self.chrms1 == self.chrms2
self.metadata["401_cisReads"] = sameChrom.sum()
self.metadata["402_transReads"] = self.N - sameChrom.sum()
self._dumpMetadata()
    def fragmentSum(self, fragments=None, strands="both", useWeights=False):
        """
        __optimized for large datasets__
        returns sum of all counts for a set or subset of fragments

        Parameters
        ----------
        fragments : list of fragment IDs, optional
            Use only this fragments. By default all fragments are used
        strands : 1,2 or "both" (default)
            Use only first or second side of the read
            (first has SS, second - doesn't)
        useWeights : bool, optional
            If set to True, will give a fragment sum with weights adjusted for iterative correction.
        """
        # Uses 0 bytes per read
        if fragments is None:
            fragments = self.rFragIDs
        if not useWeights:
            # NOTE(review): in this branch `fragments` is not used --
            # counts are accumulated over ALL fragments via bincount.
            # Confirm whether subsetting was intended here.
            f1 = chunkedBincount(self._getSliceableVector("rfragAbsIdxs1"), minlength = len(self.rFragIDs))
            f2 = chunkedBincount(self._getSliceableVector("rfragAbsIdxs2"), minlength = len(self.rFragIDs))
            if strands == "both":
                return f1 + f2
            if strands == 1:
                return f1
            if strands == 2:
                return f2
            # NOTE(review): any other `strands` value silently returns None.
        else:
            if strands == "both":
                # Per-read weight 1/(w1*w2), summed per fragment over both sides.
                pass1 = 1. / self.fragmentWeights[arraySearch(self.rFragIDs, self.fragids1)]
                pass1 /= self.fragmentWeights[arraySearch(self.rFragIDs, self.fragids2)]
                return arraySumByArray(self.fragids1, fragments, pass1) + arraySumByArray(self.fragids2, fragments, pass1)
            else:
                raise NotImplementedError("Sorry")
    def iterativeCorrectionFromMax(self, minimumCount=50, precision=0.01):
        "TODO: rewrite this to account for a new fragment model"
        # Iteratively rescale per-fragment weights until the weighted
        # fragment sums are uniform to within `precision` (max relative
        # deviation from the mean). Fragments with <= minimumCount reads
        # are dropped first.
        # NOTE(review): `biases` is sized from rFragMids BEFORE the
        # fragmentFilter call below -- confirm the two stay aligned.
        biases = np.ones(len(self.rFragMids), dtype = np.double)
        self.fragmentWeights = 1. * self.fragmentSum()
        self.fragmentFilter(self.fragmentWeights > minimumCount)
        self.fragmentWeights = 1. * self.fragmentSum()
        while True:
            newSum = 1. * self.fragmentSum(useWeights=True)
            biases *= newSum / newSum.mean()
            maxDev = np.max(np.abs(newSum - newSum.mean())) / newSum.mean()
            print maxDev
            self.fragmentWeights *= (newSum / newSum.mean())
            if maxDev < precision:
                return biases
    def printStats(self):
        """Print dataset metadata (the counters collected by the filters)."""
        self.printMetadata()
def save(self, filename):
"Saves dataset to filename, does not change the working file."
if self.filename == filename:
raise StandardError("Cannot save to the working file")
newh5dict = mirnylib.h5dict.h5dict(filename, mode='w')
for name in self.vectors.keys():
newh5dict[name] = self.h5dict[name]
newh5dict["metadata"] = self.metadata
print "----> Data saved to file %s" % (filename,)
    def load(self, filename, buildFragments="deprecated"):
        "Loads dataset from file to working file; check for inconsistency"
        # `buildFragments` is kept for backward compatibility and ignored.
        otherh5dict = mirnylib.h5dict.h5dict(filename, 'r')
        if "metadata" in otherh5dict:
            self.metadata = otherh5dict["metadata"]
        else:
            print otherh5dict.keys()
            warnings.warn("Metadata not found!!!")
        length = 0
        for name in self.vectors:
            data = otherh5dict[name]
            ld = len(data)
            # All per-read vectors must have the same length; the first
            # vector seen defines the reference length.
            if length == 0:
                length = ld
            else:
                if ld != length:
                    # Inconsistent file: report and abort the program.
                    print("---->!!!!!File %s contains inconsistend data<----" %
                          filename)
                    self.exitProgram("----> Sorry...")
            self._setData(name, data)
        print "---->Loaded data from file %s, contains %d reads" % (
            filename, length)
        self.N = length
        self._checkConsistency()
def saveHeatmap(self, filename, resolution=1000000,
countDiagonalReads="Once",
useWeights=False,
useFragmentOverlap=False, maxBinSpawn=10):
"""
Saves heatmap to filename at given resolution.
For small genomes where number of fragments per bin is small,
please set useFragmentOverlap to True.
This will assign each fragment to all bins over which the fragment
spawns.
Parameters
----------
filename : str
Filename of the output h5dict
resolution : int or str
Resolution of a heatmap. May be an int or 'fragment' for
restriction fragment resolution.
countDiagonalReads : "once" or "twice"
How many times to count reads in the diagonal bin
useWeights : bool
If True, then take weights from 'weights' variable. False by default.
If using iterativeCorrectionFromMax (fragment-level IC), use weights.
useFragmentOverlap : bool (optional)
Set this to true if you have few fragments per bin (bin size <20kb for HindIII)
It will consume more RAM and be slower.
"""
try:
os.remove(filename)
except:
pass
tosave = mirnylib.h5dict.h5dict(path=filename, mode="w")
if not useFragmentOverlap:
heatmap = self.buildAllHeatmap(resolution, countDiagonalReads, useWeights)
else:
heatmap = self.buildHeatmapWithOverlapCpp(resolution, countDiagonalReads, maxBinSpawn)
tosave["heatmap"] = heatmap
del heatmap
if resolution != 'fragment':
chromosomeStarts = np.array(self.genome.chrmStartsBinCont)
numBins = self.genome.numBins
else:
chromosomeStarts = np.array(self.genome.chrmStartsRfragCont)
numBins = self.genome.numRfrags
tosave["resolution"] = resolution
tosave["genomeBinNum"] = numBins
tosave["genomeIdxToLabel"] = self.genome.idx2label
tosave["chromosomeStarts"] = chromosomeStarts
print "----> Heatmap saved to '{0}' at {1} resolution".format(
filename, resolution)
    def saveByChromosomeHeatmap(self, filename, resolution=10000,
                                includeTrans=True,
                                countDiagonalReads="Once"):
        """
        Saves chromosome by chromosome heatmaps to h5dict.
        This method is not as memory demanding as saving allxall heatmap.

        Keys of the h5dict are of the format ["1 14"],
        where chromosomes are zero-based,
        and there is one space between numbers.

        .. warning :: Chromosome numbers are always zero-based.
            Only "chr3" labels are one-based in this package.

        Parameters
        ----------
        filename : str
            Filename of the h5dict with the output
        resolution : int
            Resolution to save heatmaps
        includeTrans : bool, optional
            Build inter-chromosomal heatmaps (default: True)
        countDiagonalReads : "once" or "twice"
            How many times to count reads in the diagonal bin
        """
        if countDiagonalReads.lower() not in ["once", "twice"]:
            raise ValueError("Bad value for countDiagonalReads")
        self.genome.setResolution(resolution)
        mydict = mirnylib.h5dict.h5dict(filename)
        for chromosome in xrange(self.genome.chrmCount):
            # Data is sorted by (chrms1, cuts1); select all reads whose
            # first side is on this chromosome.
            c1 = self.h5dict.get_dataset("chrms1")
            p1 = self.h5dict.get_dataset("cuts1")
            low = h5dictBinarySearch(c1, p1, (chromosome, -1), "left")
            high = h5dictBinarySearch(c1, p1, (chromosome, 999999999), "right")
            chr1 = self._getVector("chrms1", low, high)
            chr2 = self._getVector("chrms2", low, high)
            pos1 = np.array(self._getVector("mids1", low, high) / resolution, dtype = np.int32)
            pos2 = np.array(self._getVector("mids2", low, high) / resolution, dtype = np.int32)
            if includeTrans == True:
                # Keep reads with at least one side on this chromosome.
                mask = ((chr1 == chromosome) + (chr2 == chromosome))
                chr1 = chr1[mask]
                chr2 = chr2[mask]
                pos1 = pos1[mask]
                pos2 = pos2[mask]
            # Located chromosomes and positions of chromosomes
            if includeTrans == True:
                # moving different chromosomes to c2
                # c1 == chrom now
                mask = (chr2 == chromosome) * (chr1 != chromosome)
                chr1[mask], chr2[mask], pos1[mask], pos2[mask] = chr2[mask].copy(), chr1[
                    mask].copy(), pos2[mask].copy(), pos1[mask].copy()
                # Sort by partner chromosome so each chrom2 is a
                # contiguous slice below (chr1 is no longer needed).
                args = np.argsort(chr2)
                chr2 = chr2[args]
                pos1 = pos1[args]
                pos2 = pos2[args]
            for chrom2 in xrange(chromosome, self.genome.chrmCount):
                if (includeTrans == False) and (chrom2 != chromosome):
                    continue
                start = np.searchsorted(chr2, chrom2, "left")
                end = np.searchsorted(chr2, chrom2, "right")
                cur1 = pos1[start:end]
                cur2 = pos2[start:end]
                # Flatten (bin1, bin2) to a single label and histogram it.
                label = np.array(cur1, "int64")
                label *= self.genome.chrmLensBin[chrom2]
                label += cur2
                maxLabel = self.genome.chrmLensBin[chromosome] * \
                    self.genome.chrmLensBin[chrom2]
                counts = np.bincount(label, minlength=maxLabel)
                mymap = counts.reshape((self.genome.chrmLensBin[chromosome], -1))
                if chromosome == chrom2:
                    # Cis map: symmetrize, each read was recorded once.
                    mymap = mymap + mymap.T
                    if countDiagonalReads.lower() == "once":
                        fillDiagonal(mymap, np.diag(mymap).copy() / 2)
                mydict["%d %d" % (chromosome, chrom2)] = mymap
        print "----> By chromosome Heatmap saved to '{0}' at {1} resolution".format(filename, resolution)
        return
def exitProgram(self, a):
print a
print " ----> Bye! :) <----"
exit()
    def iterativeCorrection(self, numsteps=10, normToLen=False):
        '''
        Perform fragment-based iterative correction of Hi-C data.

        Repeats `numsteps` sweeps; in each sweep every restriction
        fragment's reads have their weights renormalized (to 1, or to
        the fragment length if normToLen). The result is stored as the
        per-read 'weights' vector.
        '''
        rfragLensConc = np.concatenate(self.genome.rfragLens)
        weights = np.ones(self.N, dtype=np.float32)
        # Concatenate both read sides and group them by fragment: sort by
        # fragment index while remembering each entry's original read.
        concRfragAbsIdxs = np.r_[self.rfragAbsIdxs1, self.rfragAbsIdxs2]
        concOrigArgs = np.r_[np.arange(0, self.N), np.arange(0, self.N)]
        concArgs = np.argsort(concRfragAbsIdxs)
        concRfragAbsIdxs = concRfragAbsIdxs[concArgs]
        concOrigArgs = concOrigArgs[concArgs]
        # fragBorders[i]:fragBorders[i+1] is the run of entries belonging
        # to one fragment in the sorted order.
        fragBorders = np.where(concRfragAbsIdxs[:-1] != concRfragAbsIdxs[1:])[0] + 1
        fragBorders = np.r_[0, fragBorders, 2 * self.N]
        rfragLensLocal = rfragLensConc[concRfragAbsIdxs[fragBorders[:-1]]]
        for _ in range(numsteps):
            for i in range(len(fragBorders) - 1):
                mask = concOrigArgs[fragBorders[i]:fragBorders[i + 1]]
                totWeight = weights[mask].sum()
                if normToLen:
                    weights[mask] *= rfragLensLocal[i] / totWeight
                else:
                    weights[mask] /= totWeight
        # Register 'weights' as a per-read vector so it persists.
        self.vectors['weights'] = 'float32'
        self.weights = weights
    def plotScaling(self, fragids1=None, fragids2=None,
                    # IDs of fragments for which to plot scaling.
                    # One can, for example, limit oneself to
                    # only fragments shorter than 1000 bp
                    # Or calculate scaling only between different arms
                    useWeights=False,
                    # use weights associated with fragment length
                    excludeNeighbors=None, enzyme=None,
                    # number of neighboring fragments to exclude.
                    # Enzyme is needed for that!
                    normalize=True, normRange=None,
                    # normalize the final plot to sum to one
                    withinArms=True,
                    # Treat chromosomal arms separately
                    mindist=1000,
                    # Scaling was proved to be unreliable
                    # under 10000 bp for 6-cutter enzymes
                    maxdist=None,
                    #----Calculating scaling within a set of regions only----
                    regions=None,
                    # Array of tuples (chrom, start, end)
                    # for which scaling should be calculated
                    # Note that calculation might be extremely long
                    # (it might be proportional to # of regions for # > 100)
                    appendReadCount=True, **kwargs
                    # Append read count to the plot label
                    # kwargs to be passed to plotting
                    ):  # Sad smiley, because this method
                        # is very painful and complicated
        """plots scaling over, possibly uses subset of fragments, or weights,
        possibly normalizes after plotting

        Plan of scaling calculation:

        1. Subdivide all genome into regions. \n
            a. Different chromosomes \n
            b. Different arms \n
            c. User defined squares/rectangles on a contact map \n
               -(chromosome, start,end) square around the diagonal \n
               -(chr, st1, end1, st2, end2) rectangle \n
        2. Use either all fragments, or only interactions between
           two groups of fragments \n
           e.g. you can calculate how scaling for small fragments is different
           from that for large \n
           It can be possibly used for testing Hi-C protocol issues. \n
           One can see effect of weights by doing this \n
        3. (optional) Calculate correction associated
           with fragment length dependence
        4. Subdivide all possible genomic separation into log-spaced bins
        5. Calculate expected number of fragment pairs within each bin
           (possibly with weights from step 3).
           If exclusion of neighbors is specificed,
           expected number of fragments knows about this

        Parameters
        ----------
        fragids1, fragids2 : np.array of fragment IDs, optional
            Scaling is calculated only for interactions between
            fragids1 and fragids2
            If omitted, all fragments are used
            If boolean array is supplied, it serves as a mask for fragments.
        useWeights : bool, optional
            Use weights calculated from fragment length
        excludeNeighbors : int or None, optional
            If None, all fragment pairs are considered.
            If integer, only fragment pairs separated
            by at least this number of r-fragments are considered.
        enzyme : string ("HindIII","NcoI")
            If excludeNeighbors is used, you have to specify restriction enzyme
        normalize : bool, optional
            Do an overall normalization of the answer, by default True.
        withinArms : bool, optional
            Set to false to use whole chromosomes instead of arms
        mindist, maxdist : int, optional
            Use lengthes from mindist to maxdist
        regions : list of (chrom,start,end) or (ch,st1,end1,st2,end2), optional
            Restrict scaling calculation to only certain squares of the map
        appendReadCount : bool, optional
            Append read count to the plot label
        plot : bool, optional
            If False then do not display the plot. True by default.
        **kwargs : optional
            All other keyword args are passed to plt.plot

        Returns
        -------
        (bins,probabilities) - values to plot on the scaling plot
        """
        # TODO:(MI) write an ab-initio test for scaling calculation
        if not self._isSorted():
            self._sortData()
        import matplotlib.pyplot as plt
        if excludeNeighbors <= 0:
            excludeNeighbors = None  # Not excluding neighbors
        # use all fragments if they're not specified
        # parse fragment array if it's bool
        if (fragids1 is None) and (fragids2 is None):
            allFragments = True
        else:
            allFragments = False
        if fragids1 is None:
            fs = self.fragmentSum()
            fragids1 = fs > 0
        if fragids2 is None:
            try:
                fragids2 = fs > 0
            except:
                fragids2 = self.fragmentSum() > 0
            # NOTE(review): the original indentation of this `del` was
            # ambiguous; it is placed here so the common call patterns
            # (no fragids / both fragids supplied) work -- confirm.
            del fs
        if fragids1.dtype == np.bool:
            fragids1 = self.rFragIDs[fragids1]
        if fragids2.dtype == np.bool:
            fragids2 = self.rFragIDs[fragids2]
        # Calculate regions if not specified
        if regions is None:
            if withinArms == False:
                regions = [(i, 0, self.genome.chrmLens[i])
                           for i in xrange(self.genome.chrmCount)]
            else:
                regions = [(i, 0, self.genome.cntrMids[i])
                           for i in xrange(self.genome.chrmCount)] + \
                          [(i, self.genome.cntrMids[i], self.genome.chrmLens[i])
                           for i in xrange(self.genome.chrmCount)]
        if maxdist is None:
            maxdist = max(
                max([i[2] - i[1] for i in regions]),
                # rectangular regions
                max([abs(i[2] - i[3]) for i in regions if
                     len(i) > 3] + [0]),
                max([abs(i[1] - i[4]) for i in regions if
                     len(i) > 3] + [0])  # other side
                )
        # Region to which a read belongs
        fragch1 = fragids1 / self.fragIDmult
        fragch2 = fragids2 / self.fragIDmult
        fragpos1 = fragids1 % self.fragIDmult
        fragpos2 = fragids2 % self.fragIDmult
        c1_h5 = self.h5dict.get_dataset("chrms1")
        p1_h5 = self.h5dict.get_dataset("cuts1")
        c2_h5 = self.h5dict.get_dataset("chrms2")
        p2_h5 = self.h5dict.get_dataset("cuts2")
        # Log-spaced distance bins (+0.1 to avoid exact-boundary ties).
        bins = np.array(
            numutils.logbins(mindist, maxdist, 1.12), float) + 0.1  # bins of lengths
        numBins = len(bins) - 1  # number of bins
        args = np.argsort(self.rFragIDs)
        usort = self.rFragIDs[args]
        if useWeights == True:  # calculating weights if needed
            try:
                self.fragmentWeights
            except:
                self.calculateFragmentWeights()
            uweights = self.fragmentWeights[args]  # weights for sorted fragment IDs
            weights1 = uweights[np.searchsorted(usort, fragids1)]
            weights2 = uweights[np.searchsorted(usort, fragids2)
                                ]  # weights for fragment IDs under consideration
        numExpFrags = np.zeros(numBins)  # expected pair count per bin
        values = [0] * (len(bins) - 1)
        rawValues = [0] * (len(bins) - 1)
        binBegs, binEnds = bins[:-1], bins[1:]
        binMids = 0.5 * (binBegs + binEnds).astype(float)
        binLens = binEnds - binBegs
        for region in regions:
            # Locate the slice of (sorted) reads belonging to this region.
            if len(region) == 3:
                chrom, start1, end1 = region
                low = h5dictBinarySearch(c1_h5, p1_h5, (chrom, start1), "left")
                high = h5dictBinarySearch(c1_h5, p1_h5, (chrom, end1), "right")
            if len(region) == 5:
                chrom, start1, end1, start2, end2 = region
                assert start1 < end1
                assert start2 < end2
                low = h5dictBinarySearch(c1_h5, p1_h5, (chrom, min(start1, start2)), "left")
                high = h5dictBinarySearch(c1_h5, p1_h5, (chrom, max(end1, end2)), "right")
            chr2 = c2_h5[low:high]
            pos1 = p1_h5[low:high]
            pos2 = p2_h5[low:high]
            myfragids1 = self._getVector("fragids1", low, high)
            myfragids2 = self._getVector("fragids2", low, high)
            mystrands1 = self._getVector("strands1", low, high)
            mystrands2 = self._getVector("strands2", low, high)
            mydists = self._getVector("distances", low, high)
            print "region", region, "low", low, "high", high
            # mask selects reads fully inside the region; maskFrag* select
            # candidate fragments whose position lies inside it.
            if len(region) == 3:
                mask = (pos1 > start1) * (pos1 < end1) * \
                    (chr2 == chrom) * (pos2 > start1) * (pos2 < end1)
                maskFrag1 = (fragch1 == chrom) * (fragpos1 >
                                                  start1) * (fragpos1 < end1)
                maskFrag2 = (fragch2 == chrom) * (fragpos2 >
                                                  start1) * (fragpos2 < end1)
            if len(region) == 5:
                chrom, start1, end1, start2, end2 = region
                mask1 = (chr2 == chrom) * (pos1 > start1) * \
                    (pos1 < end1) * (pos2 > start2) * (pos2 < end2)
                mask2 = (chr2 == chrom) * (pos1 > start2) * \
                    (pos1 < end2) * (pos2 > start1) * (pos2 < end1)
                mask = mask1 + mask2
                maskFrag1 = (fragch1 == chrom) * (
                    (fragpos1 > start1) * (fragpos1 < end1)
                    + (fragpos1 > start2) * (fragpos1 < end2))
                maskFrag2 = (fragch2 == chrom) * (
                    (fragpos2 > start2) * (fragpos2 < end2)
                    + (fragpos2 > start1) * (fragpos2 < end1))
            if maskFrag1.sum() == 0 or maskFrag2.sum() == 0:
                print "no fragments for region", region
                continue
            if mask.sum() == 0:
                print "No reads for region", region
                continue
            chr2 = chr2[mask]
            pos1 = pos1[mask]
            pos2 = pos2[mask]
            myfragids1 = myfragids1[mask]
            myfragids2 = myfragids2[mask]
            mystrands1 = mystrands1[mask]
            mystrands2 = mystrands2[mask]
            mydists = mydists[mask]
            validFragPairs = np.ones(len(chr2), dtype = np.bool)
            if allFragments == False:
                # Filter the dataset so it has only the specified fragments.
                p11 = arrayInArray(myfragids1, fragids1)
                p12 = arrayInArray(myfragids1, fragids2)
                p21 = arrayInArray(myfragids2, fragids1)
                p22 = arrayInArray(myfragids2, fragids2)
                validFragPairs *= ((p11 * p22) + (p12 * p21))
            # Consider pairs of fragments from the same region.
            # Keep only --> --> or <-- <-- pairs, discard --> <-- and <-- -->
            validFragPairs *= (mystrands1 == mystrands2)
            # Keep only fragment pairs more than excludeNeighbors fragments apart.
            distsInFrags = self.genome.getFragmentDistance(
                myfragids1, myfragids2, self.genome.enzymeName)
            validFragPairs *= distsInFrags > excludeNeighbors
            distances = np.sort(mydists[validFragPairs])
            "calculating fragments lengths for exclusions to expected # of counts"
            # sorted fragment IDs and lengthes
            print region
            # filtering fragments that correspond to current region
            bp1, bp2 = fragpos1[maskFrag1], fragpos2[maskFrag2]
            # positions of fragments on chromosome
            p2arg = np.argsort(bp2)
            p2 = bp2[p2arg]  # sorted positions on the second fragment
            if excludeNeighbors is not None:
                "calculating excluded fragments (neighbors) and their weights"\
                    " to subtract them later"
                # NOTE(review): mask1/mask2 only exist for 5-tuple regions;
                # a 3-tuple region combined with excludeNeighbors would
                # raise NameError here -- confirm intended usage.
                excFrag1, excFrag2 = self.genome.getPairsLessThanDistance(
                    fragids1[mask1], fragids2[mask2], excludeNeighbors, enzyme)
                excDists = np.abs(excFrag2 - excFrag1)
                # distances between excluded fragment pairs
                if useWeights == True:
                    correctionWeights = weights1[numutils.arraySearch(
                        fragids1, excFrag1)]
                    # weights for excluded fragment pairs
                    correctionWeights = correctionWeights * weights2[
                        numutils.arraySearch(fragids2, excFrag2)]
            if useWeights == True:
                # NOTE(review): same mask1/mask2 concern as above.
                w1, w2 = weights1[mask1], weights2[mask2]
                sw2 = np.r_[0, np.cumsum(w2[p2arg])]
                # cumsum for sorted weights on 2 strand
            for minDist, maxDist, binIndex in zip(binBegs, binEnds, range(numBins)):
                "Now calculating actual number of fragment pairs for a "\
                    "length-bin, or weight of all these pairs"
                # For each first fragment in a pair, calculate total # of
                # restriction fragments in the genome lying downstream within
                # the bin.
                val1 = np.searchsorted(p2, bp1 - maxDist)
                val2 = np.searchsorted(p2, bp1 - minDist)
                if useWeights == False:
                    curcount = np.sum(np.abs(val1 - val2))  # just # of fragments
                else:
                    # (difference in cumsum of weights) * my weight
                    curcount = np.sum(w1 * np.abs(sw2[val1] - sw2[val2]))
                # Repeat the procedure for the fragments lying upstream.
                val1 = np.searchsorted(p2, bp1 + maxDist)
                val2 = np.searchsorted(p2, bp1 + minDist)
                if useWeights == False:
                    curcount += np.sum(np.abs(val1 - val2))
                else:
                    curcount += np.sum(w1 * np.abs(sw2[val1] - sw2[val2]))
                # now modifying expected count because of excluded fragments
                if excludeNeighbors is not None:
                    if useWeights == False:
                        ignore = ((excDists > minDist) *
                                  (excDists < maxDist)).sum()
                    else:
                        ignore = (correctionWeights[((excDists > minDist) * \
                            (excDists < maxDist))]).sum()
                    if (ignore >= curcount) and (ignore != 0):
                        # Numerical-noise tolerance: excluded weight may
                        # slightly exceed the expected count; zero both.
                        if ignore < curcount * 1.0001:
                            curcount = ignore = 0
                        else:
                            print "error found", "minDist:", minDist
                            print " curcount:", curcount, " ignore:", ignore
                    else:  # Everything is all right
                        curcount -= ignore
                numExpFrags[binIndex] += curcount
                #print curcount
            for i in xrange(len(bins) - 1):  # Dividing observed by expected
                first, last = tuple(np.searchsorted(distances, [binBegs[i], binEnds[i]]))
                mycounts = last - first
                values[i] += (mycounts / float(numExpFrags[i]))
                rawValues[i] += (mycounts)
            #print "values", values
            #print "rawValies", rawValues
        values = np.array(values)
        if normalize == True:
            # Normalize to unit integral over (optionally restricted)
            # finite bins.
            if normRange is None:
                values /= np.sum(
                    1. * (binLens * values)[
                        np.logical_not(
                            np.isnan(binMids * values))])
            else:
                values /= np.sum(
                    1. * (binLens * values)[
                        np.logical_not(
                            np.isnan(binMids * values))
                        * (binMids > normRange[0])
                        * (binMids < normRange[1])])
        do_plot = kwargs.pop('plot', True)
        if do_plot:
            if appendReadCount == True:
                if "label" in kwargs.keys():
                    kwargs["label"] = kwargs["label"] + \
                        ", %d reads" % len(distances)
            plt.plot(binMids, values, **kwargs)
        return (binMids, values)
    def plotRsiteStartDistribution(self, offset=5, length=200):
        """Plots four histograms of the distance between a read start and
        the restriction-site end of its fragment, one subplot per
        (strand, read side) combination.
        Run plt.show() after this function.
        offset: bp shown to the left of the site (x axis starts at
            -offset); raised automatically if any distance is more
            negative than -offset
        length: total number of bp positions shown on the x axis
        """
        import matplotlib.pyplot as plt
        # presumably the distance from the read start to the
        # restriction-site end of its fragment -- verify against the
        # definitions of fraglens1/dists1 elsewhere in this class
        dists1 = self.fraglens1 - np.array(self.dists1, dtype="int32")
        dists2 = self.fraglens2 - np.array(self.dists2, dtype="int32")
        m = min(dists1.min(), dists2.min())
        if offset < -m:
            offset = -m
            print "minimum negative distance is %d, larger than offset;"\
                " offset set to %d" % (m, -m)
        # shift so every distance is non-negative, as np.bincount requires
        dists1 += offset
        dists2 += offset
        myrange = np.arange(-offset, length - offset)
        plt.subplot(141)
        plt.title("strands1, side 1")
        # NOTE(review): the extra "5 +" shift appears only in this subplot;
        # the other three plot the unshifted distances -- confirm that this
        # offset is intentional.
        plt.plot(myrange, np.bincount(
            5 + dists1[self.strands1 == True])[:length])
        plt.subplot(142)
        plt.title("strands1, side 2")
        plt.plot(myrange, np.bincount(
            dists2[self.strands1 == True])[:length])
        plt.subplot(143)
        plt.title("strands2, side 1")
        plt.plot(myrange, np.bincount(
            dists1[self.strands1 == False])[:length])
        plt.subplot(144)
        plt.title("strands2, side 2")
        plt.plot(myrange, np.bincount(
            dists2[self.strands1 == False])[:length])
| bsd-3-clause |
DSLituiev/scikit-learn | examples/calibration/plot_calibration.py | 33 | 4794 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
#         Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#         Balazs Kegl <balazs.kegl@gmail.com>
#         Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3  # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
                  centers=centers, shuffle=False, random_state=42)
# Relabel so the first half of the samples is class 0 and the second half
# class 1; the middle blob ends up split between the two classes.
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration; only 10% is kept for training
X_train, X_test, y_train, y_test, sw_train, sw_test = \
    train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train)  # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
# the third positional argument of brier_score_loss is sample_weight
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
    this_X = X_train[y_train == this_y]
    this_sw = sw_train[y_train == this_y]
    plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
                label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
# sort instances by uncalibrated probability so all curves share one x axis
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
         label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
         label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
# empirical P(y=1) over 25 bins of consecutive (sorted) instances
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
         y_test[order].reshape(25, -1).mean(1),
         'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
           "(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
sintetizzatore/ThinkStats2 | code/thinkstats2.py | 68 | 68825 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
    """Seed both the stdlib and the NumPy pseudo-random generators.

    x: int seed
    """
    for seeder in (random.seed, np.random.seed):
        seeder(x)
def Odds(p):
    """Converts a probability to odds in favor.

    Example: p=0.75 means 75 for and 25 against, i.e. 3:1 odds in favor.

    The formula divides by zero at p=1; rather than raising, this
    function defines Odds(1) to be infinity.

    p: float 0-1

    Returns: float odds
    """
    return float('inf') if p == 1 else p / (1 - p)
def Probability(o):
    """Converts odds in favor to the corresponding probability.

    Example: o=2 means 2:1 odds in favor, or 2/3 probability.

    o: float odds, strictly positive

    Returns: float probability
    """
    denom = o + 1
    return o / denom
def Probability2(yes, no):
    """Converts a pair of counts for/against into a probability.

    Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.

    yes, no: int or float odds in favor
    """
    total = yes + no
    return yes / total
class Interpolator(object):
    """Linear interpolation between two sorted, parallel sequences.

    Attributes:
        xs: sorted list
        ys: sorted list
    """

    def __init__(self, xs, ys):
        self.xs = xs
        self.ys = ys

    def Lookup(self, x):
        """Maps from x to the corresponding y, interpolating linearly."""
        return self._Bisect(x, self.xs, self.ys)

    def Reverse(self, y):
        """Maps from y back to the corresponding x, interpolating linearly."""
        return self._Bisect(y, self.ys, self.xs)

    def _Bisect(self, key, keys, vals):
        """Locates key in keys and interpolates the matching value.

        Keys outside the range are clamped to the endpoint values.
        """
        if key <= keys[0]:
            return vals[0]
        if key >= keys[-1]:
            return vals[-1]
        hi = bisect.bisect(keys, key)
        lo = hi - 1
        frac = 1.0 * (key - keys[lo]) / (keys[hi] - keys[lo])
        return vals[lo] + frac * 1.0 * (vals[hi] - vals[lo])
class _DictWrapper(object):
    """An object that contains a dictionary."""
    def __init__(self, obj=None, label=None):
        """Initializes the distribution.
        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        label: string label
        """
        self.label = label if label is not None else '_nolegend_'
        self.d = {}
        # flag whether the distribution is under a log transform
        self.log = False
        if obj is None:
            return
        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.label = label if label is not None else obj.label
        if isinstance(obj, dict):
            self.d.update(obj.items())
        elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.d.update(obj.Items())
        elif isinstance(obj, pandas.Series):
            self.d.update(obj.value_counts().iteritems())
        else:
            # finally, treat it like a list
            self.d.update(Counter(obj))
        # a Pmf built from data is normalized immediately on construction
        if len(self) > 0 and isinstance(self, Pmf):
            self.Normalize()
    def __hash__(self):
        # NOTE(review): identity hash combined with the value-based __eq__
        # below means equal-but-distinct distributions hash differently --
        # confirm this is intended before using them as dict/set keys.
        return id(self)
    def __str__(self):
        cls = self.__class__.__name__
        return '%s(%s)' % (cls, str(self.d))
    __repr__ = __str__
    def __eq__(self, other):
        return self.d == other.d
    def __len__(self):
        return len(self.d)
    def __iter__(self):
        return iter(self.d)
    def iterkeys(self):
        """Returns an iterator over keys."""
        # mirrors the Python 2 dict API
        return iter(self.d)
    def __contains__(self, value):
        return value in self.d
    def __getitem__(self, value):
        # missing values have frequency/probability 0
        return self.d.get(value, 0)
    def __setitem__(self, value, prob):
        self.d[value] = prob
    def __delitem__(self, value):
        del self.d[value]
    def Copy(self, label=None):
        """Returns a copy.
        Make a shallow copy of d.  If you want a deep copy of d,
        use copy.deepcopy on the whole object.
        label: string label for the new Hist
        returns: new _DictWrapper with the same type
        """
        new = copy.copy(self)
        new.d = copy.copy(self.d)
        new.label = label if label is not None else self.label
        return new
    def Scale(self, factor):
        """Multiplies the values by a factor.
        factor: what to multiply by
        Returns: new object
        """
        new = self.Copy()
        new.d.clear()
        for val, prob in self.Items():
            new.Set(val * factor, prob)
        return new
    def Log(self, m=None):
        """Log transforms the probabilities.
        Removes values with probability 0.
        Normalizes so that the largest logprob is 0.
        """
        if self.log:
            raise ValueError("Pmf/Hist already under a log transform")
        self.log = True
        if m is None:
            m = self.MaxLike()
        for x, p in self.d.items():
            if p:
                self.Set(x, math.log(p / m))
            else:
                self.Remove(x)
    def Exp(self, m=None):
        """Exponentiates the probabilities.
        m: how much to shift the ps before exponentiating
        If m is None, normalizes so that the largest prob is 1.
        """
        if not self.log:
            raise ValueError("Pmf/Hist not under a log transform")
        self.log = False
        if m is None:
            m = self.MaxLike()
        for x, p in self.d.items():
            self.Set(x, math.exp(p - m))
    def GetDict(self):
        """Gets the dictionary."""
        return self.d
    def SetDict(self, d):
        """Sets the dictionary."""
        self.d = d
    def Values(self):
        """Gets an unsorted sequence of values.
        Note: one source of confusion is that the keys of this
        dictionary are the values of the Hist/Pmf, and the
        values of the dictionary are frequencies/probabilities.
        """
        return self.d.keys()
    def Items(self):
        """Gets an unsorted sequence of (value, freq/prob) pairs."""
        return self.d.items()
    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.
        Note: options are ignored
        Returns:
            tuple of (sorted value sequence, freq/prob sequence)
        """
        # NOTE(review): 'is np.nan' is an identity test, and min() does not
        # reliably surface NaN, so NaN keys created elsewhere may slip past
        # this warning -- verify the intent.
        if min(self.d.keys()) is np.nan:
            logging.warning('Hist: contains NaN, may not render correctly.')
        return zip(*sorted(self.Items()))
    def MakeCdf(self, label=None):
        """Makes a Cdf."""
        label = label if label is not None else self.label
        return Cdf(self, label=label)
    def Print(self):
        """Prints the values and freqs/probs in ascending order."""
        for val, prob in sorted(self.d.items()):
            print(val, prob)
    def Set(self, x, y=0):
        """Sets the freq/prob associated with the value x.
        Args:
            x: number value
            y: number freq or prob
        """
        self.d[x] = y
    def Incr(self, x, term=1):
        """Increments the freq/prob associated with the value x.
        Args:
            x: number value
            term: how much to increment by
        """
        self.d[x] = self.d.get(x, 0) + term
    def Mult(self, x, factor):
        """Scales the freq/prob associated with the value x.
        Args:
            x: number value
            factor: how much to multiply by
        """
        self.d[x] = self.d.get(x, 0) * factor
    def Remove(self, x):
        """Removes a value.
        Throws an exception if the value is not there.
        Args:
            x: value to remove
        """
        del self.d[x]
    def Total(self):
        """Returns the total of the frequencies/probabilities in the map."""
        total = sum(self.d.values())
        return total
    def MaxLike(self):
        """Returns the largest frequency/probability in the map."""
        return max(self.d.values())
    def Largest(self, n=10):
        """Returns the largest n values, with frequency/probability.
        n: number of items to return
        """
        return sorted(self.d.items(), reverse=True)[:n]
    def Smallest(self, n=10):
        """Returns the smallest n values, with frequency/probability.
        n: number of items to return
        """
        return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
    """A histogram: a map from values to integer frequencies.

    Values can be any hashable type; frequencies are integer counters.
    """

    def Freq(self, x):
        """Returns the frequency associated with the value x.

        Args:
            x: number value

        Returns:
            int frequency (0 if x is not present)
        """
        return self.d.get(x, 0)

    def Freqs(self, xs):
        """Returns the frequency of each value in xs, as a list."""
        return list(map(self.Freq, xs))

    def IsSubset(self, other):
        """Checks whether no value occurs more often here than in other."""
        return all(freq <= other.Freq(val) for val, freq in self.Items())

    def Subtract(self, other):
        """Decrements this histogram by the frequencies in other."""
        for val, freq in other.Items():
            self.Incr(val, -freq)
class Pmf(_DictWrapper):
    """Represents a probability mass function.
    Values can be any hashable type; probabilities are floating-point.
    Pmfs are not necessarily normalized.
    """
    def Prob(self, x, default=0):
        """Gets the probability associated with the value x.
        Args:
            x: number value
            default: value to return if the key is not there
        Returns:
            float probability
        """
        return self.d.get(x, default)
    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]
    def Percentile(self, percentage):
        """Computes a percentile of a given Pmf.
        Note: this is not super efficient. If you are planning
        to compute more than a few percentiles, compute the Cdf.
        percentage: float 0-100
        returns: value from the Pmf
        """
        p = percentage / 100.0
        total = 0
        # NOTE: falls through and returns None if the cumulative
        # probability never reaches p (e.g. for an unnormalized Pmf)
        for val, prob in sorted(self.Items()):
            total += prob
            if total >= p:
                return val
    def ProbGreater(self, x):
        """Probability that a sample from this Pmf exceeds x.
        x: number
        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbGreater(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val > x]
            return sum(t)
    def ProbLess(self, x):
        """Probability that a sample from this Pmf is less than x.
        x: number
        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbLess(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val < x]
            return sum(t)
    def __lt__(self, obj):
        """Less than.
        obj: number or _DictWrapper
        returns: float probability
        """
        return self.ProbLess(obj)
    def __gt__(self, obj):
        """Greater than.
        obj: number or _DictWrapper
        returns: float probability
        """
        return self.ProbGreater(obj)
    def __ge__(self, obj):
        """Greater than or equal.
        obj: number or _DictWrapper
        returns: float probability
        """
        # P(X >= y) is the complement of P(X < y)
        return 1 - (self < obj)
    def __le__(self, obj):
        """Less than or equal.
        obj: number or _DictWrapper
        returns: float probability
        """
        return 1 - (self > obj)
    def Normalize(self, fraction=1.0):
        """Normalizes this PMF so the sum of all probs is fraction.
        Args:
            fraction: what the total should be after normalization
        Returns: the total probability before normalizing
        """
        if self.log:
            raise ValueError("Normalize: Pmf is under a log transform")
        total = self.Total()
        if total == 0.0:
            raise ValueError('Normalize: total probability is zero.')
            #logging.warning('Normalize: total probability is zero.')
            #return total
        factor = fraction / total
        for x in self.d:
            self.d[x] *= factor
        return total
    def Random(self):
        """Chooses a random element from this PMF.
        Note: this is not very efficient. If you plan to call
        this more than a few times, consider converting to a CDF.
        Returns:
            float value from the Pmf
        """
        target = random.random()
        total = 0.0
        for x, p in self.d.items():
            total += p
            if total >= target:
                return x
        # we shouldn't get here
        raise ValueError('Random: Pmf might not be normalized.')
    def Mean(self):
        """Computes the mean of a PMF.
        Returns:
            float mean
        """
        mean = 0.0
        for x, p in self.d.items():
            mean += p * x
        return mean
    def Var(self, mu=None):
        """Computes the variance of a PMF.
        mu: the point around which the variance is computed;
            if omitted, computes the mean
        returns: float variance
        """
        if mu is None:
            mu = self.Mean()
        var = 0.0
        for x, p in self.d.items():
            var += p * (x - mu) ** 2
        return var
    def Std(self, mu=None):
        """Computes the standard deviation of a PMF.
        mu: the point around which the variance is computed;
            if omitted, computes the mean
        returns: float standard deviation
        """
        var = self.Var(mu)
        return math.sqrt(var)
    def MaximumLikelihood(self):
        """Returns the value with the highest probability.
        Returns: float probability
        """
        # ties broken in favor of the larger value, because the
        # (prob, val) tuples compare lexicographically
        _, val = max((prob, val) for val, prob in self.Items())
        return val
    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.
        If percentage=90, computes the 90% CI.
        Args:
            percentage: float between 0 and 100
        Returns:
            sequence of two floats, low and high
        """
        cdf = self.MakeCdf()
        return cdf.CredibleInterval(percentage)
    def __add__(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.
        other: another Pmf or a scalar
        returns: new Pmf
        """
        # EAFP: scalars have no Items(), so AddPmf raises AttributeError
        try:
            return self.AddPmf(other)
        except AttributeError:
            return self.AddConstant(other)
    def AddPmf(self, other):
        """Computes the Pmf of the sum of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 + v2, p1 * p2)
        return pmf
    def AddConstant(self, other):
        """Computes the Pmf of the sum a constant and values from self.
        other: a number
        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 + other, p1)
        return pmf
    def __sub__(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        try:
            return self.SubPmf(other)
        except AttributeError:
            return self.AddConstant(-other)
    def SubPmf(self, other):
        """Computes the Pmf of the diff of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 - v2, p1 * p2)
        return pmf
    def __mul__(self, other):
        """Computes the Pmf of the product of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        try:
            return self.MulPmf(other)
        except AttributeError:
            return self.MulConstant(other)
    def MulPmf(self, other):
        """Computes the Pmf of the product of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 * v2, p1 * p2)
        return pmf
    def MulConstant(self, other):
        """Computes the Pmf of the product of a constant and values from self.
        other: a number
        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            pmf.Set(v1 * other, p1)
        return pmf
    def __div__(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        try:
            return self.DivPmf(other)
        except AttributeError:
            return self.MulConstant(1/other)
    # Python 3 looks up __truediv__; alias it so '/' works under both 2 and 3
    __truediv__ = __div__
    def DivPmf(self, other):
        """Computes the Pmf of the ratio of values drawn from self and other.
        other: another Pmf
        returns: new Pmf
        """
        pmf = Pmf()
        for v1, p1 in self.Items():
            for v2, p2 in other.Items():
                pmf.Incr(v1 / v2, p1 * p2)
        return pmf
    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.
        k: int
        returns: new Cdf
        """
        cdf = self.MakeCdf()
        return cdf.Max(k)
class Joint(Pmf):
    """A joint distribution over tuples of values.

    The values are sequences (usually tuples).
    """

    def Marginal(self, i, label=None):
        """Returns the marginal distribution of variable i.

        i: index of the variable we want

        Returns: Pmf
        """
        marginal = Pmf(label=label)
        for vs, prob in self.Items():
            marginal.Incr(vs[i], prob)
        return marginal

    def Conditional(self, i, j, val, label=None):
        """Returns the distribution of variable i given that vs[j] == val.

        i: index of the variable we want
        j: which variable is conditioned on
        val: the value the jth variable has to have

        Returns: normalized Pmf
        """
        conditional = Pmf(label=label)
        for vs, prob in self.Items():
            if vs[j] == val:
                conditional.Incr(vs[i], prob)
        conditional.Normalize()
        return conditional

    def MaxLikeInterval(self, percentage=90):
        """Returns the maximum-likelihood credible interval.

        If percentage=90, computes a 90% CI containing the values
        with the highest likelihoods.

        percentage: float between 0 and 100

        Returns: list of values from the suite
        """
        threshold = percentage / 100.0
        # most probable values first; ties broken by the larger value
        ranked = sorted(((prob, val) for val, prob in self.Items()),
                        reverse=True)
        interval = []
        total = 0
        for prob, val in ranked:
            interval.append(val)
            total += prob
            if total >= threshold:
                break
        return interval
def MakeJoint(pmf1, pmf2):
    """Builds the joint distribution of values from pmf1 and pmf2.

    Assumes that the PMFs represent independent random variables.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        Joint pmf of value pairs
    """
    joint = Joint()
    for x, px in pmf1.Items():
        for y, py in pmf2.Items():
            joint.Set((x, y), px * py)
    return joint
def MakeHistFromList(t, label=None):
    """Builds a Hist from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this histogram

    Returns: Hist object
    """
    return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
    """Makes a histogram from a map from values to frequencies.

    Args:
        d: dictionary that maps values to frequencies
        label: string label for this histogram

    Returns:
        Hist object
    """
    # pass label by keyword for consistency with the other factory
    # functions in this module (MakeHistFromList, MakePmfFromDict, ...)
    return Hist(d, label=label)
def MakePmfFromList(t, label=None):
    """Builds a PMF from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this PMF

    Returns: Pmf object
    """
    return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
    """Builds a PMF from a map of values to probabilities.

    d: dictionary that maps values to probabilities
    label: string label for this PMF

    Returns: Pmf object
    """
    return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
    """Builds a PMF from a sequence of (value, probability) pairs.

    t: sequence of value-probability pairs
    label: string label for this PMF

    Returns: Pmf object
    """
    return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
    """Builds a normalized PMF from a Hist object.

    hist: Hist object
    label: string label; defaults to the Hist's own label

    Returns: Pmf object
    """
    effective_label = hist.label if label is None else label
    return Pmf(hist, label=effective_label)
def MakeMixture(metapmf, label='mix'):
    """Builds the mixture of the Pmfs in a meta-Pmf.

    metapmf: Pmf that maps from Pmfs to probs.
    label: string label for the new Pmf.

    Returns: Pmf object.
    """
    mix = Pmf(label=label)
    for pmf, outer_p in metapmf.Items():
        for x, inner_p in pmf.Items():
            mix.Incr(x, outer_p * inner_p)
    return mix
def MakeUniformPmf(low, high, n):
    """Builds a Pmf that is uniform over n equally spaced values.

    low: lowest value (inclusive)
    high: highest value (inclusive)
    n: number of values
    """
    uniform = Pmf()
    for value in np.linspace(low, high, n):
        uniform.Set(value, 1)
    uniform.Normalize()
    return uniform
class Cdf(object):
    """Represents a cumulative distribution function.
    Attributes:
        xs: sequence of values
        ps: sequence of probabilities
        label: string used as a graph label.
    """
    def __init__(self, obj=None, ps=None, label=None):
        """Initializes.
        If ps is provided, obj must be the corresponding list of values.
        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        ps: list of cumulative probabilities
        label: string label
        """
        self.label = label if label is not None else '_nolegend_'
        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            if not label:
                self.label = label if label is not None else obj.label
        if obj is None:
            # caller does not provide obj, make an empty Cdf
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            if ps is not None:
                logging.warning("Cdf: can't pass ps without also passing xs.")
            return
        else:
            # if the caller provides xs and ps, just store them
            if ps is not None:
                if isinstance(ps, str):
                    logging.warning("Cdf: ps can't be a string")
                self.xs = np.asarray(obj)
                self.ps = np.asarray(ps)
                return
        # caller has provided just obj, not ps
        if isinstance(obj, Cdf):
            self.xs = copy.copy(obj.xs)
            self.ps = copy.copy(obj.ps)
            return
        if isinstance(obj, _DictWrapper):
            dw = obj
        else:
            dw = Hist(obj)
        if len(dw) == 0:
            self.xs = np.asarray([])
            self.ps = np.asarray([])
            return
        xs, freqs = zip(*sorted(dw.Items()))
        self.xs = np.asarray(xs)
        # use the builtin float: the np.float alias was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24
        self.ps = np.cumsum(freqs, dtype=float)
        self.ps /= self.ps[-1]
    def __str__(self):
        return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
    __repr__ = __str__
    def __len__(self):
        return len(self.xs)
    def __getitem__(self, x):
        return self.Prob(x)
    def __setitem__(self, x, value):
        # accept the standard (key, value) arguments so that item
        # assignment raises UnimplementedMethodException rather than a
        # confusing TypeError about the number of arguments
        raise UnimplementedMethodException()
    def __delitem__(self, x):
        # same signature fix as __setitem__
        raise UnimplementedMethodException()
    def __eq__(self, other):
        return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
    def Copy(self, label=None):
        """Returns a copy of this Cdf.
        label: string label for the new Cdf
        """
        if label is None:
            label = self.label
        return Cdf(list(self.xs), list(self.ps), label=label)
    def MakePmf(self, label=None):
        """Makes a Pmf."""
        if label is None:
            label = self.label
        return Pmf(self, label=label)
    def Values(self):
        """Returns a sorted list of values.
        """
        return self.xs
    def Items(self):
        """Returns a sorted sequence of (value, probability) pairs.
        Note: in Python3, returns an iterator.
        """
        # successive differences of the cumulative ps recover the pmf
        a = self.ps
        b = np.roll(a, 1)
        b[0] = 0
        return zip(self.xs, a-b)
    def Shift(self, term):
        """Adds a term to the xs.
        term: how much to add
        """
        new = self.Copy()
        # don't use +=, or else an int array + float yields int array
        new.xs = new.xs + term
        return new
    def Scale(self, factor):
        """Multiplies the xs by a factor.
        factor: what to multiply by
        """
        new = self.Copy()
        # don't use *=, or else an int array * float yields int array
        new.xs = new.xs * factor
        return new
    def Prob(self, x):
        """Returns CDF(x), the probability that corresponds to value x.
        Args:
            x: number
        Returns:
            float probability
        """
        if x < self.xs[0]:
            return 0.0
        index = bisect.bisect(self.xs, x)
        p = self.ps[index-1]
        return p
    def Probs(self, xs):
        """Gets probabilities for a sequence of values.
        xs: any sequence that can be converted to NumPy array
        returns: NumPy array of cumulative probabilities
        """
        xs = np.asarray(xs)
        index = np.searchsorted(self.xs, xs, side='right')
        ps = self.ps[index-1]
        # values below the smallest x have cumulative probability 0
        ps[xs < self.xs[0]] = 0.0
        return ps
    ProbArray = Probs
    def Value(self, p):
        """Returns InverseCDF(p), the value that corresponds to probability p.
        Args:
            p: number in the range [0, 1]
        Returns:
            number value
        """
        if p < 0 or p > 1:
            raise ValueError('Probability p must be in range [0, 1]')
        index = bisect.bisect_left(self.ps, p)
        return self.xs[index]
    def ValueArray(self, ps):
        """Returns InverseCDF(p), the value that corresponds to probability p.
        Args:
            ps: NumPy array of numbers in the range [0, 1]
        Returns:
            NumPy array of values
        """
        ps = np.asarray(ps)
        if np.any(ps < 0) or np.any(ps > 1):
            raise ValueError('Probability p must be in range [0, 1]')
        index = np.searchsorted(self.ps, ps, side='left')
        return self.xs[index]
    def Percentile(self, p):
        """Returns the value that corresponds to percentile p.
        Args:
            p: number in the range [0, 100]
        Returns:
            number value
        """
        return self.Value(p / 100.0)
    def PercentileRank(self, x):
        """Returns the percentile rank of the value x.
        x: potential value in the CDF
        returns: percentile rank in the range 0 to 100
        """
        return self.Prob(x) * 100.0
    def Random(self):
        """Chooses a random value from this distribution."""
        return self.Value(random.random())
    def Sample(self, n):
        """Generates a random sample from this distribution.
        n: int length of the sample
        returns: NumPy array
        """
        ps = np.random.random(n)
        return self.ValueArray(ps)
    def Mean(self):
        """Computes the mean of a CDF.
        Returns:
            float mean
        """
        old_p = 0
        total = 0.0
        for x, new_p in zip(self.xs, self.ps):
            p = new_p - old_p
            total += p * x
            old_p = new_p
        return total
    def CredibleInterval(self, percentage=90):
        """Computes the central credible interval.
        If percentage=90, computes the 90% CI.
        Args:
            percentage: float between 0 and 100
        Returns:
            sequence of two floats, low and high
        """
        prob = (1 - percentage / 100.0) / 2
        interval = self.Value(prob), self.Value(1 - prob)
        return interval
    ConfidenceInterval = CredibleInterval
    def _Round(self, multiplier=1000.0):
        """
        An entry is added to the cdf only if the percentile differs
        from the previous value in a significant digit, where the number
        of significant digits is determined by multiplier.  The
        default is 1000, which keeps log10(1000) = 3 significant digits.
        """
        # TODO(write this method)
        raise UnimplementedMethodException()
    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.
        An empirical CDF is a step function; linear interpolation
        can be misleading.
        Note: options are ignored
        Returns:
            tuple of (xs, ps)
        """
        def interleave(a, b):
            # weave a and b: a supplies the even slots, b the odd ones
            c = np.empty(a.shape[0] + b.shape[0])
            c[::2] = a
            c[1::2] = b
            return c
        a = np.array(self.xs)
        xs = interleave(a, a)
        shift_ps = np.roll(self.ps, 1)
        shift_ps[0] = 0
        ps = interleave(shift_ps, self.ps)
        return xs, ps
    def Max(self, k):
        """Computes the CDF of the maximum of k selections from this dist.
        k: int
        returns: new Cdf
        """
        cdf = self.Copy()
        cdf.ps **= k
        return cdf
def MakeCdfFromItems(items, label=None):
    """Builds a Cdf from an unsorted sequence of (value, frequency) pairs.

    items: unsorted sequence of (value, frequency) pairs
    label: string label for this CDF

    Returns: Cdf object
    """
    return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
    """Builds a CDF from a map of values to frequencies.

    d: dictionary that maps values to frequencies.
    label: string label for the data.

    Returns: Cdf object
    """
    return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
    """Builds a CDF from an unsorted sequence of values.

    seq: unsorted sequence of sortable values
    label: string label for the cdf

    Returns: Cdf object
    """
    return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
    """Builds a CDF from a Hist object.

    hist: Pmf.Hist object
    label: string label; defaults to the Hist's own label

    Returns: Cdf object
    """
    effective_label = hist.label if label is None else label
    return Cdf(hist, label=effective_label)
def MakeCdfFromPmf(pmf, label=None):
    """Builds a CDF from a Pmf object.

    pmf: Pmf.Pmf object
    label: string label; defaults to the Pmf's own label

    Returns: Cdf object
    """
    effective_label = pmf.label if label is None else label
    return Cdf(pmf, label=effective_label)
# Raised by abstract-style methods (e.g. Suite.Likelihood, Pdf.Density)
# that subclasses are expected to override.
class UnimplementedMethodException(Exception):
    """Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
    """Represents a suite of hypotheses and their probabilities."""

    def Update(self, data):
        """Updates each hypothesis based on the data.

        data: any representation of the data

        returns: the normalizing constant
        """
        for hypo in self.Values():
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdate(self, data):
        """Updates a suite of hypotheses based on new data.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        Note: unlike Update, LogUpdate does not normalize.

        data: any representation of the data
        """
        for hypo in self.Values():
            like = self.LogLikelihood(data, hypo)
            self.Incr(hypo, like)

    def UpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        This is more efficient than calling Update repeatedly because
        it waits until the end to Normalize.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: the normalizing constant
        """
        for data in dataset:
            for hypo in self.Values():
                like = self.Likelihood(data, hypo)
                self.Mult(hypo, like)
        return self.Normalize()

    def LogUpdateSet(self, dataset):
        """Updates each hypothesis based on the dataset.

        Modifies the suite directly; if you want to keep the original, make
        a copy.

        dataset: a sequence of data

        returns: None
        """
        for data in dataset:
            self.LogUpdate(data)

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under the hypothesis.

        Subclasses must override this.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def LogLikelihood(self, data, hypo):
        """Computes the log likelihood of the data under the hypothesis.

        Subclasses must override this.

        hypo: some representation of the hypothesis
        data: some representation of the data
        """
        raise UnimplementedMethodException()

    def Print(self):
        """Prints the hypotheses and their probabilities."""
        for hypo, prob in sorted(self.Items()):
            print(hypo, prob)

    def MakeOdds(self):
        """Transforms from probabilities to odds.

        Values with prob=0 are removed.
        """
        # Snapshot the items first: Set/Remove mutate the underlying
        # mapping, which is not safe while iterating a live view of it.
        for hypo, prob in list(self.Items()):
            if prob:
                self.Set(hypo, Odds(prob))
            else:
                self.Remove(hypo)

    def MakeProbs(self):
        """Transforms from odds to probabilities."""
        # Snapshot for the same reason as MakeOdds.
        for hypo, odds in list(self.Items()):
            self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
    """Makes a suite from an unsorted sequence of values.

    t: sequence of numbers
    label: string label for this suite

    returns: Suite object
    """
    hist = MakeHistFromList(t, label=label)
    d = hist.GetDict()
    # Forward the label; previously it was dropped here, so the
    # resulting Suite lost its name.
    return MakeSuiteFromDict(d, label)


def MakeSuiteFromHist(hist, label=None):
    """Makes a normalized suite from a Hist object.

    hist: Hist object
    label: string label; defaults to the Hist's label

    returns: Suite object
    """
    if label is None:
        label = hist.label

    # make a copy of the dictionary so the Suite does not share state
    # with the Hist
    d = dict(hist.GetDict())
    return MakeSuiteFromDict(d, label)


def MakeSuiteFromDict(d, label=None):
    """Makes a suite from a map from values to probabilities.

    d: dictionary that maps values to probabilities
    label: string label for this suite

    returns: Suite object, normalized
    """
    suite = Suite(label=label)
    suite.SetDict(d)
    suite.Normalize()
    return suite
class Pdf(object):
    """Represents a probability density function (PDF)."""

    def Density(self, x):
        """Evaluates this Pdf at x.

        Subclasses must override this.

        Returns: float or NumPy array of probability density
        """
        raise UnimplementedMethodException()

    def GetLinspace(self):
        """Get a linspace for plotting.

        Not all subclasses of Pdf implement this.

        Returns: numpy array
        """
        raise UnimplementedMethodException()

    def MakePmf(self, **options):
        """Makes a discrete version of this Pdf.

        options can include
        label: string
        low: low end of range
        high: high end of range
        n: number of places to evaluate

        Returns: new Pmf
        """
        label = options.pop('label', '')
        xs, ds = self.Render(**options)
        return Pmf(dict(zip(xs, ds)), label=label)

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.

        If options includes low and high, the density is evaluated at
        n locations between low and high, including both.  If options
        includes xs, the density is evaluated at those locations.
        Otherwise, self.GetLinspace provides the locations.

        Returns:
            tuple of (xs, densities)
        """
        low = options.pop('low', None)
        high = options.pop('high', None)
        if low is None or high is None:
            xs = options.pop('xs', None)
            if xs is None:
                xs = self.GetLinspace()
        else:
            n = options.pop('n', 101)
            xs = np.linspace(low, high, n)

        return xs, self.Density(xs)

    def Items(self):
        """Generates a sequence of (value, probability) pairs."""
        xs, ds = self.Render()
        return zip(xs, ds)


class NormalPdf(Pdf):
    """Represents the PDF of a Normal distribution."""

    def __init__(self, mu=0, sigma=1, label=None):
        """Constructs a Normal Pdf with given mu and sigma.

        mu: mean
        sigma: standard deviation
        label: string
        """
        self.mu = mu
        self.sigma = sigma
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)

    def GetLinspace(self):
        """Default plotting range: mu +/- 3 sigma.

        Returns: numpy array
        """
        return np.linspace(self.mu - 3 * self.sigma,
                           self.mu + 3 * self.sigma, 101)

    def Density(self, xs):
        """Evaluates this Pdf at xs.

        xs: scalar or sequence of floats

        returns: float or NumPy array of probability density
        """
        return stats.norm.pdf(xs, self.mu, self.sigma)


class ExponentialPdf(Pdf):
    """Represents the PDF of an exponential distribution."""

    def __init__(self, lam=1, label=None):
        """Constructs an exponential Pdf with given parameter.

        lam: rate parameter
        label: string
        """
        self.lam = lam
        self.label = '_nolegend_' if label is None else label

    def __str__(self):
        return 'ExponentialPdf(%f)' % (self.lam)

    def GetLinspace(self):
        """Default plotting range: 0 to 5 mean lifetimes.

        Returns: numpy array
        """
        return np.linspace(0, 5.0 / self.lam, 101)

    def Density(self, xs):
        """Evaluates this Pdf at xs.

        xs: scalar or sequence of floats

        returns: float or NumPy array of probability density
        """
        # scipy parameterizes the exponential by scale = 1/lambda
        return stats.expon.pdf(xs, scale=1.0 / self.lam)


class EstimatedPdf(Pdf):
    """Represents a PDF estimated by KDE."""

    def __init__(self, sample, label=None):
        """Estimates the density function based on a sample.

        sample: sequence of data
        label: string
        """
        self.label = '_nolegend_' if label is None else label
        self.kde = stats.gaussian_kde(sample)
        # cache a plotting range spanning the observed data
        self.linspace = np.linspace(min(sample), max(sample), 101)

    def __str__(self):
        return 'EstimatedPdf(label=%s)' % str(self.label)

    def GetLinspace(self):
        """Get a linspace for plotting.

        Returns: numpy array
        """
        return self.linspace

    def Density(self, xs):
        """Evaluates this Pdf at xs.

        returns: float or NumPy array of probability density
        """
        return self.kde.evaluate(xs)
def CredibleInterval(pmf, percentage=90):
    """Computes a credible interval for a given distribution.

    If percentage=90, computes the 90% CI.

    pmf: Pmf object representing a posterior distribution
    percentage: float between 0 and 100

    returns: sequence of two floats, low and high
    """
    cdf = pmf.MakeCdf()
    # Split the excluded tail mass evenly between the two sides.
    tail = (1 - percentage / 100.0) / 2
    return cdf.Value(tail), cdf.Value(1 - tail)
def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    returns: float probability
    """
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 < v2), 0.0)


def PmfProbGreater(pmf1, pmf2):
    """Probability that a value from pmf1 is greater than a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    returns: float probability
    """
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 > v2), 0.0)


def PmfProbEqual(pmf1, pmf2):
    """Probability that a value from pmf1 equals a value from pmf2.

    pmf1: Pmf object
    pmf2: Pmf object

    returns: float probability
    """
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 == v2), 0.0)
def RandomSum(dists):
    """Chooses a random value from each dist and returns the sum.

    dists: sequence of Pmf or Cdf objects

    returns: numerical sum
    """
    return sum(dist.Random() for dist in dists)


def SampleSum(dists, n):
    """Draws a sample of sums from a list of distributions.

    dists: sequence of Pmf or Cdf objects
    n: sample size

    returns: new Pmf of sums
    """
    return Pmf(RandomSum(dists) for _ in range(n))
def EvalNormalPdf(x, mu, sigma):
    """Computes the unnormalized PDF of the normal distribution.

    x: value
    mu: mean
    sigma: standard deviation

    returns: float probability density
    """
    return stats.norm(mu, sigma).pdf(x)


def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
    """Makes a PMF discrete approx to a Normal distribution.

    mu: float mean
    sigma: float standard deviation
    num_sigmas: how many sigmas to extend in each direction
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    spread = num_sigmas * sigma
    for x in np.linspace(mu - spread, mu + spread, n):
        pmf.Set(x, EvalNormalPdf(x, mu, sigma))
    pmf.Normalize()
    return pmf
def EvalBinomialPmf(k, n, p):
    """Evaluates the binomial PMF.

    k: number of successes
    n: number of trials
    p: probability of success per trial

    Returns the probability of k successes in n trials with probability p.
    """
    return stats.binom(n, p).pmf(k)


def EvalHypergeomPmf(k, N, K, n):
    """Evaluates the hypergeometric PMF.

    k: number of successes observed
    N: population size
    K: number of successes in the population
    n: number of trials

    Returns the probability of k successes in n trials from a population
    N with K successes in it.
    """
    return stats.hypergeom(N, K, n).pmf(k)
def EvalPoissonPmf(k, lam):
    """Computes the Poisson PMF.

    k: number of events
    lam: parameter lambda in events per unit time

    returns: float probability
    """
    # don't use the scipy function (yet): for lam=0 it returns NaN,
    # where the correct value is 0.0 (or 1.0 when k=0).
    return math.exp(-lam) * lam ** k / special.gamma(k + 1)


def MakePoissonPmf(lam, high, step=1):
    """Makes a PMF discrete approx to a Poisson distribution.

    lam: parameter lambda in events per unit time
    high: upper bound of the Pmf
    step: spacing between successive values

    returns: normalized Pmf
    """
    pmf = Pmf()
    for k in range(0, high + 1, step):
        pmf.Set(k, EvalPoissonPmf(k, lam))
    pmf.Normalize()
    return pmf
def EvalExponentialPdf(x, lam):
    """Computes the exponential PDF.

    x: value
    lam: rate parameter lambda in events per unit time

    returns: float probability density
    """
    return math.exp(-lam * x) * lam


def EvalExponentialCdf(x, lam):
    """Evaluates CDF of the exponential distribution with parameter lam."""
    return 1 - math.exp(-lam * x)


def MakeExponentialPmf(lam, high, n=200):
    """Makes a PMF discrete approx to an exponential distribution.

    lam: rate parameter lambda in events per unit time
    high: upper bound
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    for x in np.linspace(0, high, n):
        pmf.Set(x, EvalExponentialPdf(x, lam))
    pmf.Normalize()
    return pmf
def StandardNormalCdf(x):
    """Evaluates the CDF of the standard Normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution
    #Cumulative_distribution_function

    x: float

    returns: float
    """
    return (math.erf(x / ROOT2) + 1) / 2


def EvalNormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the normal distribution.

    x: float
    mu: mean parameter
    sigma: standard deviation parameter

    returns: float
    """
    return stats.norm.cdf(x, loc=mu, scale=sigma)


def EvalNormalCdfInverse(p, mu=0, sigma=1):
    """Evaluates the inverse CDF of the normal distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function

    p: float
    mu: mean parameter
    sigma: standard deviation parameter

    returns: float
    """
    return stats.norm.ppf(p, loc=mu, scale=sigma)


def EvalLognormalCdf(x, mu=0, sigma=1):
    """Evaluates the CDF of the lognormal distribution.

    x: float or sequence
    mu: mean of log(x)
    sigma: standard deviation of log(x)

    Returns: float or sequence
    """
    # scipy parameterizes lognorm by shape s=sigma and scale=exp(mu).
    # The previous call, stats.lognorm.cdf(x, loc=mu, scale=sigma),
    # omitted the required shape argument (TypeError) and misused
    # loc/scale.
    return stats.lognorm.cdf(x, sigma, scale=math.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
    """Generates sequences of xs and ps for an exponential CDF.

    lam: rate parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    return xs, 1 - np.exp(-lam * xs)


def RenderNormalCdf(mu, sigma, low, high, n=101):
    """Generates sequences of xs and ps for a Normal CDF.

    mu: mean parameter
    sigma: standard deviation parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    xs = np.linspace(low, high, n)
    return xs, stats.norm.cdf(xs, mu, sigma)


def RenderParetoCdf(xmin, alpha, low, high, n=50):
    """Generates sequences of xs and ps for a Pareto CDF.

    xmin: minimum-value parameter
    alpha: shape parameter
    low: float
    high: float
    n: number of points to render

    returns: numpy arrays (xs, ps)
    """
    # The CDF is 0 below xmin, so clamp the plotting range.
    low = max(low, xmin)
    xs = np.linspace(low, high, n)
    return xs, 1 - (xs / xmin) ** -alpha
class Beta(object):
    """Represents a Beta distribution.

    See http://en.wikipedia.org/wiki/Beta_distribution
    """

    def __init__(self, alpha=1, beta=1, label=None):
        """Initializes a Beta distribution with the given parameters."""
        self.alpha = alpha
        self.beta = beta
        self.label = '_nolegend_' if label is None else label

    def Update(self, data):
        """Updates a Beta distribution.

        data: pair of int (heads, tails)
        """
        heads, tails = data
        self.alpha = self.alpha + heads
        self.beta = self.beta + tails

    def Mean(self):
        """Computes the mean of this distribution."""
        return self.alpha / (self.alpha + self.beta)

    def Random(self):
        """Generates a random variate from this distribution."""
        return random.betavariate(self.alpha, self.beta)

    def Sample(self, n):
        """Generates a random sample from this distribution.

        n: int sample size

        returns: NumPy array of length n
        """
        return np.random.beta(self.alpha, self.beta, size=(n,))

    def EvalPdf(self, x):
        """Evaluates the (unnormalized) PDF at x."""
        return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)

    def MakePmf(self, steps=101, label=None):
        """Returns a Pmf of this distribution.

        Note: Normally, we just evaluate the PDF at a sequence
        of points and treat the probability density as a probability
        mass.

        But if alpha or beta is less than one, the PDF goes to infinity
        at x=0 and x=1, so we evaluate the CDF and compute differences
        instead.
        """
        if self.alpha < 1 or self.beta < 1:
            return self.MakeCdf().MakePmf()

        denom = steps - 1.0
        xs = [i / denom for i in range(steps)]
        probs = [self.EvalPdf(x) for x in xs]
        return Pmf(dict(zip(xs, probs)), label=label)

    def MakeCdf(self, steps=101):
        """Returns the CDF of this distribution."""
        denom = steps - 1.0
        xs = [i / denom for i in range(steps)]
        # betainc is the regularized incomplete beta function, which is
        # exactly the Beta CDF.
        ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
        return Cdf(xs, ps)
class Dirichlet(object):
    """Represents a Dirichlet distribution.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution
    """

    def __init__(self, n, conc=1, label=None):
        """Initializes a Dirichlet distribution.

        n: number of dimensions
        conc: concentration parameter (smaller yields more concentration)
        label: string label

        raises: ValueError if n < 2
        """
        if n < 2:
            raise ValueError('A Dirichlet distribution with '
                             'n<2 makes no sense')

        self.n = n
        # np.float was removed in NumPy 1.24; it was an alias for the
        # builtin float, which is used directly here.
        self.params = np.ones(n, dtype=float) * conc
        self.label = label if label is not None else '_nolegend_'

    def Update(self, data):
        """Updates a Dirichlet distribution.

        data: sequence of observations, in order corresponding to params
        """
        m = len(data)
        self.params[:m] += data

    def Random(self):
        """Generates a random variate from this distribution.

        Returns: normalized vector of fractions
        """
        p = np.random.gamma(self.params)
        return p / p.sum()

    def Likelihood(self, data):
        """Computes the likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float probability; 0 if there are more observations
        than dimensions
        """
        m = len(data)
        if self.n < m:
            return 0

        x = data
        p = self.Random()
        q = p[:m] ** x
        return q.prod()

    def LogLikelihood(self, data):
        """Computes the log likelihood of the data.

        Selects a random vector of probabilities from this distribution.

        Returns: float log probability; -inf if there are more
        observations than dimensions
        """
        m = len(data)
        if self.n < m:
            return float('-inf')

        x = self.Random()
        y = np.log(x[:m]) * data
        return y.sum()

    def MarginalBeta(self, i):
        """Computes the marginal distribution of the ith element.

        See http://en.wikipedia.org/wiki/Dirichlet_distribution
        #Marginal_distributions

        i: int

        Returns: Beta object
        """
        alpha0 = self.params.sum()
        alpha = self.params[i]
        return Beta(alpha, alpha0 - alpha)

    def PredictivePmf(self, xs, label=None):
        """Makes a predictive distribution.

        xs: values to go into the Pmf

        Returns: Pmf that maps from x to the mean prevalence of x
        """
        alpha0 = self.params.sum()
        ps = self.params / alpha0
        return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
    """Compute the binomial coefficient "n choose k".

    n: number of trials
    k: number of successes

    Returns: float
    """
    # scipy.misc.comb was removed from SciPy; scipy.special.comb is the
    # supported replacement.
    return special.comb(n, k)


def LogBinomialCoef(n, k):
    """Computes the log of the binomial coefficient.

    This is an approximation; see
    http://math.stackexchange.com/questions/64716/
    approximating-the-logarithm-of-the-binomial-coefficient

    n: number of trials
    k: number of successes

    Returns: float
    """
    # C(n, 0) == C(n, n) == 1, so the log is 0; the formula below would
    # otherwise evaluate log(0) and raise ValueError.
    if k == 0 or k == n:
        return 0.0
    return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
    """Generates data for a normal probability plot.

    ys: sequence of values
    jitter: float magnitude of jitter added to the ys

    returns: numpy arrays xs, ys (both sorted)
    """
    n = len(ys)
    # Sorted standard normal variates on the x-axis.
    xs = np.sort(np.random.normal(0, 1, n))

    ys = Jitter(ys, jitter) if jitter else np.array(ys)
    ys.sort()

    return xs, ys


def Jitter(values, jitter=0.5):
    """Jitters the values by adding a uniform variate in (-jitter, jitter).

    values: sequence
    jitter: scalar magnitude of jitter

    returns: new numpy array
    """
    offsets = np.random.uniform(-jitter, +jitter, len(values))
    return offsets + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
    """Makes a normal probability plot with a fitted line.

    sample: sequence of numbers
    fit_color: color string for the fitted line
    options: passed along to Plot
    """
    xs, ys = NormalProbability(sample)

    mean, var = MeanVar(sample)
    std = math.sqrt(var)

    # Draw the model line first so the data appears on top of it.
    fit_xs, fit_ys = FitLine(xs, mean, std)
    thinkplot.Plot(fit_xs, fit_ys, color=fit_color, label='model')

    # NOTE(review): NormalProbability is called a second time here, so
    # the plotted data uses different random xs than the fitted line —
    # presumably intentional resampling; confirm.
    xs, ys = NormalProbability(sample)
    thinkplot.Plot(xs, ys, **options)
def Mean(xs):
    """Computes mean.

    xs: sequence of values

    returns: float mean
    """
    return np.mean(xs)


def Var(xs, mu=None, ddof=0):
    """Computes variance.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    xs = np.asarray(xs)

    if mu is None:
        mu = xs.mean()

    deviations = xs - mu
    return np.dot(deviations, deviations) / (len(xs) - ddof)


def Std(xs, mu=None, ddof=0):
    """Computes standard deviation.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    return math.sqrt(Var(xs, mu, ddof))


def MeanVar(xs, ddof=0):
    """Computes mean and variance in one pass over the data.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, mean and var
    """
    xs = np.asarray(xs)
    mean = xs.mean()
    return mean, Var(xs, mean, ddof)
def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    t: sequence of numbers
    p: fraction of values to trim off each end

    Returns:
        list of values
    """
    n = int(p * len(t))
    t = sorted(t)
    # Use an explicit upper bound: the old slice `t[n:-n]` returns an
    # empty list whenever n == 0 (small p or short t), silently
    # discarding all the data.
    return t[n:len(t) - n]


def TrimmedMean(t, p=0.01):
    """Computes the trimmed mean of a sequence of numbers.

    t: sequence of numbers
    p: fraction of values to trim off each end

    Returns:
        float
    """
    t = Trim(t, p)
    return Mean(t)


def TrimmedMeanVar(t, p=0.01):
    """Computes the trimmed mean and variance of a sequence of numbers.

    t: sequence of numbers
    p: fraction of values to trim off each end

    Returns:
        pair of float (mean, variance)
    """
    t = Trim(t, p)
    mu, var = MeanVar(t)
    return mu, var
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    n1, n2 = len(group1), len(group2)

    # Pool the two variances, weighted by group size.
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)

    diff = group1.mean() - group2.mean()
    return diff / math.sqrt(pooled_var)
def Cov(xs, ys, meanx=None, meany=None):
    """Computes Cov(X, Y).

    xs: sequence of values
    ys: sequence of values
    meanx: optional float mean of xs
    meany: optional float mean of ys

    Returns:
        Cov(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    meanx = np.mean(xs) if meanx is None else meanx
    meany = np.mean(ys) if meany is None else meany

    return np.dot(xs - meanx, ys - meany) / len(xs)


def Corr(xs, ys):
    """Computes Corr(X, Y).

    xs: sequence of values
    ys: sequence of values

    Returns:
        Corr(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    meanx, varx = MeanVar(xs)
    meany, vary = MeanVar(ys)

    return Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)


def SerialCorr(series, lag=1):
    """Computes the serial correlation of a series.

    series: Series
    lag: integer number of intervals to shift

    returns: float correlation
    """
    shifted = series.shift(lag)
    return Corr(series[lag:], shifted[lag:])


def SpearmanCorr(xs, ys):
    """Computes Spearman's rank correlation.

    xs: sequence of values
    ys: sequence of values

    Returns:
        float Spearman's correlation
    """
    xranks = pandas.Series(xs).rank()
    yranks = pandas.Series(ys).rank()
    return Corr(xranks, yranks)
def MapToRanks(t):
    """Returns a list of ranks corresponding to the elements in t.

    Ties are broken by original position (stable sort), so ranks are
    always the integers 1..len(t).

    t: sequence of numbers

    Returns:
        list of integer ranks, starting at 1
    """
    # Indices of the elements in ascending value order (stable).
    order = sorted(range(len(t)), key=lambda i: t[i])

    # Scatter the 1-based ranks back to the original positions.
    ranks = [0] * len(t)
    for rank, index in enumerate(order, start=1):
        ranks[index] = rank
    return ranks
def LeastSquares(xs, ys):
    """Computes a linear least squares fit for ys as a function of xs.

    xs: sequence of values
    ys: sequence of values

    Returns:
        tuple of (intercept, slope)
    """
    meanx, varx = MeanVar(xs)
    meany = Mean(ys)

    # slope = Cov(x, y) / Var(x); the intercept puts the fitted line
    # through the point of means.
    slope = Cov(xs, ys, meanx, meany) / varx
    inter = meany - slope * meanx

    return inter, slope


def FitLine(xs, inter, slope):
    """Fits a line to the given data.

    xs: sequence of x
    inter: float intercept
    slope: float slope

    Returns:
        tuple of numpy arrays (sorted xs, fit ys)
    """
    sorted_xs = np.sort(xs)
    return sorted_xs, inter + slope * sorted_xs


def Residuals(xs, ys, inter, slope):
    """Computes residuals for a linear fit with parameters inter and slope.

    xs: independent variable
    ys: dependent variable
    inter: float intercept
    slope: float slope

    Returns:
        numpy array of residuals
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)
    return ys - (inter + slope * xs)


def CoefDetermination(ys, res):
    """Computes the coefficient of determination (R^2) for given residuals.

    ys: dependent variable
    res: residuals

    Returns:
        float coefficient of determination
    """
    return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
    """Generates standard normal variates with serial correlation.

    rho: target coefficient of correlation

    Returns: iterable (infinite generator)
    """
    value = random.gauss(0, 1)
    yield value

    # The conditional std dev that keeps the marginal variance at 1.
    residual_sigma = math.sqrt(1 - rho**2)
    while True:
        value = random.gauss(value * rho, residual_sigma)
        yield value


def CorrelatedNormalGenerator(mu, sigma, rho):
    """Generates normal variates with serial correlation.

    mu: mean of variate
    sigma: standard deviation of variate
    rho: target coefficient of correlation

    Returns: iterable (infinite generator)
    """
    for value in CorrelatedGenerator(rho):
        yield value * sigma + mu
def RawMoment(xs, k):
    """Computes the kth raw moment of xs."""
    total = sum(x**k for x in xs)
    return total / len(xs)


def CentralMoment(xs, k):
    """Computes the kth central moment of xs."""
    mean = RawMoment(xs, 1)
    deviations = [(x - mean)**k for x in xs]
    return sum(deviations) / len(xs)


def StandardizedMoment(xs, k):
    """Computes the kth standardized moment of xs."""
    std = math.sqrt(CentralMoment(xs, 2))
    return CentralMoment(xs, k) / std**k


def Skewness(xs):
    """Computes skewness (the third standardized moment)."""
    return StandardizedMoment(xs, 3)
def Median(xs):
    """Computes the median (50th percentile) of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: float
    """
    return Cdf(xs).Value(0.5)


def IQR(xs):
    """Computes the interquartile range of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: pair of floats (25th percentile, 75th percentile)
    """
    cdf = Cdf(xs)
    return cdf.Value(0.25), cdf.Value(0.75)


def PearsonMedianSkewness(xs):
    """Computes the Pearson median skewness.

    xs: sequence of values

    returns: float
    """
    median = Median(xs)
    mean = RawMoment(xs, 1)
    std = math.sqrt(CentralMoment(xs, 2))
    return 3 * (mean - median) / std
class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame with 'name', 'start' and 'end' columns
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of (start, end) index pairs
        names: Series of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs
        self.colspecs = variables[['start', 'end']] - index_base

        # convert colspecs to a list of pair of int
        # (np.int was removed from NumPy 1.24; the builtin int is what
        # it aliased)
        self.colspecs = self.colspecs.astype(int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df


def ReadStataDct(dct_file, **options):
    """Reads a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    # Use a context manager so the file handle is closed; the old code
    # left it open.
    with open(dct_file, **options) as f:
        for line in f:
            match = re.search(r'_column\(([^)]*)\)', line)
            if not match:
                continue
            start = int(match.group(1))
            t = line.split()
            vtype, name, fstring = t[1:4]
            name = name.lower()
            if vtype.startswith('str'):
                vtype = str
            else:
                vtype = type_map[vtype]
            long_desc = ' '.join(t[4:]).strip('"')
            var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column; the last
    # variable gets a placeholder end of 0
    variables['end'] = variables.start.shift(-1)
    variables.loc[len(variables) - 1, 'end'] = 0

    dct = FixedWidthVariables(variables, index_base=1)
    return dct
def Resample(xs, n=None):
    """Draw a sample from xs with the same length as xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    size = len(xs) if n is None else n
    return np.random.choice(xs, size, replace=True)


def SampleRows(df, nrows, replace=False):
    """Choose a sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    chosen = np.random.choice(df.index, nrows, replace=replace)
    return df.loc[chosen]


def ResampleRows(df):
    """Resamples rows from a DataFrame (with replacement).

    df: DataFrame

    returns: DataFrame
    """
    return SampleRows(df, len(df), replace=True)


def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    weights = df[column]
    cdf = Cdf(dict(weights))
    indices = cdf.Sample(len(weights))
    return df.loc[indices]
def PercentileRow(array, p):
    """Selects the row from a sorted array that maps to percentile p.

    array: NumPy 2-D array, rows sorted along axis 0
    p: float 0--100

    returns: NumPy array (one row)
    """
    rows, cols = array.shape
    # Clamp to the last row: p == 100 used to compute an index equal to
    # `rows`, which raised IndexError.
    index = min(int(rows * p / 100), rows - 1)
    return array[index,]


def PercentileRows(ys_seq, percents):
    """Given a collection of lines, selects percentiles along vertical axis.

    For example, if ys_seq contains simulation results like ys as a
    function of time, and percents contains (5, 95), the result would
    be a 90% CI for each vertical slice of the simulation results.

    ys_seq: sequence of lines (y values)
    percents: list of percentiles (0-100) to select

    returns: list of NumPy arrays, one for each percentile
    """
    nrows = len(ys_seq)
    ncols = len(ys_seq[0])
    array = np.zeros((nrows, ncols))

    for i, ys in enumerate(ys_seq):
        array[i,] = ys

    # Sort each column independently, then pick percentile rows.
    array = np.sort(array, axis=0)
    rows = [PercentileRow(array, p) for p in percents]
    return rows


def Smooth(xs, sigma=2, **options):
    """Smooths a NumPy array with a Gaussian filter.

    xs: sequence
    sigma: standard deviation of the filter
    """
    # ndimage.filters is a long-deprecated alias namespace (removed in
    # recent SciPy); call the function at its supported location.
    return ndimage.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
    """Represents a hypothesis test."""

    def __init__(self, data):
        """Initializes.

        data: data in whatever form is relevant
        """
        self.data = data
        self.MakeModel()
        # The observed value of the test statistic.
        self.actual = self.TestStatistic(data)
        self.test_stats = None
        self.test_cdf = None

    def PValue(self, iters=1000):
        """Computes the distribution of the test statistic and p-value.

        iters: number of iterations

        returns: float p-value
        """
        self.test_stats = [self.TestStatistic(self.RunModel())
                           for _ in range(iters)]
        self.test_cdf = Cdf(self.test_stats)

        # Fraction of simulated statistics at least as extreme as the
        # observed one.
        count = sum(1 for stat in self.test_stats if stat >= self.actual)
        return count / iters

    def MaxTestStat(self):
        """Returns the largest test statistic seen during simulations."""
        return max(self.test_stats)

    def PlotCdf(self, label=None):
        """Draws a Cdf with a vertical line at the observed test stat."""
        thinkplot.Plot([self.actual, self.actual], [0, 1], color='0.8')
        thinkplot.Cdf(self.test_cdf, label=label)

    def TestStatistic(self, data):
        """Computes the test statistic.

        Subclasses must override this.

        data: data in whatever form is relevant
        """
        raise UnimplementedMethodException()

    def MakeModel(self):
        """Build a model of the null hypothesis (optional override)."""
        pass

    def RunModel(self):
        """Run the model of the null hypothesis.

        Subclasses must override this.

        returns: simulated data
        """
        raise UnimplementedMethodException()
def main():
    # Placeholder entry point; this module is intended to be imported,
    # not run as a script.
    pass


if __name__ == '__main__':
    main()
| gpl-3.0 |
andrew0harney/Semantic-encoding-model | ldaUtils.py | 1 | 4140 | import glob
import re
import pickle
import numpy as np
import pandas as pd
import logging
from GridRegression import Encoder
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('__ldaUtils__')
"""Utility functions and classes for working with event(epoch) encodings"""
__author__ = 'Andrew O\Harney'
class LdaEncoding:
    """Stores LDA encoding results.

    Useful for enabling comparisons of encoding probabilities on a given
    topic: `values[topicN]` is the probability assigned to topic topicN.
    """
    name = None    # string identifier of the encoded item
    values = None  # sequence of per-topic probabilities
    topicN = None  # topic index used for comparisons

    def __init__(self, name, values, topicN=0):
        self.name = name
        self.values = values
        self.topicN = topicN

    def __cmp__(self, y, topicN=None):
        # Python-2 style three-way comparison, kept for backward
        # compatibility with existing callers.
        if topicN is None:
            topicN = self.topicN
        return np.sign(self.values[topicN] - y.values[topicN])

    # Python 3 ignores __cmp__, so sorting/comparing these objects
    # raised TypeError; provide rich comparisons using the same rule.
    def __lt__(self, y):
        return self.__cmp__(y) < 0

    def __eq__(self, y):
        return self.__cmp__(y) == 0

    # Defining __eq__ would otherwise clear the default hash; keep
    # identity hashing as before.
    __hash__ = object.__hash__

    def __getitem__(self, topicN):
        return self.values[topicN]

    def __str__(self, topicN=None):
        return self.name if topicN is None else self.name + ' ' + str(self.values[topicN])

    def setTopicN(self, topicN):
        # Topic number to compare probabilities of
        self.topicN = topicN
def createLabeledCorpDict(labeledImageDictionaryName, sourceReg, output=None):
    """Creates labeled dictionary of corpora for referencing.

    If the pickle named labeledImageDictionaryName does not exist, the
    label files matching sourceReg are parsed; otherwise the pickle is
    loaded and returned.

    Sample output:
    {3770: ['man', 'bull', 'people', 'stadium', 'dirt'],
     3771: ['grass', 'sky', 'trees', 'village'],
     3772: ['seal', 'rocks']}

    Keyword arguments:
    labeledImageDictionaryName -- Name for the dictionary pickle file
    sourceReg -- Glob pattern to find labeled image files
    output -- If not None, pickle the resulting dictionary
    """
    if not glob.glob(labeledImageDictionaryName):
        docs = dict()
        for tFile in glob.glob(sourceReg):
            logger.info('Processing ' + str(tFile))
            # Close the file handle; the old code leaked it.
            with open(tFile) as f:
                lines = f.read().splitlines()
            doc = []
            for line in lines:
                line = re.findall(r"[\w']+", line)
                # NOTE(review): when a line has several words only the
                # last one survives, and the lowercasing inside the
                # inner loop is discarded; looks buggy but the original
                # behavior is preserved here.
                if len(line) > 1:
                    for item in line:
                        item = item.lower()
                elif line != []:
                    item = line[0].lower()
                doc.append(item)
            # key is the first run of digits found in the file path
            docs[int(re.findall('[0-9]+', tFile)[0])] = list(set(doc))

        if output is not None:
            # `file()` is a Python 2 builtin (NameError on Python 3);
            # open in binary mode as pickle requires.
            with open(labeledImageDictionaryName, 'wb') as out:
                pickle.dump(docs, out)
        return docs
    else:
        with open(labeledImageDictionaryName, 'rb') as f:
            return pickle.load(f)
class LdaEncoder(Encoder):
    "Class to encapsulate encoding of an event for a given LDA model"

    # Model state, populated in __init__:
    __ldaDict__ = None        # gensim-style dictionary (word id -> word)
    __ldaModel__= None        # trained LDA model
    __docs__ = None           # mapping: stimulus label -> list of label words
    __modelWordList__ = None  # words present in the model's id2word table
    __numClasses__ = None     # number of LDA topics

    def __init__(self,ldaDict,docs,lda):
        self.__ldaDict__ = ldaDict
        self.__ldaModel__ = lda
        self.__numClasses__ = lda.num_topics
        self.__docs__ = docs
        self.__modelWordList__ = [self.__ldaModel__.id2word[wordid] for wordid in self.__ldaDict__] #Get valid words for this model

    def numClasses(self):
        # Number of topics in the underlying LDA model.
        return self.__numClasses__

    # NOTE(review): the extra `eps` parameter means this is normally
    # invoked directly rather than through subscription syntax.
    def __getitem__(self,event,eps=0):
        #Get stim fname
        stimName = event['label']

        #If it is a stimulus period (negative labels mark ISI periods)
        if stimName >= 0:
            stimWords = self.__docs__[stimName] #Get the labels for the given stimulus
            # self.model() is presumably supplied by the Encoder base
            # class and returns the LDA model -- TODO confirm.
            topicProbs= self.model().__getitem__(self.__ldaDict__.doc2bow([word for word in stimWords if word in self.__modelWordList__]),eps=eps) #Get the topic encoding
            #Series with {topicNum:prob} structure
            return pd.Series([tprob for (_,tprob) in topicProbs],index=[topicNum for (topicNum,_)in topicProbs])
        else: #If it is an isi, return a zero vector over all topics
            return np.zeros([self.model().num_topics])
dolremi/PiperLearn | piperlearn/analysis/process.py | 1 | 4743 | import pickle
import pickle
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import seaborn as sns
from scipy.special import boxcox1p
from scipy.stats import norm
class FeatureBuilder(object):
    """Load a pickled train/test split, derive new feature columns row-wise,
    and save the augmented split back to disk."""

    def __init__(self, inputfile, output):
        # _check_file (defined elsewhere in this module) validates paths.
        _check_file(inputfile)
        self.inputfile = inputfile
        _check_file(output)
        self.output = output

    def _print_summary(self):
        # Shared verbose report used by read_data() and save_data()
        # (previously duplicated verbatim in both methods).
        print("For training dataset: ")
        print(self.train_X.shape)
        print("The distribution of the training set: ")
        print(self.train_Y.value_counts())
        print("For test dataset: ")
        print(self.test_X.shape)
        print("The distribution of the test set: ")
        print(self.test_Y.value_counts())

    def read_data(self, verbose=False):
        """Populate train_X/train_Y/test_X/test_Y from the pickled input.

        The original opened the file without closing it; a context manager
        now guarantees the handle is released.
        """
        with open(self.inputfile, 'rb') as f:
            data = pickle.load(f)
        self.train_X = data['train_X']
        self.train_Y = data['train_Y']
        self.test_X = data['test_X']
        self.test_Y = data['test_Y']
        if verbose:
            self._print_summary()

    def build(self, handler, new_col):
        """Append column ``new_col``, computed by applying ``handler`` to
        each row of both the train and test feature frames."""
        self.train_X[new_col] = self.train_X.apply(lambda row: handler(row), axis=1)
        self.test_X[new_col] = self.test_X.apply(lambda row: handler(row), axis=1)

    def save_data(self, filename, verbose=False):
        """Pickle the current split to ``self.output + filename``."""
        file = self.output + filename
        data = {'train_X': self.train_X, 'train_Y': self.train_Y,
                'test_X': self.test_X, 'test_Y': self.test_Y}
        with open(file, 'wb') as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        if verbose:
            self._print_summary()
class UniPlot(object):
    """Univariate plotting helper for a single DataFrame column:
    count/distribution/probability/KDE plots plus in-place transforms."""

    def __init__(self, data, column, size):
        self.data = data
        self.column = column
        # Figure size tuple forwarded to matplotlib/seaborn.
        self.size = size

    def set_col(self, col):
        # Switch to a different (validated) column.
        _check_col(col, self.data)
        self.column = col

    def set_size(self, size):
        self.size = size

    def plot_counts(self):
        """Horizontal count plot of the column's categories."""
        _check_col(self.column, self.data)
        f, ax = plt.subplots(figsize=self.size)
        sns.countplot(y=self.column, data=self.data, color='c')

    def plot_dist(self):
        """Histogram/KDE of the column with a fitted normal overlay."""
        _check_col(self.column, self.data)
        plt.figure(figsize=self.size)
        sns.distplot(self.data[self.column].dropna(), color='r', fit=norm)

    def plot_pp(self):
        """Normal probability plot annotated with skewness and kurtosis."""
        _check_col(self.column, self.data)
        fig = plt.figure(figsize=self.size)
        ax = fig.add_subplot(1, 1, 1)
        # Annotate skew/kurtosis in the upper-right corner (axes coords).
        ax.text(0.95, 0.95, "Skewness: %f\n Kurtosis: %f" % (self.data[self.column].skew(),
                self.data[self.column].kurt()), transform=ax.transAxes,
                va="top")
        st.probplot(self.data[self.column], dist="norm", plot=ax)

    def plot_kde(self, target):
        """KDE of the column, one curve per class of the ``target`` column."""
        _check_col(target, self.data)
        _check_col(self.column, self.data)
        facet = sns.FacetGrid(self.data, hue=target, aspect=4)
        facet.map(sns.kdeplot, self.column, shade=True)
        facet.set(xlim=(0, self.data[self.column].max()))
        facet.add_legend()

    def boxcox_trans(self, alpha=0.15):
        # Replace the column in place with its Box-Cox(1+x) transform.
        self.data[self.column] = boxcoxTransform(self.data[self.column], alpha)

    def log_trans(self):
        # Replace the column in place with log(1 + x).
        self.data[self.column] = np.log1p(self.data[self.column])
class Correlations(object):
    """Hold a DataFrame's correlation matrix and plot it.

    Note: the attribute name ``corrolations`` (sic) is preserved for
    backward compatibility with existing callers.
    """

    def __init__(self, data, target=None):
        """Compute the correlation matrix of ``data``.

        target -- optional name of the target column used by plot_corr().
        """
        if target:
            # _check_col (defined elsewhere in this module) validates names.
            _check_col(target, data)
        self.target = target
        self.corrolations = data.corr()

    def plot_heatmap(self, size, annot=False, fmt='.2g'):
        """Draw the full correlation matrix as a seaborn heatmap."""
        plt.figure(figsize=size)
        sns.heatmap(self.corrolations, vmin=-1, vmax=1, annot=annot, fmt=fmt, square=True)

    def plot_corr(self, size, target=None):
        """Bar-plot each column's correlation with the target column,
        ordered by absolute correlation strength.

        Raises ValueError when no target was given here or at construction.

        Bug fixes vs. the original: the sort result was discarded (a no-op
        that also left a stray 'sorted' column in the matrix), and
        ``self.corrolations[self.target]`` was indexed before checking that
        a target existed at all.
        """
        key = self.target if self.target else target
        if key is None:
            raise ValueError("There is no value for target argument. The name of target column is needed.")
        target_corr = self.corrolations[key]
        # Order rows by |corr| so the strongest relationships plot together.
        match_corr = target_corr.reindex(target_corr.abs().sort_values().index)
        match_corr.plot.barh(figsize=size)
def boxcoxTransform(column, alpha=0.15):
    """Return the Box-Cox(1 + x) transform of ``column`` shifted up by one.

    column -- scalar or array-like accepted by scipy.special.boxcox1p
    alpha  -- Box-Cox lambda parameter (default 0.15)
    """
    transformed = boxcox1p(column, alpha)
    return transformed + 1
| mit |
stefanodoni/mtperf | statistics/LiveStatsGenerator.py | 2 | 3271 | from database import DBConstants
from parsers.Parser import Parser
import pandas as pd
import numpy as np
import sys
class LiveStatsGenerator:
    """Extract per-interval metric DataFrames from the SAR or PERF tables.

    NOTE(review): this block's original indentation was lost in extraction;
    the nesting below is a best-effort reconstruction. In particular
    ``return df`` is placed after the interval loop (so only the last
    interval's frame is returned) -- confirm against the original source.
    """

    # The normalize_perf parameter is needed to correctly compute the number of perf metrics accordingly to the measurement interval
    def extract(self, table, DBconn, startTS, endTS, user_interval, normalize_perf=False):
        """Query ``table`` for each [start, end] window in startTS/endTS.

        table          -- DBConstants.SAR_TABLE or DBConstants.PERF_TABLE
        DBconn         -- open DB connection usable by pandas.read_sql_query
        startTS, endTS -- parallel sequences of window boundaries
        user_interval  -- requested sampling interval (seconds)
        normalize_perf -- divide perf counters by the measurement interval
        """
        # Get column names from DB
        # Compute the measurement interval from the first two samples.
        df = pd.read_sql_query("SELECT * "
                               "FROM " + table + " "
                               "LIMIT 2", DBconn)
        if table == DBConstants.SAR_TABLE:
            firstTs = pd.datetime.strptime(df[Parser.TIMESTAMP_STR][0], '%Y-%m-%d %H:%M:%S')
            secondTs = pd.datetime.strptime(df[Parser.TIMESTAMP_STR][1], '%Y-%m-%d %H:%M:%S')
            interval = int((secondTs - firstTs).seconds)
            if interval > int(user_interval):  # Exit if the measurement interval is greater than the user interval
                print("Warning: SAR measurement interval (" + str(interval) + " seconds) is greater than requested sampling interval (" + str(user_interval) + " seconds). Increase interval argument.")
                sys.exit()
        elif table == DBConstants.PERF_TABLE:
            # PERF timestamps carry fractional seconds, hence the extra %f.
            firstTs = pd.datetime.strptime(df[Parser.TIMESTAMP_STR][0], '%Y-%m-%d %H:%M:%S.%f')
            secondTs = pd.datetime.strptime(df[Parser.TIMESTAMP_STR][1], '%Y-%m-%d %H:%M:%S.%f')
            interval = int((secondTs - firstTs).seconds)
            if interval > int(user_interval):  # Exit if the measurement interval is greater than the user interval
                print("Warning: PERF measurement interval (" + str(interval) + " seconds) is greater than requested sampling interval (" + str(user_interval) + " seconds). Increase interval argument.")
                sys.exit()
        # Remember the metric column names (minus bookkeeping columns).
        df.drop(['index', Parser.TIMESTAMP_STR], axis=1, inplace=True)
        mycolumns = df.columns
        for start, end in zip(startTS, endTS):
            # Extract dataframe for this window.
            df = pd.read_sql_query("SELECT * "
                                   "FROM " + table + " "
                                   "WHERE " + Parser.TIMESTAMP_STR + " >= '" + str(start) + "' " +
                                   "AND " + Parser.TIMESTAMP_STR + " <= '" + str(end) + "' ", DBconn)
            df.drop(['index'], axis=1, inplace=True)  # Remove first two cols, unused in stats
            # Replace negative values with NaN, for statistical purpose, exclude timestamp column
            if table == DBConstants.PERF_TABLE:
                df[df.iloc[:, 1:] < 0] = np.nan
            if len(df) > 0:  # We extracted collected data
                # Divide all values by the perf measurement interval
                if normalize_perf:
                    df.iloc[:, 1:] = df.iloc[:, 1:].div(interval)
            else:  # No data was collected during this interval
                # NOTE(review): np.zeros((0, n)) has zero rows, so this append
                # only guarantees the column layout -- confirm intent; a
                # (1, n) row of zeros may have been intended.
                zeros = pd.DataFrame(data=np.zeros((0, len(mycolumns))), columns=mycolumns)
                df = df.append(zeros)
        # Rename Sar column timestamp
        if table == DBConstants.SAR_TABLE:
            df.rename(columns={Parser.TIMESTAMP_STR: Parser.TIMESTAMP_START_STR}, inplace=True)
        return df
ephes/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, foo_param=0):
        # Single hyper-parameter that the searches iterate over.
        self.foo_param = foo_param

    def fit(self, X, Y):
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        # Deliberately returns the number of samples, not per-sample labels.
        return T.shape[0]

    # All prediction-style methods share the same dummy implementation.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Score 1.0 only for foo_param > 1, so the search must prefer it.
        if self.foo_param > 1:
            score = 1.
        else:
            score = 0.
        return score

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """An LinearSVC classifier that has no score method."""

    @property
    def score(self):
        # Accessing .score raises, simulating an estimator without one.
        raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Check that iterating a parameter grid matches integer indexing."""
    via_getitem = [grid[i] for i in range(len(grid))]
    assert_equal(list(grid), via_getitem)
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
    # Test that grid search can be used for model selection only
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
    grid_search.fit(X, y)
    # best_params_ must be available even though no final refit happened.
    assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Fails (via assert_true) if fit() was already called on this
        # instance, exposing refit paths that reuse a fitted estimator.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        # Constant prediction; the values are irrelevant to the test.
        return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
    # Test that a fit search can be pickled
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
    grid_search.fit(X, y)
    pickle.dumps(grid_search)  # smoke test
    random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
                                       refit=True, n_iter=3)
    random_search.fit(X, y)
    pickle.dumps(random_search)  # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()"""

    # fit() only fails when parameter equals this value.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")

    def predict(self, X):
        # Constant prediction; only reachable for non-failing parameters.
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
jpmml/sklearn2pmml | sklearn2pmml/ensemble/tests/__init__.py | 1 | 1997 | from pandas import DataFrame
from sklearn.base import clone
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import ElasticNet, LinearRegression, LogisticRegression, SGDClassifier, SGDRegressor
from sklearn.svm import LinearSVC
from sklearn2pmml.ensemble import _checkLM, _checkLR, _step_params, SelectFirstClassifier
from unittest import TestCase
class GBDTLRTest(TestCase):
    """Unit tests for the GBDT+LR ensemble helper functions."""

    def test_lm(self):
        # Each of these regressors must be accepted as a valid linear model.
        for regressor in (ElasticNet(), LinearRegression(), SGDRegressor()):
            _checkLM(regressor)

    def test_lr(self):
        # Each of these classifiers must be accepted as a valid linear classifier.
        for classifier in (LinearSVC(), LogisticRegression(), SGDClassifier()):
            _checkLR(classifier)

    def test_step_params(self):
        """_step_params pops the prefixed entries and returns them un-prefixed."""
        params = {
            "gbdt__first" : 1,
            "lr__first" : 1.0,
            "gbdt__second" : 2,
            "any__any" : None
        }
        gbdt_params = _step_params("gbdt", params)
        self.assertEqual({"first" : 1, "second" : 2}, gbdt_params)
        # The matching entries were removed from the source dict in place.
        self.assertEqual({"lr__first" : 1.0, "any__any" : None}, params)
        lr_params = _step_params("lr", params)
        self.assertEqual({"first" : 1.0}, lr_params)
        self.assertEqual({"any__any" : None}, params)
class SelectFirstClassifierTest(TestCase):
    """Unit tests for the SelectFirstClassifier meta-estimator."""

    def test_fit_predict(self):
        """Each row is routed to the first classifier whose predicate matches.

        Fix: removed stray dataset residue (``| agpl-3.0 |``) that was fused
        onto the final assertion line, which made the class unparsable.
        """
        df = DataFrame([[-1, 0], [0, 0], [-1, -1], [1, 1], [-1, -1]], columns = ["X", "y"])
        X = df[["X"]]
        y = df["y"]
        # clone() verifies the estimator round-trips through get_params/set_params.
        classifier = clone(SelectFirstClassifier([
            ("negative", DummyClassifier(strategy = "most_frequent"), "X[0] < 0"),
            ("positive", DummyClassifier(strategy = "most_frequent"), "X[0] > 0"),
            ("zero", DummyClassifier(strategy = "constant", constant = 0), str(True))
        ]))
        # Nested parameters are addressable through the step-name prefix.
        params = classifier.get_params(deep = True)
        self.assertEqual("most_frequent", params["negative__strategy"])
        self.assertEqual("most_frequent", params["positive__strategy"])
        self.assertEqual("constant", params["zero__strategy"])
        self.assertEqual(0, params["zero__constant"])
        classifier.fit(X, y)
        preds = classifier.predict(X)
        self.assertEqual([-1, 0, -1, 1, -1], preds.tolist())
        pred_probs = classifier.predict_proba(X)
        self.assertEqual((5, 2), pred_probs.shape)
jreback/pandas | pandas/tests/series/methods/test_align.py | 2 | 5341 | import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Series, date_range, period_range
import pandas._testing as tm
@pytest.mark.parametrize(
    "first_slice,second_slice",
    [
        [[2, None], [None, -5]],
        [[None, 0], [None, -5]],
        [[None, -5], [None, 0]],
        [[None, 0], [None, 0]],
    ],
)
@pytest.mark.parametrize("fill", [None, -1])
def test_align(datetime_series, first_slice, second_slice, join_type, fill):
    """align() must match a manual Index.join + reindex (+ fillna)."""
    # Two overlapping views of the same datetime-indexed series.
    a = datetime_series[slice(*first_slice)]
    b = datetime_series[slice(*second_slice)]
    aa, ab = a.align(b, join=join_type, fill_value=fill)
    # Expected index: the plain join of both indexes.
    join_index = a.index.join(b.index, how=join_type)
    if fill is not None:
        # Any labels outside the join must carry the fill value.
        diff_a = aa.index.difference(join_index)
        diff_b = ab.index.difference(join_index)
        if len(diff_a) > 0:
            assert (aa.reindex(diff_a) == fill).all()
        if len(diff_b) > 0:
            assert (ab.reindex(diff_b) == fill).all()
    # Build the expected aligned series by hand and compare.
    ea = a.reindex(join_index)
    eb = b.reindex(join_index)
    if fill is not None:
        ea = ea.fillna(fill)
        eb = eb.fillna(fill)
    tm.assert_series_equal(aa, ea)
    tm.assert_series_equal(ab, eb)
    # The series name ("ts", from the fixture) must survive alignment.
    assert aa.name == "ts"
    assert ea.name == "ts"
    assert ab.name == "ts"
    assert eb.name == "ts"
@pytest.mark.parametrize(
    "first_slice,second_slice",
    [
        [[2, None], [None, -5]],
        [[None, 0], [None, -5]],
        [[None, -5], [None, 0]],
        [[None, 0], [None, 0]],
    ],
)
@pytest.mark.parametrize("method", ["pad", "bfill"])
@pytest.mark.parametrize("limit", [None, 1])
def test_align_fill_method(
    datetime_series, first_slice, second_slice, join_type, method, limit
):
    """align(method=..., limit=...) must equal reindex + fillna(method, limit)."""
    a = datetime_series[slice(*first_slice)]
    b = datetime_series[slice(*second_slice)]
    aa, ab = a.align(b, join=join_type, method=method, limit=limit)
    # Expected: manual join of the indexes, then reindex + fillna on each side.
    join_index = a.index.join(b.index, how=join_type)
    ea = a.reindex(join_index)
    eb = b.reindex(join_index)
    ea = ea.fillna(method=method, limit=limit)
    eb = eb.fillna(method=method, limit=limit)
    tm.assert_series_equal(aa, ea)
    tm.assert_series_equal(ab, eb)
def test_align_nocopy(datetime_series):
    """align() shares data with its inputs iff copy=False."""
    short = datetime_series[:5].copy()
    # copy=True (default): mutating the aligned result leaves the input alone
    full = datetime_series.copy()
    left, _ = full.align(short, join="left")
    left[:5] = 5
    assert not (full[:5] == 5).any()
    # copy=False: the aligned result shares memory with the input
    full = datetime_series.copy()
    left, _ = full.align(short, join="left", copy=False)
    left[:5] = 5
    assert (full[:5] == 5).all()
    # same contract for the right-hand operand; first with a copy ...
    full = datetime_series.copy()
    short = datetime_series[:5].copy()
    _, right = full.align(short, join="right")
    right[:3] = 5
    assert not (short[:3] == 5).any()
    # ... then without
    full = datetime_series.copy()
    short = datetime_series[:5].copy()
    _, right = full.align(short, join="right", copy=False)
    right[:2] = 5
    assert (short[:2] == 5).all()
def test_align_same_index(datetime_series):
    """Self-alignment reuses the index object exactly when copy=False."""
    left, right = datetime_series.align(datetime_series, copy=False)
    # Without a copy, the very same index object is handed back.
    assert left.index is datetime_series.index
    assert right.index is datetime_series.index
    left, right = datetime_series.align(datetime_series, copy=True)
    # With a copy, equal-but-distinct index objects are returned.
    assert left.index is not datetime_series.index
    assert right.index is not datetime_series.index
def test_align_multiindex():
    # GH 10665: aligning a MultiIndex-ed Series with a Series indexed by one
    # of its levels joins on that shared level ("b").
    midx = pd.MultiIndex.from_product(
        [range(2), range(3), range(2)], names=("a", "b", "c")
    )
    idx = pd.Index(range(2), name="b")
    s1 = Series(np.arange(12, dtype="int64"), index=midx)
    s2 = Series(np.arange(2, dtype="int64"), index=idx)
    # these must be the same results (but flipped)
    res1l, res1r = s1.align(s2, join="left")
    res2l, res2r = s2.align(s1, join="right")
    # left-joining on the full MultiIndex keeps s1 unchanged ...
    expl = s1
    tm.assert_series_equal(expl, res1l)
    tm.assert_series_equal(expl, res2r)
    # ... while b == 2 has no match in s2, hence NaN on the other side.
    expr = Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
    tm.assert_series_equal(expr, res1r)
    tm.assert_series_equal(expr, res2l)
    res1l, res1r = s1.align(s2, join="right")
    res2l, res2r = s2.align(s1, join="left")
    # joining on s2's index restricts the result to b in {0, 1}.
    exp_idx = pd.MultiIndex.from_product(
        [range(2), range(2), range(2)], names=("a", "b", "c")
    )
    expl = Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
    tm.assert_series_equal(expl, res1l)
    tm.assert_series_equal(expl, res2r)
    expr = Series([0, 0, 1, 1] * 2, index=exp_idx)
    tm.assert_series_equal(expr, res1r)
    tm.assert_series_equal(expr, res2l)
@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])
def test_align_with_dataframe_method(method):
    # GH31788: aligning an already-aligned Series/DataFrame pair must be a
    # no-op whatever fill method is requested.
    ser = Series(range(3), index=range(3))
    df = pd.DataFrame(0.0, index=range(3), columns=range(3))
    aligned_ser, aligned_df = ser.align(df, method=method)
    tm.assert_series_equal(aligned_ser, ser)
    tm.assert_frame_equal(aligned_df, df)
def test_align_dt64tzindex_mismatched_tzs():
    """Series with different timezones align onto a common UTC index."""
    idx_eastern = date_range("2001", periods=5, freq="H", tz="US/Eastern")
    ser_eastern = Series(np.random.randn(len(idx_eastern)), index=idx_eastern)
    ser_central = ser_eastern.tz_convert("US/Central")
    # different timezones convert to UTC
    aligned_east, aligned_cent = ser_eastern.align(ser_central)
    assert aligned_east.index.tz == pytz.UTC
    assert aligned_cent.index.tz == pytz.UTC
def test_align_periodindex(join_type):
    """Smoke test: align() runs on a PeriodIndex for every join type."""
    prng = period_range("1/1/2000", "1/1/2010", freq="A")
    per_ser = Series(np.random.randn(len(prng)), index=prng)
    # TODO: assert something about the aligned result?
    per_ser.align(per_ser[::2], join=join_type)
| bsd-3-clause |
Averroes/statsmodels | examples/incomplete/wls_extended.py | 33 | 16137 | """
Weighted Least Squares
example is extended to look at the meaning of rsquared in WLS,
at outliers, compares with RLM and a short bootstrap
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
# Load the credit-card expenditure dataset; append the constant as last column.
data = sm.datasets.ccard.load()
data.exog = sm.add_constant(data.exog, prepend=False)
ols_fit = sm.OLS(data.endog, data.exog).fit()
# perhaps the residuals from this fit depend on the square of income
incomesq = data.exog[:,2]
plt.scatter(incomesq, ols_fit.resid)
#@savefig wls_resid_check.png
plt.grid()
# If we think that the variance is proportional to income**2
# we would want to weight the regression by income
# the weights argument in WLS weights the regression by its square root
# and since income enters the equation, if we have income/income
# it becomes the constant, so we would want to perform
# this type of regression without an explicit constant in the design
#..data.exog = data.exog[:,:-1]
wls_fit = sm.WLS(data.endog, data.exog[:,:-1], weights=1/incomesq).fit()
# This however, leads to difficulties in interpreting the post-estimation
# statistics. Statsmodels does not yet handle this elegantly, but
# the following may be more appropriate
# explained sum of squares
ess = wls_fit.uncentered_tss - wls_fit.ssr
# rsquared
rsquared = ess/wls_fit.uncentered_tss
# mean squared error of the model
mse_model = ess/(wls_fit.df_model + 1) # add back the dof of the constant
# f statistic
fvalue = mse_model/wls_fit.mse_resid
# adjusted r-squared
rsquared_adj = 1 -(wls_fit.nobs)/(wls_fit.df_resid)*(1-rsquared)
#Trying to figure out what's going on in this example
#----------------------------------------------------
#JP: I need to look at this again. Even if I exclude the weight variable
# from the regressors and keep the constant in then the reported rsquared
# stays small. Below also compared using squared or sqrt of weight variable.
# TODO: need to add 45 degree line to graphs
# Refit with the heteroscedasticity variable (income) dropped from the design.
wls_fit3 = sm.WLS(data.endog, data.exog[:,(0,1,3,4)], weights=1/incomesq).fit()
print(wls_fit3.summary())
print('corrected rsquared')
print((wls_fit3.uncentered_tss - wls_fit3.ssr)/wls_fit3.uncentered_tss)
plt.figure();
plt.title('WLS dropping heteroscedasticity variable from regressors');
plt.plot(data.endog, wls_fit3.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_drop_het.png
plt.ylim([0,2000]);
print('raw correlation of endog and fittedvalues')
print(np.corrcoef(data.endog, wls_fit.fittedvalues))
print('raw correlation coefficient of endog and fittedvalues squared')
print(np.corrcoef(data.endog, wls_fit.fittedvalues)[0,1]**2)
# compare with robust regression,
# heteroscedasticity correction downweights the outliers
rlm_fit = sm.RLM(data.endog, data.exog).fit()
plt.figure();
plt.title('using robust for comparison');
plt.plot(data.endog, rlm_fit.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_robust_compare.png
plt.ylim([0,2000]);
#What is going on? A more systematic look at the data
#----------------------------------------------------
# two helper functions
def getrsq(fitresult):
    '''Calculate rsquared plus residual, total and explained sums of squares.

    Parameters
    ----------
    fitresult : instance of Regression Result class, or tuple of (resid, endog) arrays
        regression residuals and endogenous variable

    Returns
    -------
    rsquared
    residual sum of squares
    (centered) total sum of squares
    explained sum of squares (for centered)
    '''
    if hasattr(fitresult, 'resid') and hasattr(fitresult, 'model'):
        # a fitted results instance: pull the pieces off the object
        resid = fitresult.resid
        endog = fitresult.model.endog
        nobs = fitresult.nobs
    else:
        # a raw (resid, endog) pair
        resid = fitresult[0]
        endog = fitresult[1]
        nobs = resid.shape[0]
    rss = np.dot(resid, resid)
    tss = np.var(endog) * nobs
    ess = tss - rss
    return 1 - rss / tss, rss, tss, ess
def index_trim_outlier(resid, k):
    '''Return indices into a residual array with the k largest outliers removed.

    Parameters
    ----------
    resid : array_like, 1d
        data vector, usually residuals of a regression
    k : int
        number of outliers to remove

    Returns
    -------
    trimmed_index : array, 1d
        index array with k outliers removed
    outlier_index : array, 1d
        index array of k outliers

    Notes
    -----
    Outliers are defined as the k observations with the largest
    absolute values.
    '''
    # positions ordered from smallest to largest |resid|; the last k are the outliers
    order = np.argsort(np.abs(resid))
    keep = order[:-k]
    drop = order[-k:]
    return np.sort(keep), np.sort(drop)
#Comparing estimation results for ols, rlm and wls with and without outliers
#---------------------------------------------------------------------------
#ols_test_fit = sm.OLS(data.endog, data.exog).fit()
# Drop the two largest-residual observations from the OLS fit.
olskeep, olsoutl = index_trim_outlier(ols_fit.resid, 2)
print('ols outliers', olsoutl, ols_fit.resid[olsoutl])
ols_fit_rm2 = sm.OLS(data.endog[olskeep], data.exog[olskeep,:]).fit()
rlm_fit_rm2 = sm.RLM(data.endog[olskeep], data.exog[olskeep,:]).fit()
#weights = 1/incomesq
results = [ols_fit, ols_fit_rm2, rlm_fit, rlm_fit_rm2]
#Note: I think incomesq is already square
# Compare three candidate weighting schemes, each with/without the 2 outliers.
for weights in [1/incomesq, 1/incomesq**2, np.sqrt(incomesq)]:
    print('\nComparison OLS and WLS with and without outliers')
    wls_fit0 = sm.WLS(data.endog, data.exog, weights=weights).fit()
    wls_fit_rm2 = sm.WLS(data.endog[olskeep], data.exog[olskeep,:],
                         weights=weights[olskeep]).fit()
    wlskeep, wlsoutl = index_trim_outlier(ols_fit.resid, 2)
    print('2 outliers candidates and residuals')
    print(wlsoutl, wls_fit.resid[olsoutl])
    # redundant because ols and wls outliers are the same:
    ##wls_fit_rm2_ = sm.WLS(data.endog[wlskeep], data.exog[wlskeep,:],
    ##                  weights=1/incomesq[wlskeep]).fit()
    print('outliers ols, wls:', olsoutl, wlsoutl)
    print('rsquared')
    print('ols vs ols rm2', ols_fit.rsquared, ols_fit_rm2.rsquared)
    print('wls vs wls rm2', wls_fit0.rsquared, wls_fit_rm2.rsquared) #, wls_fit_rm2_.rsquared
    print('compare R2_resid versus R2_wresid')
    print('ols minus 2', getrsq(ols_fit_rm2)[0],)
    print(getrsq((ols_fit_rm2.wresid, ols_fit_rm2.model.wendog))[0])
    print('wls ', getrsq(wls_fit)[0],)
    print(getrsq((wls_fit.wresid, wls_fit.model.wendog))[0])
    print('wls minus 2', getrsq(wls_fit_rm2)[0])
    # next is same as wls_fit_rm2.rsquared for cross checking
    print(getrsq((wls_fit_rm2.wresid, wls_fit_rm2.model.wendog))[0])
    #print(getrsq(wls_fit_rm2_)[0],
    #print(getrsq((wls_fit_rm2_.wresid, wls_fit_rm2_.model.wendog))[0]
    results.extend([wls_fit0, wls_fit_rm2])
# Tabulate all fitted models side by side (columns in `results` order).
print('     ols             ols_rm2       rlm           rlm_rm2     wls (lin)    wls_rm2 (lin)   wls (squ)   wls_rm2 (squ)  wls (sqrt)   wls_rm2 (sqrt)')
print('Parameter estimates')
print(np.column_stack([r.params for r in results]))
print('R2 original data, next line R2 weighted data')
print(np.column_stack([getattr(r, 'rsquared', None) for r in results]))
print('Standard errors')
print(np.column_stack([getattr(r, 'bse', None) for r in results]))
print('Heteroscedasticity robust standard errors (with ols)')
print('with outliers')
print(np.column_stack([getattr(ols_fit, se, None) for se in ['HC0_se', 'HC1_se', 'HC2_se', 'HC3_se']]))
#..'''
#..
#.. ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)
#..Parameter estimates
#..[[ -3.08181404 -5.06103843 -4.98510966 -5.34410309 -2.69418516 -3.1305703 -1.43815462 -1.58893054 -3.57074829 -6.80053364]
#.. [ 234.34702702 115.08753715 129.85391456 109.01433492 158.42697752 128.38182357 60.95113284 100.25000841 254.82166855 103.75834726]
#.. [ -14.99684418 -5.77558429 -6.46204829 -4.77409191 -7.24928987 -7.41228893 6.84943071 -3.34972494 -16.40524256 -4.5924465 ]
#.. [ 27.94090839 85.46566835 89.91389709 95.85086459 60.44877369 79.7759146 55.9884469 60.97199734 -3.8085159 84.69170048]
#.. [-237.1465136 39.51639838 -15.50014814 31.39771833 -114.10886935 -40.04207242 -6.41976501 -38.83583228 -260.72084271 117.20540179]]
#..
#..R2 original data, next line R2 weighted data
#..[[ 0.24357792 0.31745994 0.19220308 0.30527648 0.22861236 0.3112333 0.06573949 0.29366904 0.24114325 0.31218669]]
#..[[ 0.24357791 0.31745994 None None 0.05936888 0.0679071 0.06661848 0.12769654 0.35326686 0.54681225]]
#..
#..-> R2 with weighted data is jumping all over
#..
#..standard errors
#..[[ 5.51471653 3.31028758 2.61580069 2.39537089 3.80730631 2.90027255 2.71141739 2.46959477 6.37593755 3.39477842]
#.. [ 80.36595035 49.35949263 38.12005692 35.71722666 76.39115431 58.35231328 87.18452039 80.30086861 86.99568216 47.58202096]
#.. [ 7.46933695 4.55366113 3.54293763 3.29509357 9.72433732 7.41259156 15.15205888 14.10674821 7.18302629 3.91640711]
#.. [ 82.92232357 50.54681754 39.33262384 36.57639175 58.55088753 44.82218676 43.11017757 39.31097542 96.4077482 52.57314209]
#.. [ 199.35166485 122.1287718 94.55866295 88.3741058 139.68749646 106.89445525 115.79258539 105.99258363 239.38105863 130.32619908]]
#..
#..robust standard errors (with ols)
#..with outliers
#.. HC0_se HC1_se HC2_se HC3_se'
#..[[ 3.30166123 3.42264107 3.4477148 3.60462409]
#.. [ 88.86635165 92.12260235 92.08368378 95.48159869]
#.. [ 6.94456348 7.19902694 7.19953754 7.47634779]
#.. [ 92.18777672 95.56573144 95.67211143 99.31427277]
#.. [ 212.9905298 220.79495237 221.08892661 229.57434782]]
#..
#..removing 2 outliers
#..[[ 2.57840843 2.67574088 2.68958007 2.80968452]
#.. [ 36.21720995 37.58437497 37.69555106 39.51362437]
#.. [ 3.1156149 3.23322638 3.27353882 3.49104794]
#.. [ 50.09789409 51.98904166 51.89530067 53.79478834]
#.. [ 94.27094886 97.82958699 98.25588281 102.60375381]]
#..
#..
#..'''
# a quick bootstrap analysis
# --------------------------
#
#(I didn't check whether this is fully correct statistically)
#**With OLS on full sample**
# Bootstrap the OLS fit: resample observations with replacement, refit,
# and collect both the parameter estimates and their reported standard
# errors for each replication.
nobs, nvar = data.exog.shape
niter = 2000
bootres = np.zeros((niter, nvar*2))
for it in range(niter):
    rind = np.random.randint(nobs, size=nobs)
    endog = data.endog[rind]
    exog = data.exog[rind,:]
    res = sm.OLS(endog, exog).fit()
    bootres[it, :nvar] = res.params   # parameter estimates
    bootres[it, nvar:] = res.bse      # their standard errors
# BUG FIX: was ``np.set_print(options(linewidth=200))`` which raises a
# NameError; the intended call is numpy's set_printoptions.
np.set_printoptions(linewidth=200)
print('Bootstrap Results of parameters and parameter standard deviation OLS')
print('Parameter estimates')
print('median', np.median(bootres[:,:5], 0))
print('mean  ', np.mean(bootres[:,:5], 0))
print('std   ', np.std(bootres[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootres[:,5:], 0))
print('mean  ', np.mean(bootres[:,5:], 0))
print('std   ', np.std(bootres[:,5:], 0))
# Histogram of the bootstrap distribution of the first four coefficients.
plt.figure()
for i in range(4):
    plt.subplot(2,2,i+1)
    plt.hist(bootres[:,i],50)
    plt.title('var%d'%i)
#@savefig wls_bootstrap.png
plt.figtext(0.5, 0.935,  'OLS Bootstrap',
            ha='center', color='black', weight='bold', size='large')
#**With WLS on sample with outliers removed**
# Same bootstrap, but on the outlier-trimmed sample and with WLS weights.
data_endog = data.endog[olskeep]
data_exog = data.exog[olskeep,:]
incomesq_rm2 = incomesq[olskeep]
nobs, nvar = data_exog.shape
niter = 500  # a bit slow
bootreswls = np.zeros((niter, nvar*2))
for it in range(niter):
    rind = np.random.randint(nobs, size=nobs)
    endog = data_endog[rind]
    exog = data_exog[rind,:]
    # BUG FIX: was ``weights=1/incomesq[rind,:]`` which (a) indexes a 1-d
    # array with 2-d syntax (IndexError) and (b) indexes the full-sample
    # array with positions drawn from the trimmed sample; the trimmed
    # weight variable incomesq_rm2 is the one that matches `rind`.
    res = sm.WLS(endog, exog, weights=1/incomesq_rm2[rind]).fit()
    bootreswls[it, :nvar] = res.params
    bootreswls[it, nvar:] = res.bse
print('Bootstrap Results of parameters and parameter standard deviation',)
print('WLS removed 2 outliers from sample')
print('Parameter estimates')
print('median', np.median(bootreswls[:,:5], 0))
print('mean  ', np.mean(bootreswls[:,:5], 0))
print('std   ', np.std(bootreswls[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootreswls[:,5:], 0))
print('mean  ', np.mean(bootreswls[:,5:], 0))
print('std   ', np.std(bootreswls[:,5:], 0))
# Histogram of the bootstrap distribution of the first four coefficients.
plt.figure()
for i in range(4):
    plt.subplot(2,2,i+1)
    plt.hist(bootreswls[:,i],50)
    plt.title('var%d'%i)
#@savefig wls_bootstrap_rm2.png
plt.figtext(0.5, 0.935,  'WLS rm2 Bootstrap',
            ha='center', color='black', weight='bold', size='large')
#..plt.show()
#..plt.close('all')
#::
#
# The following a random variables not fixed by a seed
#
# Bootstrap Results of parameters and parameter standard deviation
# OLS
#
# Parameter estimates
# median [ -3.26216383 228.52546429 -14.57239967 34.27155426 -227.02816597]
# mean [ -2.89855173 234.37139359 -14.98726881 27.96375666 -243.18361746]
# std [ 3.78704907 97.35797802 9.16316538 94.65031973 221.79444244]
#
# Standard deviation of parameter estimates
# median [ 5.44701033 81.96921398 7.58642431 80.64906783 200.19167735]
# mean [ 5.44840542 86.02554883 8.56750041 80.41864084 201.81196849]
# std [ 1.43425083 29.74806562 4.22063268 19.14973277 55.34848348]
#
# Bootstrap Results of parameters and parameter standard deviation
# WLS removed 2 outliers from sample
#
# Parameter estimates
# median [ -3.95876112 137.10419042 -9.29131131 88.40265447 -44.21091869]
# mean [ -3.67485724 135.42681207 -8.7499235 89.74703443 -46.38622848]
# std [ 2.96908679 56.36648967 7.03870751 48.51201918 106.92466097]
#
# Standard deviation of parameter estimates
# median [ 2.89349748 59.19454402 6.70583332 45.40987953 119.05241283]
# mean [ 2.97600894 60.14540249 6.92102065 45.66077486 121.35519673]
# std [ 0.55378808 11.77831934 1.69289179 7.4911526 23.72821085]
#
#
#
#Conclusion: problem with outliers and possibly heteroscedasticity
#-----------------------------------------------------------------
#
#in bootstrap results
#
#* bse in OLS underestimates the standard deviation of the parameters
# compared to standard deviation in bootstrap
#* OLS heteroscedasticity corrected standard errors for the original
# data (above) are close to bootstrap std
#* using WLS with 2 outliers removed has a relatively good match between
# the mean or median bse and the std of the parameter estimates in the
# bootstrap
#
#We could also include rsquared in bootstrap, and do it also for RLM.
#The problems could also mean that the linearity assumption is violated,
#e.g. try non-linear transformation of exog variables, but linear
#in parameters.
#
#
#for statsmodels
#
# * In this case rsquared for original data looks less random/arbitrary.
# * Don't change definition of rsquared from centered tss to uncentered
# tss when calculating rsquared in WLS if the original exog contains
# a constant. The increase in rsquared because of a change in definition
# will be very misleading.
# * Whether there is a constant in the transformed exog, wexog, or not,
# might affect also the degrees of freedom calculation, but I haven't
# checked this. I would guess that the df_model should stay the same,
# but needs to be verified with a textbook.
# * df_model has to be adjusted if the original data does not have a
# constant, e.g. when regressing an endog on a single exog variable
# without constant. This case might require also a redefinition of
# the rsquare and f statistic for the regression anova to use the
# uncentered tss.
# This can be done through keyword parameter to model.__init__ or
#   through autodetection with hasconst = (exog.var(0)<1e-10).any()
#   I'm not sure about fixed effects with a full dummy set but
#   without a constant. In this case autodetection wouldn't work this
# way. Also, I'm not sure whether a ddof keyword parameter can also
# handle the hasconst case.
| bsd-3-clause |
anntzer/scikit-learn | sklearn/neighbors/_lof.py | 2 | 21119 | # Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
import warnings
from ._base import NeighborsBase
from ._base import KNeighborsMixin
from ..base import OutlierMixin
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(KNeighborsMixin,
OutlierMixin,
NeighborsBase):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
.. versionadded:: 0.19
Parameters
----------
n_neighbors : int, default=20
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
https://docs.scipy.org/doc/scipy/reference/spatial.distance.html
p : int, default=2
Parameter for the Minkowski metric from
:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this
is equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the scores of the samples.
- if 'auto', the threshold is determined as in the
original paper,
- if a float, the contamination should be in the range (0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
novelty : bool, default=False
By default, LocalOutlierFactor is only meant to be used for outlier
detection (novelty=False). Set novelty to True if you want to use
LocalOutlierFactor for novelty detection. In this case be aware that
you should only use predict, decision_function and score_samples
on new unseen data and not on the training set.
.. versionadded:: 0.20
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
negative_outlier_factor_ : ndarray of shape (n_samples,)
The opposite LOF of the training samples. The higher, the more normal.
Inliers tend to have a LOF score close to 1
(``negative_outlier_factor_`` close to -1), while outliers tend to have
a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : int
The actual number of neighbors used for :meth:`kneighbors` queries.
offset_ : float
Offset used to obtain binary labels from the raw scores.
Observations having a negative_outlier_factor smaller than `offset_`
are detected as abnormal.
The offset is set to -1.5 (inliers score around -1), except when a
contamination parameter different than "auto" is provided. In that
case, the offset is defined in such a way we obtain the expected
number of outliers in training.
.. versionadded:: 0.20
effective_metric_ : str
The effective metric used for the distance computation.
effective_metric_params_ : dict
The effective additional keyword arguments for the metric function.
n_samples_fit_ : int
It is the number of samples in the fitted data.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import LocalOutlierFactor
>>> X = [[-1.1], [0.2], [101.1], [0.3]]
>>> clf = LocalOutlierFactor(n_neighbors=2)
>>> clf.fit_predict(X)
array([ 1, 1, -1, 1])
>>> clf.negative_outlier_factor_
array([ -0.9821..., -1.0370..., -73.3697..., -0.9821...])
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
    @_deprecate_positional_args
    def __init__(self, n_neighbors=20, *, algorithm='auto', leaf_size=30,
                 metric='minkowski', p=2, metric_params=None,
                 contamination="auto", novelty=False, n_jobs=None):
        # Neighbor-search configuration is handled by NeighborsBase.
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            leaf_size=leaf_size, metric=metric, p=p,
            metric_params=metric_params, n_jobs=n_jobs)
        # Stored unvalidated; checked in fit() per scikit-learn convention.
        self.contamination = contamination
        self.novelty = novelty
    @property
    def fit_predict(self):
        """Fits the model to the training set X and returns the labels.
        **Not available for novelty detection (when novelty is set to True).**
        Label is 1 for an inlier and -1 for an outlier according to the LOF
        score and the contamination parameter.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), default=None
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. to the training samples.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and 1 for inliers.
        """
        # As fit_predict would be different from fit.predict, fit_predict is
        # only available for outlier detection (novelty=False)
        if self.novelty:
            msg = ('fit_predict is not available when novelty=True. Use '
                   'novelty=False if you want to predict on the training set.')
            raise AttributeError(msg)
        # Implemented as a property returning the bound method so that the
        # attribute's very existence depends on the novelty setting.
        return self._fit_predict
    def _fit_predict(self, X, y=None):
        """Fits the model to the training set X and returns the labels.
        Label is 1 for an inlier and -1 for an outlier according to the LOF
        score and the contamination parameter.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), default=None
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. to the training samples.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and 1 for inliers.
        """
        # As fit_predict would be different from fit.predict, fit_predict is
        # only available for outlier detection (novelty=False)
        # _predict() with no argument scores the training samples themselves.
        return self.fit(X)._predict()
    def fit(self, X, y=None):
        """Fit the local outlier factor detector from the training dataset.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : LocalOutlierFactor
            The fitted local outlier factor detector.
        """
        self._fit(X)
        # Validate contamination eagerly so bad values fail at fit time.
        if self.contamination != 'auto':
            if not(0. < self.contamination <= .5):
                raise ValueError("contamination must be in (0, 0.5], "
                                 "got: %f" % self.contamination)
        n_samples = self.n_samples_fit_
        # Cap the effective neighbor count at n_samples - 1 (a sample is not
        # its own neighbor), warning the user when the request was larger.
        if self.n_neighbors > n_samples:
            warnings.warn("n_neighbors (%s) is greater than the "
                          "total number of samples (%s). n_neighbors "
                          "will be set to (n_samples - 1) for estimation."
                          % (self.n_neighbors, n_samples))
        self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
        self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
            n_neighbors=self.n_neighbors_)
        # Local reachability density of every training sample.
        self._lrd = self._local_reachability_density(
            self._distances_fit_X_, _neighbors_indices_fit_X_)
        # Compute lof score over training samples to define offset_:
        lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
                            self._lrd[:, np.newaxis])
        self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
        if self.contamination == "auto":
            # inliers score around -1 (the higher, the less abnormal).
            self.offset_ = -1.5
        else:
            # Threshold chosen so the expected fraction of training samples
            # flagged as outliers equals `contamination`.
            self.offset_ = np.percentile(self.negative_outlier_factor_,
                                         100. * self.contamination)
        return self
    @property
    def predict(self):
        """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
        **Only available for novelty detection (when novelty is set to True).**
        This method allows to generalize prediction to *new observations* (not
        in the training set).
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. to the training samples.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        # Mirror image of fit_predict: only exposed when novelty=True.
        if not self.novelty:
            msg = ('predict is not available when novelty=False, use '
                   'fit_predict if you want to predict on training data. Use '
                   'novelty=True if you want to use LOF for novelty detection '
                   'and predict on new unseen data.')
            raise AttributeError(msg)
        return self._predict
def _predict(self, X=None):
    """Predict the labels (1 inlier, -1 outlier) of X according to LOF.

    If X is None, returns the same as fit_predict(X_train).

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features), default=None
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. to the training samples. If None, makes prediction on the
        training data without considering them as their own neighbors.

    Returns
    -------
    is_inlier : ndarray of shape (n_samples,)
        Returns -1 for anomalies/outliers and +1 for inliers.
    """
    check_is_fitted(self)
    if X is None:
        # Training-set labels come directly from the fitted scores,
        # thresholded at the fitted offset.
        labels = np.ones(self.n_samples_fit_, dtype=int)
        labels[self.negative_outlier_factor_ < self.offset_] = -1
    else:
        # New observations: score them and threshold at zero (the
        # decision function is already shifted by offset_).
        X = check_array(X, accept_sparse='csr')
        labels = np.ones(X.shape[0], dtype=int)
        labels[self.decision_function(X) < 0] = -1
    return labels
@property
def decision_function(self):
    """Shifted opposite of the Local Outlier Factor of X.

    Bigger is better, i.e. large values correspond to inliers.
    **Only available for novelty detection (when novelty is set to True).**
    The shift offset allows a zero threshold for being an outlier.
    The argument X is supposed to contain *new data*: if X contains a
    point from training, it considers the later in its own neighborhood.
    Also, the samples in X are not considered in the neighborhood of any
    point.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples.

    Returns
    -------
    shifted_opposite_lof_scores : ndarray of shape (n_samples,)
        The shifted opposite of the Local Outlier Factor of each input
        samples. The lower, the more abnormal. Negative scores represent
        outliers, positive scores represent inliers.
    """
    # Only novelty-mode estimators may score unseen data.
    if self.novelty:
        return self._decision_function
    raise AttributeError(
        'decision_function is not available when novelty=False. '
        'Use novelty=True if you want to use LOF for novelty '
        'detection and compute decision_function for new unseen '
        'data. Note that the opposite LOF of the training samples '
        'is always available by considering the '
        'negative_outlier_factor_ attribute.'
    )
def _decision_function(self, X):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
**Only available for novelty detection (when novelty is set to True).**
The shift offset allows a zero threshold for being an outlier.
The argument X is supposed to contain *new data*: if X contains a
point from training, it considers the later in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : ndarray of shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
samples. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
return self._score_samples(X) - self.offset_
@property
def score_samples(self):
    """Opposite of the Local Outlier Factor of X.

    It is the opposite as bigger is better, i.e. large values correspond
    to inliers.
    **Only available for novelty detection (when novelty is set to True).**
    The argument X is supposed to contain *new data*: if X contains a
    point from training, it considers the later in its own neighborhood.
    Also, the samples in X are not considered in the neighborhood of any
    point. The score_samples on training data is available through the
    ``negative_outlier_factor_`` attribute.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples.

    Returns
    -------
    opposite_lof_scores : ndarray of shape (n_samples,)
        The opposite of the Local Outlier Factor of each input samples.
        The lower, the more abnormal.
    """
    # Only novelty-mode estimators may score unseen data.
    if self.novelty:
        return self._score_samples
    raise AttributeError(
        'score_samples is not available when novelty=False. The '
        'scores of the training samples are always available '
        'through the negative_outlier_factor_ attribute. Use '
        'novelty=True if you want to use LOF for novelty detection '
        'and compute score_samples for new unseen data.'
    )
def _score_samples(self, X):
    """Opposite of the Local Outlier Factor of X.

    It is the opposite as bigger is better, i.e. large values correspond
    to inliers. X is supposed to contain *new data*: samples in X are not
    considered in the neighborhood of any training point.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples.

    Returns
    -------
    opposite_lof_scores : ndarray of shape (n_samples,)
        The opposite of the Local Outlier Factor of each input samples.
        The lower, the more abnormal.
    """
    check_is_fitted(self)
    X = check_array(X, accept_sparse='csr')
    # Neighbors of the query points among the training samples.
    query_dist, query_neigh_ind = self.kneighbors(
        X, n_neighbors=self.n_neighbors_)
    query_lrd = self._local_reachability_density(query_dist,
                                                 query_neigh_ind)
    # LOF = mean ratio of the neighbors' density to the query's density.
    density_ratios = self._lrd[query_neigh_ind] / query_lrd[:, np.newaxis]
    # as bigger is better:
    return -np.mean(density_ratios, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : ndarray of shape (n_queries, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : ndarray of shape (n_queries,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
# 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
| bsd-3-clause |
bloyl/mne-python | examples/decoding/ssd_spatial_filters.py | 10 | 5433 | """
===========================================================
Compute Spectro-Spatial Decomposition (SSD) spatial filters
===========================================================
In this example, we will compute spatial filters for retaining
oscillatory brain activity and down-weighting 1/f background signals
as proposed by :footcite:`NikulinEtAl2011`.
The idea is to learn spatial filters that separate oscillatory dynamics
from surrounding non-oscillatory noise based on the covariance in the
frequency band of interest and the noise covariance based on surrounding
frequencies.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# Victoria Peterson <victoriapeterson09@gmail.com>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.datasets.fieldtrip_cmc import data_path
from mne.decoding import SSD
###############################################################################
# Define parameters
# Path to the FieldTrip CMC demo dataset (CTF format).
fname = data_path() + '/SubjectCMC.ds'

# Prepare data
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 110.).load_data()  # crop for memory purposes
raw.resample(sfreq=250)
raw.pick_types(meg=True, eeg=False, ref_meg=False)

# Signal band (the oscillation of interest) and a slightly wider noise
# band used to estimate the 1/f background covariance.
freqs_sig = 9, 12
freqs_noise = 8, 13

ssd = SSD(info=raw.info,
          reg='oas',
          sort_by_spectral_ratio=False,  # False for purpose of example.
          filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                                  l_trans_bandwidth=1, h_trans_bandwidth=1),
          filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                                 l_trans_bandwidth=1, h_trans_bandwidth=1))
ssd.fit(X=raw.get_data())

###############################################################################
# Let's investigate spatial filter with max power ratio.
# We will first inspect the topographies.
# According to Nikulin et al. 2011 this is done by either inverting the filters
# (W^{-1}) or by multiplying the noise cov with the filters Eq. (22) (C_n W)^t.
# We rely on the inversion approach here.
pattern = mne.EvokedArray(data=ssd.patterns_[:4].T,
                          info=ssd.info)
pattern.plot_topomap(units=dict(mag='A.U.'), time_format='')

# The topographies suggest that we picked up a parietal alpha generator.

# Transform
ssd_sources = ssd.transform(X=raw.get_data())

# Get psd of SSD-filtered signals.
psd, freqs = mne.time_frequency.psd_array_welch(
    ssd_sources, sfreq=raw.info['sfreq'], n_fft=4096)

# Get spec_ratio information (already sorted).
# Note that this is not necessary if sort_by_spectral_ratio=True (default).
spec_ratio, sorter = ssd.get_spectral_ratio(ssd_sources)

# Plot spectral ratio (see Eq. 24 in Nikulin 2011).
fig, ax = plt.subplots(1)
ax.plot(spec_ratio, color='black')
ax.plot(spec_ratio[sorter], color='orange', label='sorted eigenvalues')
ax.set_xlabel("Eigenvalue Index")
ax.set_ylabel(r"Spectral Ratio $\frac{P_f}{P_{sf}}$")
ax.legend()
ax.axhline(1, linestyle='--')

# We can see that the initial sorting based on the eigenvalues
# was already quite good. However, when using few components only
# the sorting might make a difference.

###############################################################################
# Let's also look at the power spectrum of that source and compare it to
# to the power spectrum of the source with lowest SNR.
below50 = freqs < 50
# for highlighting the freq. band of interest
bandfilt = (freqs_sig[0] <= freqs) & (freqs <= freqs_sig[1])
fig, ax = plt.subplots(1)
ax.loglog(freqs[below50], psd[0, below50], label='max SNR')
ax.loglog(freqs[below50], psd[-1, below50], label='min SNR')
ax.loglog(freqs[below50], psd[:, below50].mean(axis=0), label='mean')
ax.fill_between(freqs[bandfilt], 0, 10000, color='green', alpha=0.15)
ax.set_xlabel('log(frequency)')
ax.set_ylabel('log(power)')
ax.legend()

# We can clearly see that the selected component enjoys an SNR that is
# way above the average power spectrum.

###############################################################################
# Epoched data
# ------------
# Although we suggest to use this method before epoching, there might be some
# situations in which data can only be treated by chunks.

# Build epochs as sliding windows over the continuous raw file.
events = mne.make_fixed_length_events(raw, id=1, duration=5.0, overlap=0.0)

# Epoch length is 5 seconds.
epochs = Epochs(raw, events, tmin=0., tmax=5,
                baseline=None, preload=True)

ssd_epochs = SSD(info=epochs.info,
                 reg='oas',
                 filt_params_signal=dict(l_freq=freqs_sig[0],
                                         h_freq=freqs_sig[1],
                                         l_trans_bandwidth=1,
                                         h_trans_bandwidth=1),
                 filt_params_noise=dict(l_freq=freqs_noise[0],
                                        h_freq=freqs_noise[1],
                                        l_trans_bandwidth=1,
                                        h_trans_bandwidth=1))
ssd_epochs.fit(X=epochs.get_data())

# Plot topographies.
pattern_epochs = mne.EvokedArray(data=ssd_epochs.patterns_[:4].T,
                                 info=ssd_epochs.info)
pattern_epochs.plot_topomap(units=dict(mag='A.U.'), time_format='')
###############################################################################
# References
# ----------
#
# .. footbibliography::
| bsd-3-clause |
procoder317/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
# Small synthetic problem shared by all tests below: y = X @ gamma with
# exactly `n_nonzero_coefs` active atoms per target, fixed random seed.
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
                                       n_nonzero_coefs, random_state=0)
# Precomputed Gram matrix (X^T X) and covariance (X^T y) for the *_gram
# solver variants.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
    # Single-target and multi-target solutions must match X's feature count.
    single = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
    assert_equal(single.shape, (n_features,))
    multi = orthogonal_mp(X, y, n_nonzero_coefs=5)
    assert_equal(multi.shape, (n_features, 3))
def test_correct_shapes_gram():
    # Same shape contract when solving from the precomputed Gram matrix.
    single = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5)
    assert_equal(single.shape, (n_features,))
    multi = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5)
    assert_equal(multi.shape, (n_features, 3))
def test_n_nonzero_coefs():
    # The solver must never select more atoms than requested, with or
    # without Gram precomputation.
    for precompute in (False, True):
        coef = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
                             precompute=precompute)
        assert_true(np.count_nonzero(coef) <= 5)
def test_tol():
    # The squared residual norm must drop below the requested tolerance.
    tol = 0.5
    target = y[:, 0]
    for precompute in (False, True):
        coef = orthogonal_mp(X, target, tol=tol, precompute=precompute)
        assert_true(np.sum((target - np.dot(X, coef)) ** 2) <= tol)
def test_with_without_gram():
    # Precomputing the Gram matrix must not change the solution.
    plain = orthogonal_mp(X, y, n_nonzero_coefs=5)
    with_gram = orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True)
    assert_array_almost_equal(plain, with_gram)
def test_with_without_gram_tol():
    # Tolerance-based stopping must also agree between the two code paths.
    plain = orthogonal_mp(X, y, tol=1.)
    with_gram = orthogonal_mp(X, y, tol=1., precompute=True)
    assert_array_almost_equal(plain, with_gram)
def test_unreachable_accuracy():
    # tol=0 forces the solver to keep selecting atoms until the residual
    # vanishes, which is equivalent to allowing every feature.
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=0),
        orthogonal_mp(X, y, n_nonzero_coefs=n_features))
    # With precomputation the same request must emit a RuntimeWarning
    # (the accuracy is unreachable) while still matching the full-support
    # solution.
    assert_array_almost_equal(
        assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
                     precompute=True),
        orthogonal_mp(X, y, precompute=True,
                      n_nonzero_coefs=n_features))
def test_bad_input():
    # Negative tolerances and out-of-range sparsity levels must be
    # rejected by both the explicit and the Gram-based solvers.
    for solver, first, second in ((orthogonal_mp, X, y),
                                  (orthogonal_mp_gram, G, Xy)):
        assert_raises(ValueError, solver, first, second, tol=-1)
        assert_raises(ValueError, solver, first, second, n_nonzero_coefs=-1)
        assert_raises(ValueError, solver, first, second,
                      n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
    # OMP should recover the exact support and (approximately) the exact
    # coefficients of a signal that is sparse in the dictionary.
    support, = gamma[:, 0].nonzero()
    rec = orthogonal_mp(X, y[:, 0], 5)
    rec_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
    for estimate in (rec, rec_gram):
        assert_array_equal(support, np.flatnonzero(estimate))
        assert_array_almost_equal(gamma[:, 0], estimate, decimal=2)
def test_estimator():
    # Estimator API: coef_/intercept_ shapes for single- and multi-target
    # fits, and the sparsity bound, both with and without intercept.
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_.shape, ())
    assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_.shape, (n_targets,))
    assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
    # Refit with no intercept/normalization: intercept_ must collapse to 0
    # while shapes and sparsity guarantees stay identical.
    omp.set_params(fit_intercept=False, normalize=False)
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_, 0)
    assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_, 0)
    assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
    # Duplicating a column makes the dictionary degenerate; OMP is
    # expected to warn rather than fail silently.
    X_dup = X.copy()
    X_dup[:, 1] = X_dup[:, 0]
    coef = np.zeros(n_features)
    coef[0] = coef[1] = 1.
    target = np.dot(X_dup, coef)
    assert_warns(RuntimeWarning, orthogonal_mp, X_dup, target, 2)
def test_swapped_regressors():
    # X[:, 21] should be selected first, then X[:, 0] selected second,
    # which will take X[:, 21]'s place in case the algorithm does
    # column swapping for optimization (which is the case at the moment)
    coef = np.zeros(n_features)
    coef[21] = 1.0
    coef[0] = 0.5
    target = np.dot(X, coef)
    target_gram = np.dot(X.T, target)
    assert_array_equal(np.flatnonzero(orthogonal_mp(X, target, 2)),
                       [0, 21])
    assert_array_equal(
        np.flatnonzero(orthogonal_mp_gram(G, target_gram, 2)), [0, 21])
def test_no_atoms():
    # An all-zero target admits only the trivial solution; early-stopping
    # warnings are irrelevant here and silenced.
    zero_y = np.zeros_like(y)
    zero_Xy = np.dot(X.T, zero_y)
    gamma_plain = ignore_warnings(orthogonal_mp)(X, zero_y, 1)
    gamma_from_gram = ignore_warnings(orthogonal_mp)(G, zero_Xy, 1)
    assert_equal(np.all(gamma_plain == 0), True)
    assert_equal(np.all(gamma_from_gram == 0), True)
def test_omp_path():
    # The last step of the returned path must coincide with the plain
    # (non-path) solution, for both the explicit and Gram formulations.
    for solver, first, second in ((orthogonal_mp, X, y),
                                  (orthogonal_mp_gram, G, Xy)):
        path = solver(first, second, n_nonzero_coefs=5, return_path=True)
        final = solver(first, second, n_nonzero_coefs=5, return_path=False)
        assert_equal(path.shape, (n_features, n_targets, 5))
        assert_array_almost_equal(path[:, :, -1], final)
def test_omp_return_path_prop_with_gram():
    # return_path must also work when the Gram matrix is precomputed.
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
                         precompute=True)
    final = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
                          precompute=True)
    assert_equal(path.shape, (n_features, n_targets, 5))
    assert_array_almost_equal(path[:, :, -1], final)
def test_omp_cv():
    # Cross-validation should find the true sparsity level, and the CV
    # estimator must agree with a plain OMP fixed at that level.
    target = y[:, 0]
    true_coef = gamma[:, 0]
    ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
                                        max_iter=10, cv=5)
    ompcv.fit(X, target)
    assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(ompcv.coef_, true_coef)
    omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
                                    n_nonzero_coefs=ompcv.n_nonzero_coefs_)
    omp.fit(X, target)
    assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
    # Use small simple data; it's a sanity check but OMP can stop early
    rng = check_random_state(0)
    n_rows, n_cols = 10, 8
    n_outputs = 3
    A = rng.randn(n_rows, n_cols)
    B = rng.randn(n_rows, n_outputs)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_cols)
    lstsq = LinearRegression()
    omp.fit(A, B)
    lstsq.fit(A, B)
    # With a full support allowance OMP must reach the OLS solution.
    assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
hyperspy/hyperspy | hyperspy/_signals/eds_tem.py | 1 | 40736 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import warnings
import logging
import traits.api as t
import numpy as np
from scipy import constants
import pint
from hyperspy.signal import BaseSetMetadataItems
from hyperspy import utils
from hyperspy._signals.eds import (EDSSpectrum, LazyEDSSpectrum)
from hyperspy.defaults_parser import preferences
from hyperspy.ui_registry import add_gui_method, DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.elements import elements as elements_db
from hyperspy.misc.utils import isiterable
from hyperspy.external.progressbar import progressbar
from hyperspy.axes import DataAxis
_logger = logging.getLogger(__name__)
@add_gui_method(toolkey="hyperspy.microscope_parameters_EDS_TEM")
class EDSTEMParametersUI(BaseSetMetadataItems):
    """GUI helper exposing the TEM/EDS acquisition parameters as traits.

    Each trait corresponds to one entry of the signal metadata; the
    ``mapping`` dictionary tells ``BaseSetMetadataItems`` under which
    metadata path each value entered in the widget must be stored.
    """
    beam_energy = t.Float(t.Undefined,
                          label='Beam energy (keV)')
    real_time = t.Float(t.Undefined,
                        label='Real time (s)')
    tilt_stage = t.Float(t.Undefined,
                         label='Stage tilt (degree)')
    live_time = t.Float(t.Undefined,
                        label='Live time (s)')
    probe_area = t.Float(t.Undefined,
                         label='Beam/probe area (nm²)')
    azimuth_angle = t.Float(t.Undefined,
                            label='Azimuth angle (degree)')
    elevation_angle = t.Float(t.Undefined,
                              label='Elevation angle (degree)')
    energy_resolution_MnKa = t.Float(t.Undefined,
                                     label='Energy resolution MnKa (eV)')
    beam_current = t.Float(t.Undefined,
                           label='Beam current (nA)')
    # Metadata path -> trait name.
    mapping = {
        'Acquisition_instrument.TEM.beam_energy': 'beam_energy',
        'Acquisition_instrument.TEM.Stage.tilt_alpha': 'tilt_stage',
        'Acquisition_instrument.TEM.Detector.EDS.live_time': 'live_time',
        'Acquisition_instrument.TEM.Detector.EDS.azimuth_angle':
        'azimuth_angle',
        'Acquisition_instrument.TEM.Detector.EDS.elevation_angle':
        'elevation_angle',
        'Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa':
        'energy_resolution_MnKa',
        'Acquisition_instrument.TEM.beam_current':
        'beam_current',
        'Acquisition_instrument.TEM.probe_area':
        'probe_area',
        'Acquisition_instrument.TEM.Detector.EDS.real_time':
        'real_time', }
class EDSTEMSpectrum(EDSSpectrum):
_signal_type = "EDS_TEM"
def __init__(self, *args, **kwards):
    """Create an EDS-TEM spectrum, migrating SEM metadata if needed."""
    super().__init__(*args, **kwards)
    # Attributes defaults
    if 'Acquisition_instrument.TEM.Detector.EDS' not in self.metadata:
        # Data originally acquired on an SEM: relocate its metadata under
        # the TEM node so the rest of this class finds it in one place.
        if 'Acquisition_instrument.SEM.Detector.EDS' in self.metadata:
            self.metadata.set_item(
                "Acquisition_instrument.TEM",
                self.metadata.Acquisition_instrument.SEM)
            del self.metadata.Acquisition_instrument.SEM
    self._set_default_param()
def _set_default_param(self):
    """Set to value to default (defined in preferences)

    Tags the signal as EDS_TEM and fills in any acquisition parameter
    (stage tilt, detector elevation/azimuth, Mn Ka energy resolution)
    missing from the metadata with the user-preference defaults.
    """
    mp = self.metadata
    mp.Signal.signal_type = "EDS_TEM"
    # NOTE(review): duplicate assignment, harmless.
    mp = self.metadata
    if "Acquisition_instrument.TEM.Stage.tilt_alpha" not in mp:
        mp.set_item(
            "Acquisition_instrument.TEM.Stage.tilt_alpha",
            preferences.EDS.eds_tilt_stage)
    if "Acquisition_instrument.TEM.Detector.EDS.elevation_angle" not in mp:
        mp.set_item(
            "Acquisition_instrument.TEM.Detector.EDS.elevation_angle",
            preferences.EDS.eds_detector_elevation)
    if "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa"\
            not in mp:
        mp.set_item("Acquisition_instrument.TEM.Detector.EDS." +
                    "energy_resolution_MnKa",
                    preferences.EDS.eds_mn_ka)
    if "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle" not in mp:
        mp.set_item(
            "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle",
            preferences.EDS.eds_detector_azimuth)
def set_microscope_parameters(self,
                              beam_energy=None,
                              live_time=None,
                              tilt_stage=None,
                              azimuth_angle=None,
                              elevation_angle=None,
                              energy_resolution_MnKa=None,
                              beam_current=None,
                              probe_area=None,
                              real_time=None,
                              display=True,
                              toolkit=None):
    # The docstring is attached after the definition (``__doc__ = ...``)
    # so the GUI display/toolkit boilerplate can be interpolated into it.
    if set([beam_energy, live_time, tilt_stage, azimuth_angle,
            elevation_angle, energy_resolution_MnKa, beam_current,
            probe_area, real_time]) == {None}:
        # No explicit value given: open the interactive parameter editor.
        tem_par = EDSTEMParametersUI(self)
        return tem_par.gui(display=display, toolkit=toolkit)
    md = self.metadata
    if beam_energy is not None:
        # BUG FIX: the key previously contained a trailing space
        # ("...beam_energy ") so the value was stored under a wrong
        # metadata node and never found again.
        md.set_item("Acquisition_instrument.TEM.beam_energy", beam_energy)
    if live_time is not None:
        md.set_item(
            "Acquisition_instrument.TEM.Detector.EDS.live_time",
            live_time)
    if tilt_stage is not None:
        md.set_item(
            "Acquisition_instrument.TEM.Stage.tilt_alpha",
            tilt_stage)
    if azimuth_angle is not None:
        md.set_item(
            "Acquisition_instrument.TEM.Detector.EDS.azimuth_angle",
            azimuth_angle)
    if elevation_angle is not None:
        md.set_item(
            "Acquisition_instrument.TEM.Detector.EDS.elevation_angle",
            elevation_angle)
    if energy_resolution_MnKa is not None:
        md.set_item(
            "Acquisition_instrument.TEM.Detector.EDS." +
            "energy_resolution_MnKa",
            energy_resolution_MnKa)
    if beam_current is not None:
        md.set_item(
            "Acquisition_instrument.TEM.beam_current",
            beam_current)
    if probe_area is not None:
        md.set_item(
            "Acquisition_instrument.TEM.probe_area",
            probe_area)
    if real_time is not None:
        md.set_item(
            "Acquisition_instrument.TEM.Detector.EDS.real_time",
            real_time)
set_microscope_parameters.__doc__ = \
"""
Set the microscope parameters.
If no arguments are given, raises an interactive mode to fill
the values.
Parameters
----------
beam_energy: float
The energy of the electron beam in keV
live_time : float
In seconds
tilt_stage : float
In degree
azimuth_angle : float
In degree
elevation_angle : float
In degree
energy_resolution_MnKa : float
In eV
beam_current: float
In nA
probe_area: float
In nm²
real_time: float
In seconds
{}
{}
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> print(s.metadata.Acquisition_instrument.
>>> TEM.Detector.EDS.energy_resolution_MnKa)
>>> s.set_microscope_parameters(energy_resolution_MnKa=135.)
>>> print(s.metadata.Acquisition_instrument.
>>> TEM.Detector.EDS.energy_resolution_MnKa)
133.312296
135.0
""".format(DISPLAY_DT, TOOLKIT_DT)
def _are_microscope_parameters_missing(self):
"""Check if the EDS parameters necessary for quantification are
defined in metadata."""
must_exist = (
'Acquisition_instrument.TEM.beam_energy',
'Acquisition_instrument.TEM.Detector.EDS.live_time',)
missing_parameters = []
for item in must_exist:
exists = self.metadata.has_item(item)
if exists is False:
missing_parameters.append(item)
if missing_parameters:
_logger.info("Missing parameters {}".format(missing_parameters))
return True
else:
return False
def get_calibration_from(self, ref, nb_pix=1):
    """Copy the calibration and all metadata of a reference.

    Primary use: To add a calibration to ripple file from INCA
    software

    Parameters
    ----------
    ref : signal
        The reference contains the calibration in its
        metadata
    nb_pix : int
        The live time (real time corrected from the "dead time")
        is divided by the number of pixel (spectrums), giving an
        average live time.

    Raises
    ------
    NotImplementedError
        If the signal axis is a non-uniform axis.

    Examples
    --------
    >>> ref = hs.datasets.example_signals.EDS_TEM_Spectrum()
    >>> s = hs.signals.EDSTEMSpectrum(
    >>>     hs.datasets.example_signals.EDS_TEM_Spectrum().data)
    >>> print(s.axes_manager[0].scale)
    >>> s.get_calibration_from(ref)
    >>> print(s.axes_manager[0].scale)
    1.0
    0.020028
    """
    # Replace the whole original_metadata by a copy of the reference's.
    self.original_metadata = ref.original_metadata.deepcopy()
    # Setup the axes_manager
    ax_m = self.axes_manager.signal_axes[0]
    ax_ref = ref.axes_manager.signal_axes[0]
    # Copying scale/offset is only meaningful for uniform axes.
    for _axis in [ax_m, ax_ref]:
        if not _axis.is_uniform:
            raise NotImplementedError(
                "The function is not implemented for non-uniform axes.")
    ax_m.scale = ax_ref.scale
    ax_m.units = ax_ref.units
    ax_m.offset = ax_ref.offset
    # Setup metadata: accept a reference acquired on either instrument.
    if 'Acquisition_instrument.TEM' in ref.metadata:
        mp_ref = ref.metadata.Acquisition_instrument.TEM
    elif 'Acquisition_instrument.SEM' in ref.metadata:
        mp_ref = ref.metadata.Acquisition_instrument.SEM
    else:
        raise ValueError("The reference has no metadata "
                         "'Acquisition_instrument.TEM '"
                         "or 'metadata.Acquisition_instrument.SEM'.")
    mp = self.metadata
    mp.Acquisition_instrument.TEM = mp_ref.deepcopy()
    # Spread the reference live time over nb_pix spectra (average per
    # pixel when the reference was a map).
    if mp_ref.has_item("Detector.EDS.live_time"):
        mp.Acquisition_instrument.TEM.Detector.EDS.live_time = \
            mp_ref.Detector.EDS.live_time / nb_pix
def quantification(self,
intensities,
method,
factors,
composition_units='atomic',
absorption_correction=False,
take_off_angle='auto',
thickness='auto',
convergence_criterion=0.5,
navigation_mask=1.0,
closing=True,
plot_result=False,
probe_area='auto',
max_iterations=30,
show_progressbar=None,
**kwargs):
"""
Absorption corrected quantification using Cliff-Lorimer, the zeta-factor
method, or ionization cross sections. The function iterates through
quantification function until two successive interations don't change
the final composition by a defined percentage critera (0.5% by default).
Parameters
----------
intensities: list of signal
the intensitiy for each X-ray lines.
method: {'CL', 'zeta', 'cross_section'}
Set the quantification method: Cliff-Lorimer, zeta-factor, or
ionization cross sections.
factors: list of float
The list of kfactors, zeta-factors or cross sections in same order
as intensities. Note that intensities provided by Hyperspy are
sorted by the alphabetical order of the X-ray lines.
eg. factors =[0.982, 1.32, 1.60] for ['Al_Ka', 'Cr_Ka', 'Ni_Ka'].
composition_units: {'atomic', 'weight'}
The quantification returns the composition in 'atomic' percent by
default, but can also return weight percent if specified.
absorption_correction: bool
Specify whether or not an absorption correction should be applied.
'False' by default so absorption will not be applied unless
specfied.
take_off_angle : {'auto'}
The angle between the sample surface and the vector along which
X-rays travel to reach the centre of the detector.
thickness: {'auto'}
thickness in nm (can be a single value or
have the same navigation dimension as the signal).
NB: Must be specified for 'CL' method. For 'zeta' or 'cross_section'
methods, first quantification step provides a mass_thickness
internally during quantification.
convergence_criterion: The convergence criterium defined as the percentage
difference between 2 successive iterations. 0.5% by default.
navigation_mask : None or float or signal
The navigation locations marked as True are not used in the
quantification. If float is given the vacuum_mask method is used to
generate a mask with the float value as threhsold.
Else provides a signal with the navigation shape. Only for the
'Cliff-Lorimer' method.
closing: bool
If true, applied a morphologic closing to the mask obtained by
vacuum_mask.
plot_result : bool
If True, plot the calculated composition. If the current
object is a single spectrum it prints the result instead.
probe_area = {'auto'}
This allows the user to specify the probe_area for interaction with
the sample needed specifically for the cross_section method of
quantification. When left as 'auto' the pixel area is used,
calculated from the navigation axes information.
max_iterations : int
An upper limit to the number of calculations for absorption correction.
kwargs
The extra keyword arguments are passed to plot.
Returns
-------
A list of quantified elemental maps (signal) giving the composition of
the sample in weight or atomic percent with absorption correciton taken
into account based on the sample thickness estimate provided.
If the method is 'zeta' this function also returns the mass thickness
profile for the data.
If the method is 'cross_section' this function also returns the atom
counts for each element.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s.add_lines()
>>> kfactors = [1.450226, 5.075602] #For Fe Ka and Pt La
>>> bw = s.estimate_background_windows(line_width=[5.0, 2.0])
>>> s.plot(background_windows=bw)
>>> intensities = s.get_lines_intensity(background_windows=bw)
>>> res = s.quantification(intensities, kfactors, plot_result=True,
>>> composition_units='atomic')
Fe (Fe_Ka): Composition = 15.41 atomic percent
Pt (Pt_La): Composition = 84.59 atomic percent
See also
--------
vacuum_mask
"""
if isinstance(navigation_mask, float):
if self.axes_manager.navigation_dimension > 0:
navigation_mask = self.vacuum_mask(navigation_mask, closing)
else:
navigation_mask = None
xray_lines = [intensity.metadata.Sample.xray_lines[0] for intensity in intensities]
it = 0
if absorption_correction:
if show_progressbar is None: # pragma: no cover
show_progressbar = preferences.General.show_progressbar
if show_progressbar:
pbar = progressbar(total=None,
desc='Absorption correction calculation')
composition = utils.stack(intensities, lazy=False,
show_progressbar=False)
if take_off_angle == 'auto':
toa = self.get_take_off_angle()
else:
toa = take_off_angle
#determining illumination area for cross sections quantification.
if method == 'cross_section':
if probe_area == 'auto':
parameters = self.metadata.Acquisition_instrument.TEM
if probe_area in parameters:
probe_area = parameters.TEM.probe_area
else:
probe_area = self.get_probe_area(
navigation_axes=self.axes_manager.navigation_axes)
int_stack = utils.stack(intensities, lazy=False,
show_progressbar=False)
comp_old = np.zeros_like(int_stack.data)
abs_corr_factor = None # initial
if method == 'CL':
quantification_method = utils_eds.quantification_cliff_lorimer
kwargs = {"intensities" : int_stack.data,
"kfactors" : factors,
"absorption_correction" : abs_corr_factor,
"mask": navigation_mask}
elif method == 'zeta':
quantification_method = utils_eds.quantification_zeta_factor
kwargs = {"intensities" : int_stack.data,
"zfactors" : factors,
"dose" : self._get_dose(method),
"absorption_correction" : abs_corr_factor}
elif method =='cross_section':
quantification_method = utils_eds.quantification_cross_section
kwargs = {"intensities" : int_stack.data,
"cross_sections" : factors,
"dose" : self._get_dose(method, **kwargs),
"absorption_correction" : abs_corr_factor}
else:
raise ValueError('Please specify method for quantification, '
'as "CL", "zeta" or "cross_section".')
while True:
results = quantification_method(**kwargs)
if method == 'CL':
composition.data = results * 100.
if absorption_correction:
if thickness is not None:
mass_thickness = intensities[0].deepcopy()
mass_thickness.data = self.CL_get_mass_thickness(
composition.split(),
thickness
)
mass_thickness.metadata.General.title = 'Mass thickness'
else:
raise ValueError(
'Thickness is required for absorption correction '
'with k-factor method. Results will contain no '
'correction for absorption.'
)
elif method == 'zeta':
composition.data = results[0] * 100
mass_thickness = intensities[0].deepcopy()
mass_thickness.data = results[1]
else:
composition.data = results[0] * 100.
number_of_atoms = composition._deepcopy_with_new_data(results[1])
if method == 'cross_section':
if absorption_correction:
abs_corr_factor = utils_eds.get_abs_corr_cross_section(composition.split(),
number_of_atoms.split(),
toa,
probe_area)
kwargs["absorption_correction"] = abs_corr_factor
else:
if absorption_correction:
abs_corr_factor = utils_eds.get_abs_corr_zeta(composition.split(),
mass_thickness,
toa)
kwargs["absorption_correction"] = abs_corr_factor
res_max = np.max(composition.data - comp_old)
comp_old = composition.data
if absorption_correction and show_progressbar:
pbar.update(1)
it += 1
if not absorption_correction or abs(res_max) < convergence_criterion:
break
elif it >= max_iterations:
raise Exception('Absorption correction failed as solution '
f'did not converge after {max_iterations} '
'iterations')
if method == 'cross_section':
number_of_atoms = composition._deepcopy_with_new_data(results[1])
number_of_atoms = number_of_atoms.split()
composition = composition.split()
else:
composition = composition.split()
#convert ouput units to selection as required.
if composition_units == 'atomic':
if method != 'cross_section':
composition = utils.material.weight_to_atomic(composition)
else:
if method == 'cross_section':
composition = utils.material.atomic_to_weight(composition)
#Label each of the elemental maps in the image stacks for composition.
for i, xray_line in enumerate(xray_lines):
element, line = utils_eds._get_element_and_line(xray_line)
composition[i].metadata.General.title = composition_units + \
' percent of ' + element
composition[i].metadata.set_item("Sample.elements", ([element]))
composition[i].metadata.set_item(
"Sample.xray_lines", ([xray_line]))
if plot_result and composition[i].axes_manager.navigation_size == 1:
c = float(composition[i].data)
print(f"{element} ({xray_line}): Composition = {c:.2f} percent")
#For the cross section method this is repeated for the number of atom maps
if method == 'cross_section':
for i, xray_line in enumerate(xray_lines):
element, line = utils_eds._get_element_and_line(xray_line)
number_of_atoms[i].metadata.General.title = \
'atom counts of ' + element
number_of_atoms[i].metadata.set_item("Sample.elements",
([element]))
number_of_atoms[i].metadata.set_item(
"Sample.xray_lines", ([xray_line]))
if plot_result and composition[i].axes_manager.navigation_size != 1:
utils.plot.plot_signals(composition, **kwargs)
if absorption_correction:
_logger.info(f'Conversion found after {it} interations.')
if method == 'zeta':
mass_thickness.metadata.General.title = 'Mass thickness'
self.metadata.set_item("Sample.mass_thickness", mass_thickness)
return composition, mass_thickness
elif method == 'cross_section':
return composition, number_of_atoms
elif method == 'CL':
if absorption_correction:
mass_thickness.metadata.General.title = 'Mass thickness'
return composition, mass_thickness
else:
return composition
else:
raise ValueError('Please specify method for quantification, as '
'"CL", "zeta" or "cross_section"')
def vacuum_mask(self, threshold=1.0, closing=True, opening=False):
"""
Generate mask of the vacuum region
Parameters
----------
threshold: float
For a given pixel, maximum value in the energy axis below which the
pixel is considered as vacuum.
closing: bool
If true, applied a morphologic closing to the mask
opnening: bool
If true, applied a morphologic opening to the mask
Returns
-------
mask: signal
The mask of the region
Examples
--------
>>> # Simulate a spectrum image with vacuum region
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> s_vac = hs.signals.BaseSignal(
np.ones_like(s.data, dtype=float))*0.005
>>> s_vac.add_poissonian_noise()
>>> si = hs.stack([s]*3 + [s_vac])
>>> si.vacuum_mask().data
array([False, False, False, True], dtype=bool)
"""
if self.axes_manager.navigation_dimension == 0:
raise RuntimeError('Navigation dimenstion must be higher than 0 '
'to estimate a vacuum mask.')
from scipy.ndimage.morphology import binary_dilation, binary_erosion
mask = (self.max(-1) <= threshold)
if closing:
mask.data = binary_dilation(mask.data, border_value=0)
mask.data = binary_erosion(mask.data, border_value=1)
if opening:
mask.data = binary_erosion(mask.data, border_value=1)
mask.data = binary_dilation(mask.data, border_value=0)
return mask
    def decomposition(self,
                      normalize_poissonian_noise=True,
                      navigation_mask=1.0,
                      closing=True,
                      *args,
                      **kwargs):
        """Apply a decomposition to a dataset with a choice of algorithms.
        The results are stored in ``self.learning_results``.
        Read more in the :ref:`User Guide <mva.decomposition>`.
        Parameters
        ----------
        normalize_poissonian_noise : bool, default True
            If True, scale the signal to normalize Poissonian noise using
            the approach described in [Keenan2004]_.
        navigation_mask : None or float or boolean numpy array, default 1.0
            The navigation locations marked as True are not used in the
            decomposition. If float is given the vacuum_mask method is used to
            generate a mask with the float value as threshold.
        closing: bool, default True
            If true, applies a morphologic closing to the mask obtained by
            vacuum_mask.
        algorithm : {"SVD", "MLPCA", "sklearn_pca", "NMF", "sparse_pca", "mini_batch_sparse_pca", "RPCA", "ORPCA", "ORNMF", custom object}, default "SVD"
            The decomposition algorithm to use. If algorithm is an object,
            it must implement a ``fit_transform()`` method or ``fit()`` and
            ``transform()`` methods, in the same manner as a scikit-learn estimator.
        output_dimension : None or int
            Number of components to keep/calculate.
            Default is None, i.e. ``min(data.shape)``.
        centre : {None, "navigation", "signal"}, default None
            * If None, the data is not centered prior to decomposition.
            * If "navigation", the data is centered along the navigation axis.
              Only used by the "SVD" algorithm.
            * If "signal", the data is centered along the signal axis.
              Only used by the "SVD" algorithm.
        auto_transpose : bool, default True
            If True, automatically transposes the data to boost performance.
            Only used by the "SVD" algorithm.
        signal_mask : boolean numpy array
            The signal locations marked as True are not used in the
            decomposition.
        var_array : numpy array
            Array of variance for the maximum likelihood PCA algorithm.
            Only used by the "MLPCA" algorithm.
        var_func : None or function or numpy array, default None
            * If None, ignored
            * If function, applies the function to the data to obtain ``var_array``.
              Only used by the "MLPCA" algorithm.
            * If numpy array, creates ``var_array`` by applying a polynomial function
              defined by the array of coefficients to the data. Only used by
              the "MLPCA" algorithm.
        reproject : {None, "signal", "navigation", "both"}, default None
            If not None, the results of the decomposition will be projected in
            the selected masked area.
        return_info: bool, default False
            The result of the decomposition is stored internally. However,
            some algorithms generate some extra information that is not
            stored. If True, return any extra information if available.
            In the case of sklearn.decomposition objects, this includes the
            sklearn Estimator object.
        print_info : bool, default True
            If True, print information about the decomposition being performed.
            In the case of sklearn.decomposition objects, this includes the
            values of all arguments of the chosen sklearn algorithm.
        svd_solver : {"auto", "full", "arpack", "randomized"}, default "auto"
            If auto:
                The solver is selected by a default policy based on `data.shape` and
                `output_dimension`: if the input data is larger than 500x500 and the
                number of components to extract is lower than 80% of the smallest
                dimension of the data, then the more efficient "randomized"
                method is enabled. Otherwise the exact full SVD is computed and
                optionally truncated afterwards.
            If full:
                run exact SVD, calling the standard LAPACK solver via
                :py:func:`scipy.linalg.svd`, and select the components by postprocessing
            If arpack:
                use truncated SVD, calling ARPACK solver via
                :py:func:`scipy.sparse.linalg.svds`. It requires strictly
                `0 < output_dimension < min(data.shape)`
            If randomized:
                use truncated SVD, calling :py:func:`sklearn.utils.extmath.randomized_svd`
                to estimate a limited number of components
        copy : bool, default True
            * If True, stores a copy of the data before any pre-treatments
              such as normalization in ``s._data_before_treatments``. The original
              data can then be restored by calling ``s.undo_treatments()``.
            * If False, no copy is made. This can be beneficial for memory
              usage, but care must be taken since data will be overwritten.
        **kwargs : extra keyword arguments
            Any keyword arguments are passed to the decomposition algorithm.
        Examples
        --------
        >>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
        >>> si = hs.stack([s]*3)
        >>> si.change_dtype(float)
        >>> si.decomposition()
        See also
        --------
        vacuum_mask
        """
        # A float threshold means "mask out the vacuum": derive the boolean
        # mask from the data itself via vacuum_mask.
        if isinstance(navigation_mask, float):
            navigation_mask = self.vacuum_mask(navigation_mask, closing)
        super().decomposition(
            normalize_poissonian_noise=normalize_poissonian_noise,
            navigation_mask=navigation_mask, *args, **kwargs)
        # Replace NaNs (produced at fully-masked locations) with zeros so
        # downstream plotting/processing does not choke on them.
        self.learning_results.loadings = np.nan_to_num(
            self.learning_results.loadings)
def create_model(self, auto_background=True, auto_add_lines=True,
*args, **kwargs):
"""Create a model for the current TEM EDS data.
Parameters
----------
auto_background : bool, default True
If True, adds automatically a polynomial order 6 to the model,
using the edsmodel.add_polynomial_background method.
auto_add_lines : bool, default True
If True, automatically add Gaussians for all X-rays generated in
the energy range by an element using the edsmodel.add_family_lines
method.
dictionary : {None, dict}, optional
A dictionary to be used to recreate a model. Usually generated
using :meth:`hyperspy.model.as_dictionary`
Returns
-------
model : `EDSTEMModel` instance.
"""
from hyperspy.models.edstemmodel import EDSTEMModel
model = EDSTEMModel(self,
auto_background=auto_background,
auto_add_lines=auto_add_lines,
*args, **kwargs)
return model
def get_probe_area(self, navigation_axes=None):
"""
Calculates a pixel area which can be approximated to probe area,
when the beam is larger than or equal to pixel size.
The probe area can be calculated only when the number of navigation
dimension are less than 2 and all the units have the dimensions of
length.
Parameters
----------
navigation_axes : DataAxis, string or integer (or list of)
Navigation axes corresponding to the probe area. If string or
integer, the provided value is used to index the ``axes_manager``.
Returns
-------
probe area in nm².
Examples
--------
>>> s = hs.datasets.example_signals.EDS_TEM_Spectrum()
>>> si = hs.stack([s]*3)
>>> si.axes_manager.navigation_axes[0].scale = 0.01
>>> si.axes_manager.navigation_axes[0].units = 'μm'
>>> si.get_probe_area()
100.0
"""
if navigation_axes is None:
navigation_axes = self.axes_manager.navigation_axes
elif not isiterable(navigation_axes):
navigation_axes = [navigation_axes]
if len(navigation_axes) == 0:
raise ValueError("The navigation dimension is zero, the probe "
"area can not be calculated automatically.")
elif len(navigation_axes) > 2:
raise ValueError("The navigation axes corresponding to the probe "
"are ambiguous and the probe area can not be "
"calculated automatically.")
scales = []
for axis in navigation_axes:
try:
if not isinstance(navigation_axes, DataAxis):
axis = self.axes_manager[axis]
scales.append(axis.convert_to_units('nm', inplace=False)[0])
except pint.DimensionalityError:
raise ValueError(f"The unit of the axis {axis} has not the "
"dimension of length.")
if len(scales) == 1:
probe_area = scales[0] ** 2
else:
probe_area = scales[0] * scales[1]
if probe_area == 1:
warnings.warn("Please note that the probe area has been "
"calculated to be 1 nm², meaning that it is highly "
"likley that the scale of the navigation axes have not "
"been set correctly. Please read the user "
"guide for how to set this.")
return probe_area
def _get_dose(self, method, beam_current='auto', live_time='auto',
probe_area='auto'):
"""
Calculates the total electron dose for the zeta-factor or cross section
methods of quantification.
Input given by i*t*N, i the current, t the
acquisition time, and N the number of electron by unit electric charge.
Parameters
----------
method : 'zeta' or 'cross_section'
If 'zeta', the dose is given by i*t*N
If 'cross section', the dose is given by i*t*N/A
where i is the beam current, t is the acquistion time,
N is the number of electrons per unit charge (1/e) and
A is the illuminated beam area or pixel area.
beam_current: float
Probe current in nA
live_time: float
Acquisiton time in s, compensated for the dead time of the detector.
probe_area: float or 'auto'
The illumination area of the electron beam in nm².
If 'auto' the value is extracted from the scale axes_manager.
Therefore we assume the probe is oversampling such that
the illumination area can be approximated to the pixel area of the
spectrum image.
Returns
--------
Dose in electrons (zeta factor) or electrons per nm² (cross_section)
See also
--------
set_microscope_parameters
"""
parameters = self.metadata.Acquisition_instrument.TEM
if beam_current == 'auto':
beam_current = parameters.get_item('beam_current')
if beam_current is None:
raise Exception('Electron dose could not be calculated as the '
'beam current is not set. It can set using '
'`set_microscope_parameters()`.')
if live_time == 'auto':
live_time = parameters.get_item('Detector.EDS.live_time')
if live_time is None:
raise Exception('Electron dose could not be calculated as '
'live time is not set. It can set using '
'`set_microscope_parameters()`.')
if method == 'cross_section':
if probe_area == 'auto':
probe_area = parameters.get_item('probe_area')
if probe_area is None:
probe_area = self.get_probe_area(
navigation_axes=self.axes_manager.navigation_axes)
return (live_time * beam_current * 1e-9) / (constants.e * probe_area)
# 1e-9 is included here because the beam_current is in nA.
elif method == 'zeta':
return live_time * beam_current * 1e-9 / constants.e
else:
raise Exception("Method need to be 'zeta' or 'cross_section'.")
@staticmethod
def CL_get_mass_thickness(weight_percent, thickness):
"""
Creates a array of mass_thickness based on a known material composition
and measured thickness. Required for absorption correction calcultions
using the Cliff Lorimer method.
Parameters
----------
weight_percent : :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
Stack of compositions as determined from an initial k_factor
quantification.
thickness : float or :py:class:`numpy.ndarray`
Either a float value for thickness in nm or an array equal to the
size of the EDX map with thickness at each position of the sample.
Returns
-------
mass_thickness : :py:class:`numpy.ndarray`
Mass thickness in kg/m².
"""
if isinstance(thickness, (float, int)):
thickness_map = np.ones_like(weight_percent[0]) * thickness
else:
thickness_map = thickness
elements = [intensity.metadata.Sample.elements[0] for intensity in weight_percent]
mass_thickness = np.zeros_like(weight_percent[0])
densities = np.array(
[elements_db[element]['Physical_properties']['density (g/cm^3)']
for element in elements])
for density, element_composition in zip(densities, weight_percent):
# convert composition from % to fraction: factor of 1E-2
# convert thickness from nm to m: factor of 1E-9
# convert density from g/cm3 to kg/m2: factor of 1E3
elemental_mt = element_composition * thickness_map * density * 1E-8
mass_thickness += elemental_mt
return mass_thickness
class LazyEDSTEMSpectrum(EDSTEMSpectrum, LazyEDSSpectrum):
    """Lazy (out-of-core) variant of :class:`EDSTEMSpectrum`; all behaviour
    comes from the two base classes."""
    pass
| gpl-3.0 |
icdishb/scikit-learn | sklearn/utils/tests/test_testing.py | 33 | 3783 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
    from nose.tools import assert_less
    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
    # nose is optional: without it the cross-check test is simply skipped.
    pass
try:
    from nose.tools import assert_greater
    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
    # nose is optional: without it the cross-check test is simply skipped.
    pass
def test_assert_less_equal():
    # Non-strict comparison: both a strictly-smaller and an equal pair pass.
    for smaller, larger in ((0, 1), (1, 1)):
        assert_less_equal(smaller, larger)
    # A genuinely greater first argument must raise.
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    # Non-strict comparison: both a strictly-greater and an equal pair pass.
    for larger, smaller in ((1, 0), (1, 1)):
        assert_greater_equal(larger, smaller)
    # A genuinely smaller first argument must raise.
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    # LDA has no `random_state` attribute: the call must simply not fail.
    set_random_state(LDA(), 3)
    # DecisionTreeClassifier does: the seed must be stored on the estimator.
    tree = DecisionTreeClassifier()
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    def _raise_value_error(message):
        raise ValueError(message)
    # Matching exception type and message: nothing is raised.
    assert_raise_message(ValueError, "test",
                         _raise_value_error, "test")
    # Right type but wrong message: assert_raise_message itself fails.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_value_error, "test")
    # Wrong expected type: the original ValueError propagates unchanged.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_value_error, "test")
# This class is inspired from numpy 1.7 with an alteration to check
# the reset of warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3
        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)
        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)
        failed = False
        # Save the global warning filters so they can be restored afterwards.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError: f emits a
                # DeprecationWarning, not the expected UserWarning.
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            # Restore the filters whatever happened above.
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
PuchatekwSzortach/printed_characters_net | scripts/debugging/visualize_net_training.py | 1 | 2148 | """
A simple program for visualizing networks development as it is trained
"""
import shelve
import configobj
import matplotlib.pyplot as plt
import seaborn
def plot_training_data(training_data):
    """Plot accuracy, cost and per-layer weight percentiles over epochs.
    ``training_data`` maps an epoch number to a dict with keys 'accuracy',
    'error_cost', 'regularization_cost' and 'weights_percentiles' (one
    percentile tuple per layer).
    NOTE(review): assumes epoch 0 is present (used to count layers) --
    confirm against the producer of the shelf data.
    """
    epochs = sorted(training_data.keys())
    # Two summary plots (accuracy, cost) plus one plot per network layer.
    statistics_count = 2
    layers_count = len(training_data[0]['weights_percentiles'])
    plots_count = statistics_count + layers_count
    figure, axes = plt.subplots(plots_count, 1, sharex=True)
    accuracy = [training_data[epoch]['accuracy'] for epoch in epochs]
    axes[0].plot(epochs, accuracy)
    axes[0].set_ylim([0, 1])
    axes[0].set_title('Accuracy')
    error_cost = [training_data[epoch]['error_cost'] for epoch in epochs]
    regularization_cost = [training_data[epoch]['regularization_cost'] for epoch in epochs]
    axes[1].plot(epochs, error_cost, label="error_cost")
    axes[1].plot(epochs, regularization_cost, label="regularization_cost")
    axes[1].set_title('Cost')
    axes[1].legend()
    # Plot weights
    for layer, axis in zip(range(layers_count), axes[statistics_count:]):
        # This gives me a list of epochs size, each element of which contains values of different
        # percentiles at that epoch
        weights_percentiles = [training_data[epoch]['weights_percentiles'][layer] for epoch in epochs]
        # Now transpose above, so we have n percentiles lists, each of epochs numbers length
        individual_percentiles = [percentile for percentile in zip(*weights_percentiles)]
        labels = ['0%', '25%', '50%', '75%', '100%']
        for percentile, label in zip(individual_percentiles, labels):
            axis.plot(epochs, percentile, label=label)
            # Shade between zero and the percentile curve for readability.
            axis.fill_between(epochs, 0, percentile, color=(0, 0.7, 1, 0.2))
        axis.set_title("Layer {} weights percentiles".format(layer))
        axis.legend()
    plt.show()
def main():
    """Open the training shelf named in configuration.ini and plot it."""
    database_path = configobj.ConfigObj('configuration.ini')['database_path']
    # FIX: the original never closed the shelf; use the context manager so
    # the underlying database handle is always released. `writeback=True`
    # is kept to preserve the original behaviour.
    with shelve.open(database_path, writeback=True) as shelf:
        print(shelf['network_parameters'])
        print(shelf['hyperparameters'])
        plot_training_data(shelf['training'])
if __name__ == "__main__":
    main()
| mit |
deepakantony/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelFrame.py | 22 | 2847 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
# --- Analysis parameters -----------------------------------------------
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
# NOTE(review): `pos` is a float sample index; slicing with it only works
# on Python 2 / old numpy deprecated float indexing -- confirm target env.
pos = .8*fs
M = 601                       # analysis window size (samples)
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)             # analysis window
N = 1024                      # FFT size
t = -100                      # peak-detection threshold (dB)
nH = 40                       # maximum number of harmonics
minf0 = 420                   # f0 search range (Hz), around A4
maxf0 = 460
f0et = 5                      # maximum error allowed in f0 detection
maxnpeaksTwm = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512                      # synthesis FFT size
H = Ns/4                      # hop size (Python 2 integer division assumed)
# --- Harmonic analysis of one frame ------------------------------------
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N           # interpolated bin locations -> Hz
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
# --- Harmonic synthesis and residual computation ------------------------
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
pYh = np.unwrap(np.angle(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh                    # residual spectrum = input - harmonics
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
pXr = np.unwrap(np.angle(Xr[:Ns/2]))
# Inverse FFT back to time domain (factor compensates window/hop scaling).
xrw = np.real(fftshift(ifft(Xr))) * H * 2
yhw = np.real(fftshift(ifft(Yh))) * H * 2
# --- Plotting -----------------------------------------------------------
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,2,1)
plt.plot(np.arange(M), x[pos-hM1:pos+hM2]*w, lw=1.5)
plt.axis([0, M, min(x[pos-hM1:pos+hM2]*w), max(x[pos-hM1:pos+hM2]*w)])
plt.title('x (flute-A4.wav)')
plt.subplot(3,2,3)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-90,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(3,2,5)
plt.plot(binFreq,pX,'c', lw=1.5)
plt.axis([0,maxplotfreq,0,16])
plt.plot(hfreq, hphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + harmonics')
plt.subplot(3,2,4)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.8, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.5, label='mXr')
plt.axis([0,maxplotfreq,-90,max(mYh)+2])
plt.legend(prop={'size':10})
plt.title('mYh + mXr')
plt.subplot(3,2,6)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,pYh,'c', lw=.8, label='pYh')
plt.plot(binFreq,pXr,'c', lw=1.5, label ='pXr')
plt.axis([0,maxplotfreq,-5,25])
plt.legend(prop={'size':10})
plt.title('pYh + pXr')
plt.subplot(3,2,2)
plt.plot(np.arange(Ns), yhw, 'b', lw=.8, label='yh')
plt.plot(np.arange(Ns), xrw, 'b', lw=1.5, label='xr')
plt.axis([0, Ns, min(yhw), max(yhw)])
plt.legend(prop={'size':10})
plt.title('yh + xr')
plt.tight_layout()
plt.savefig('hprModelFrame.png')
plt.show()
| agpl-3.0 |
gary159/Cure_Gun_Violence | gunviolence/gunviolence/views.py | 1 | 2167 | from gunviolence import app
from flask import Flask, render_template, url_for, jsonify
from werkzeug.serving import run_simple
from ConfigUtil import config
from ChicagoData import comm
import pandas as pd
import numpy as np
import random
def gen_hex_colour_code():
    """Return a random 6-digit uppercase hex colour code (without '#')."""
    hex_digits = '0123456789ABCDEF'
    return ''.join(random.choice(hex_digits) for _ in range(6))
# Google Maps JavaScript API key, read from the project configuration.
key=config['GOOGLE_MAPS_KEY']
# Base map options shared by every view; the per-city 'center' entry is
# filled in (mutated) by the request handlers below.
map_dict = {
    'identifier': 'view-side',
    'zoom': 11,
    'maptype': 'ROADMAP',
    'zoom_control': True,
    'scroll_wheel': False,
    'fullscreen_control': False,
    'rorate_control': False,
    'maptype_control': False,
    'streetview_control': False,
    'scale_control': True,
    'style': 'height:800px;width:600px;margin:0;'}
@app.route('/')
def main_page():
    """Render the landing page."""
    return render_template('main_page.html')
@app.route('/<string:city>')
def city(city, map_dict=map_dict):
    """Render the map page for one city.
    NOTE(review): `map_dict` defaults to the shared module-level dict and
    is mutated here, so the last requested city's center leaks into every
    handler sharing it -- confirm this is intended.
    """
    map_dict['center'] = tuple(config['center'][city])
    return render_template('city.html', date_dropdown=[d for d in enumerate(comm.date_list)], api_key=key, city=city)
@app.route('/<string:city>/<string:dt_filter>')
def monthly_data(city, dt_filter, map_dict=map_dict):
    """Return JSON map data for one city and one month filter.
    `dt_filter` == '0' means "no month selected" and yields empty results.
    NOTE(review): mutates the shared module-level `map_dict` default --
    see the note on `city()`.
    """
    map_dict['center'] = tuple(config['center'][city])
    if dt_filter!='0':
        # Keep the non-date columns plus only the requested month column.
        cols = set(comm.data.columns) - set(comm.date_list)
        cols |= set([dt_filter])
        comm_data = comm.geom_to_list(comm.data[list(cols)])
        comm_data.loc[:, dt_filter] = comm_data[dt_filter].fillna(0)
        # Single scalar norm of the month column, used to scale opacity.
        comm_data.loc[:, 'norm'] = np.linalg.norm(comm_data[dt_filter].fillna(0))
        comm_data.loc[:, 'fill_opacity'] = comm_data[dt_filter]/comm_data['norm']
    else:
        comm_data=pd.DataFrame([])
    # Styling for the community-area polygons drawn on the map.
    polyargs = {}
    polyargs['stroke_color'] = '#FF0000'
    polyargs['fill_color'] = '#FF0000'
    polyargs['stroke_opacity'] = 1
    polyargs['stroke_weight'] = .2
    return jsonify({'selected_dt': dt_filter, 'map_dict': map_dict, 'polyargs': polyargs, 'results': comm_data.to_dict()})
if __name__ == '__main__':
    # Development server with auto-reload and the werkzeug debugger enabled.
    run_simple('localhost', 5000, app,
               use_reloader=True, use_debugger=True, use_evalex=True)
| apache-2.0 |
B3AU/waveTree | benchmarks/bench_plot_nmf.py | 5 | 5815 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
import gc
from time import time
import numpy as np
from collections import defaultdict
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
    '''
    A, S = nnmf(X, r, tol=1e-3, R=None)
    Implement Lee & Seung's algorithm
    Parameters
    ----------
    V : 2-ndarray, [n_samples, n_features]
        input matrix
    r : integer
        number of latent features
    max_iter : integer, optional
        maximum number of iterations (default: 1000)
    tol : double
        tolerance threshold for early exit (when the update factor is within
        tol of 1., the function exits)
    R : "svd", None or random-state-like, optional
        "svd" uses the NNDSVD initialisation; None uses numpy's global
        RandomState. NOTE(review): despite the original "random seed"
        wording, a plain integer would fail here (no `standard_normal`
        attribute) -- confirm intended accepted types.
    Returns
    -------
    A : 2-ndarray, [n_samples, r]
        Component part of the factorization
    S : 2-ndarray, [r, n_features]
        Data part of the factorization
    Reference
    ---------
    "Algorithms for Non-negative Matrix Factorization"
    by Daniel D Lee, Sebastian H Seung
    (available at http://citeseer.ist.psu.edu/lee01algorithms.html)
    '''
    # Nomenclature in the function follows Lee & Seung
    eps = 1e-5
    n, m = V.shape
    if R == "svd":
        W, H = _initialize_nmf(V, r)
    elif R is None:
        R = np.random.mtrand._rand
        W = np.abs(R.standard_normal((n, r)))
        H = np.abs(R.standard_normal((r, m)))
    for i in xrange(max_iter):
        # Multiplicative updates; eps guards against division by zero.
        updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
        H *= updateH
        updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
        W *= updateW
        # NOTE(review): `True or` makes the `(i % 10) == 0` throttle dead
        # code, so convergence is checked on every iteration -- confirm.
        if True or (i % 10) == 0:
            max_update = max(updateW.max(), updateH.max())
            if abs(1. - max_update) < tol:
                break
    return W, H
def compute_bench(samples_range, features_range, rank=50, tolerance=1e-7):
    """Time several NMF initialisations over a grid of matrix shapes.
    Returns two dicts keyed by benchmark name: wall-clock times and final
    reconstruction errors, one list entry per (n_samples, n_features) pair.
    """
    it = 0
    timeset = defaultdict(lambda: [])
    err = defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # Non-negative low-rank test matrix shared by all contenders.
            X = np.abs(make_low_rank_matrix(n_samples, n_features,
                       effective_rank=rank, tail_strength=0.2))
            # gc.collect() before each timing run to reduce noise.
            gc.collect()
            print("benchmarking nndsvd-nmf: ")
            tstart = time()
            m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
            tend = time() - tstart
            timeset['nndsvd-nmf'].append(tend)
            err['nndsvd-nmf'].append(m.reconstruction_err_)
            print(m.reconstruction_err_, tend)
            gc.collect()
            print("benchmarking nndsvda-nmf: ")
            tstart = time()
            m = NMF(n_components=30, init='nndsvda',
                    tol=tolerance).fit(X)
            tend = time() - tstart
            timeset['nndsvda-nmf'].append(tend)
            err['nndsvda-nmf'].append(m.reconstruction_err_)
            print(m.reconstruction_err_, tend)
            gc.collect()
            print("benchmarking nndsvdar-nmf: ")
            tstart = time()
            m = NMF(n_components=30, init='nndsvdar',
                    tol=tolerance).fit(X)
            tend = time() - tstart
            timeset['nndsvdar-nmf'].append(tend)
            err['nndsvdar-nmf'].append(m.reconstruction_err_)
            print(m.reconstruction_err_, tend)
            gc.collect()
            print("benchmarking random-nmf")
            tstart = time()
            m = NMF(n_components=30, init=None, max_iter=1000,
                    tol=tolerance).fit(X)
            tend = time() - tstart
            timeset['random-nmf'].append(tend)
            err['random-nmf'].append(m.reconstruction_err_)
            print(m.reconstruction_err_, tend)
            gc.collect()
            print("benchmarking alt-random-nmf")
            tstart = time()
            W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
            tend = time() - tstart
            timeset['alt-random-nmf'].append(tend)
            err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
            print(np.linalg.norm(X - np.dot(W, H)), tend)
    return timeset, err
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    axes3d
    import matplotlib.pyplot as plt
    # FIX: `np.int` was deprecated (NumPy 1.20) and removed (1.24);
    # the builtin `int` is the documented drop-in replacement.
    samples_range = np.linspace(50, 500, 3).astype(int)
    features_range = np.linspace(50, 500, 3).astype(int)
    timeset, err = compute_bench(samples_range, features_range)
    for i, results in enumerate((timeset, err)):
        # NOTE(review): both iterations reuse the same named figure, so the
        # error surfaces draw over the timing surfaces -- confirm intent.
        fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
        # NOTE(review): `Figure.gca(projection=...)` was removed in
        # matplotlib 3.6; kept unchanged for the versions this targets.
        ax = fig.gca(projection='3d')
        # FIX: `dict.iteritems()` does not exist on Python 3; `.items()`
        # behaves identically here.
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
            X, Y = np.meshgrid(samples_range, features_range)
            Z = np.asarray(timings).reshape(samples_range.shape[0],
                                            features_range.shape[0])
            # plot the actual surface
            ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                            color=c)
            # dummy point plot to stick the legend to since surface plot do not
            # support legends (yet?)
            ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
        ax.set_zlabel(zlabel)
        ax.legend()
        plt.show()
| bsd-3-clause |
jordancheah/zipline | zipline/transforms/ta.py | 32 | 7988 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import numpy as np
import pandas as pd
import talib
import copy
from six import iteritems
from zipline.transforms import BatchTransform
def zipline_wrapper(talib_fn, key_map, data):
    """Apply a TA-Lib abstract function to each sid in a zipline data panel.

    talib_fn: a talib.abstract function (exposes input_names/output_names)
    key_map: dict mapping TA-Lib input keys ('open', 'close', ...) to the
        zipline data keys that should supply them
    data: panel-like object indexed as data[zipline_key][sid]
        # assumes a pandas Panel (has .minor_axis and .shape[1]) -- TODO confirm

    returns: a DataFrame (output name x sid) when the TA-Lib function has
    multiple outputs, otherwise a Series mapping sid to a float.
    """
    # get required TA-Lib input names
    if 'price' in talib_fn.input_names:
        req_inputs = [talib_fn.input_names['price']]
    elif 'prices' in talib_fn.input_names:
        req_inputs = talib_fn.input_names['prices']
    else:
        req_inputs = []

    # If there are multiple output names then the results are named,
    # if there is only one output name, it usually 'real' is best represented
    # by a float.
    # Use a DataFrame to map sid to named values, and a Series map sid
    # to floats.
    if len(talib_fn.output_names) > 1:
        all_results = pd.DataFrame(index=talib_fn.output_names,
                                   columns=data.minor_axis)
    else:
        all_results = pd.Series(index=data.minor_axis)

    for sid in data.minor_axis:
        # build talib_data from zipline data
        talib_data = dict()
        for talib_key, zipline_key in iteritems(key_map):
            # if zipline_key is found, add it to talib_data
            if zipline_key in data:
                values = data[zipline_key][sid].values
                # Do not include sids that have only nans, passing only nans
                # is incompatible with many of the underlying TALib functions.
                if pd.isnull(values).all():
                    # skip this sid entirely (leaves its result as NaN)
                    break
                else:
                    talib_data[talib_key] = data[zipline_key][sid].values
            # if zipline_key is not found and not required, add zeros
            elif talib_key not in req_inputs:
                talib_data[talib_key] = np.zeros(data.shape[1])
            # if zipline key is not found and required, raise error
            else:
                raise KeyError(
                    'Tried to set required TA-Lib data with key '
                    '\'{0}\' but no Zipline data is available under '
                    'expected key \'{1}\'.'.format(
                        talib_key, zipline_key))

        # call talib (talib_data is empty when the all-NaN break fired
        # on the first key)
        if talib_data:
            talib_result = talib_fn(talib_data)
            # keep only the most recent result
            if isinstance(talib_result, (list, tuple)):
                sid_result = tuple([r[-1] for r in talib_result])
            else:
                sid_result = talib_result[-1]
            all_results[sid] = sid_result

    return all_results
def make_transform(talib_fn, name):
    """
    A factory for BatchTransforms based on TALIB abstract functions.

    talib_fn: a talib.abstract function object
    name: the class name to give the generated transform

    returns: a TALibTransform subclass of BatchTransform
    """
    # make class docstring from the TA-Lib docs plus the default key mapping
    header = '\n#---- TA-Lib docs\n\n'
    talib_docs = getattr(talib, talib_fn.info['name']).__doc__
    divider1 = '\n#---- Default mapping (TA-Lib : Zipline)\n\n'
    mappings = '\n'.join('    {0} : {1}'.format(k, v)
                         for k, v in talib_fn.input_names.items())
    divider2 = '\n\n#---- Zipline docs\n'
    help_str = header + talib_docs + divider1 + mappings + divider2

    class TALibTransform(BatchTransform):

        __doc__ = help_str + """
        TA-Lib keyword arguments must be passed at initialization. For
        example, to construct a moving average with timeperiod of 5, pass
        "timeperiod=5" during initialization.

        All abstract TA-Lib functions accept a data dictionary containing
        'open', 'high', 'low', 'close', and 'volume' keys, even if they do
        not require those keys to run. For example, talib.MA (moving
        average) is always computed using the data under the 'close'
        key. By default, Zipline constructs this data dictionary with the
        appropriate sid data, but users may overwrite this by passing
        mappings as keyword arguments. For example, to compute the moving
        average of the sid's high, provide "close = 'high'" and Zipline's
        'high' data will be used as TA-Lib's 'close' data. Similarly, if a
        user had a data column named 'Oil', they could compute its moving
        average by passing "close='Oil'".

        **Example**

        A moving average of a data column called 'Oil' with timeperiod 5,

            talib.transforms.ta.MA(close='Oil', timeperiod=5)

        The user could find the default arguments and mappings by calling:

            help(zipline.transforms.ta.MA)

        **Arguments**

        open : string, default 'open'
        high : string, default 'high'
        low : string, default 'low'
        close : string, default 'price'
        volume : string, default 'volume'

        refresh_period : int, default 0
            The refresh_period of the BatchTransform determines the number
            of iterations that pass before the BatchTransform updates its
            internal data.

        \*\*kwargs : any arguments to be passed to the TA-Lib function.
        """

        def __init__(self,
                     close='price',
                     open='open',
                     high='high',
                     low='low',
                     volume='volume',
                     refresh_period=0,
                     bars='daily',
                     **kwargs):
            key_map = {'high': high,
                       'low': low,
                       'open': open,
                       'volume': volume,
                       'close': close}
            self.call_kwargs = kwargs

            # Make deepcopy of talib abstract function.
            # This is necessary because talib abstract functions remember
            # state, including parameters, and we need to set the parameters
            # in order to compute the lookback period that will determine the
            # BatchTransform window_length. TALIB has no way to restore default
            # parameters, so the deepcopy lets us change this function's
            # parameters without affecting other TALibTransforms of the same
            # function.
            self.talib_fn = copy.deepcopy(talib_fn)

            # set the parameters
            for param in self.talib_fn.get_parameters().keys():
                if param in kwargs:
                    self.talib_fn.set_parameters({param: kwargs[param]})

            # get the lookback
            self.lookback = self.talib_fn.lookback

            self.bars = bars
            if bars == 'daily':
                lookback = self.lookback + 1
            elif bars == 'minute':
                # 6.5 hours per trading day
                lookback = int(math.ceil(self.lookback / (6.5 * 60)))
            else:
                # BUG FIX: previously an unrecognized `bars` value left
                # `lookback` unbound, producing a confusing NameError below.
                raise ValueError(
                    "bars must be 'daily' or 'minute', got %r" % (bars,))

            # Ensure that window_length is at least 1 day's worth of data.
            window_length = max(lookback, 1)

            transform_func = functools.partial(
                zipline_wrapper, self.talib_fn, key_map)

            super(TALibTransform, self).__init__(
                func=transform_func,
                refresh_period=refresh_period,
                window_length=window_length,
                compute_only_full=False,
                bars=bars)

        def __repr__(self):
            return 'Zipline BatchTransform: {0}'.format(
                self.talib_fn.info['name'])

    TALibTransform.__name__ = name
    # return class
    return TALibTransform
# add all TA-Lib functions to locals
# Generate one BatchTransform subclass per TA-Lib abstract function and
# expose it under that function's name (e.g. zipline.transforms.ta.MA).
# At module scope locals() is the module namespace, so assignment works here.
for name in talib.abstract.__FUNCTION_NAMES:
    fn = getattr(talib.abstract, name)
    locals()[name] = make_transform(fn, name)
| apache-2.0 |
Sunhick/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
    """Estimates linear least squares fit and returns MSE.

    xs: sequence of values
    ys: sequence of values

    returns: inter, slope, mse
    """
    count = float(len(xs))
    x_bar = xs.mean()
    y_bar = ys.mean()

    x_dev = xs - x_bar
    y_dev = ys - y_bar

    # slope = cov(x, y) / var(x)
    varx = np.dot(x_dev, x_dev) / count
    cov = np.dot(x_dev, y_dev) / count
    slope = cov / varx
    inter = y_bar - slope * x_bar

    residuals = ys - (inter + slope * xs)
    mse = np.dot(residuals, residuals) / count
    return inter, slope, mse
def ReadVariables():
    """Reads Stata dictionary files for NSFG data.

    returns: DataFrame that maps variable names to descriptions
    """
    vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
    vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables

    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # concat is the supported way to stack the two variable tables.
    all_vars = pandas.concat([vars1, vars2])
    all_vars.index = all_vars.name
    return all_vars
def JoinFemResp(df):
    """Reads the female respondent file and joins on caseid.

    df: DataFrame of pregnancy records (must have a `caseid` column)

    returns: joined DataFrame; respondent columns that collide with
    pregnancy columns get an `_r` suffix
    """
    resp = chap01soln.ReadFemResp()
    # index by caseid so df.join can match pregnancy rows to respondents
    resp.index = resp.caseid

    join = df.join(resp, on='caseid', rsuffix='_r')

    # convert from colon-separated time strings to datetimes
    join.screentime = pandas.to_datetime(join.screentime)

    return join
def GoMining(df):
    """Searches for variables that predict birth weight.

    Fits `totalwgt_lb ~ agepreg + <name>` for every column and records
    the R^2 of each model that could be fit.

    df: DataFrame of pregnancy records

    returns: list of (rsquared, variable name) pairs
    """
    variables = []
    for name in df.columns:
        try:
            # skip near-constant columns, which can't predict anything
            if df[name].var() < 1e-7:
                continue

            formula = 'totalwgt_lb ~ agepreg + ' + name
            # NOTE: the original called formula.encode('ascii') here, a
            # Python 2 workaround; under Python 3 that yields bytes, which
            # patsy rejects, so the formula is passed as str.
            model = smf.ols(formula, data=df)
            # skip models where more than half the rows were dropped as NaN
            if model.nobs < len(df)/2:
                continue

            results = model.fit()
        except (ValueError, TypeError):
            # unusable column (e.g. non-numeric); skip it
            continue

        variables.append((results.rsquared, name))

    return variables
def MiningReport(variables, n=30):
    """Prints the variables with the highest R^2, with descriptions.

    variables: list of (R^2, variable name) pairs
    n: number of pairs to print
    """
    all_vars = ReadVariables()

    variables.sort(reverse=True)
    for r2, raw_name in variables[:n]:
        # respondent columns were suffixed with _r during the join;
        # strip that to look up the original variable description
        key = re.sub('_r$', '', raw_name)
        try:
            desc = all_vars.loc[key].desc
            if isinstance(desc, pandas.Series):
                desc = desc[0]
            print(raw_name, r2, desc)
        except KeyError:
            print(raw_name, r2)
def PredictBirthWeight(live):
    """Predicts birth weight of a baby at 30 weeks.

    Mines the joined pregnancy/respondent data for predictive variables,
    prints a report, then fits and summarizes a hand-picked model.

    live: DataFrame of live births
    """
    # restrict to pregnancies that lasted past 30 weeks
    live = live[live.prglngth>30]
    join = JoinFemResp(live)

    t = GoMining(join)
    MiningReport(t)

    formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
               'nbrnaliv>1 + paydu==1 + totincr')
    results = smf.ols(formula, data=join).fit()
    SummarizeResults(results)
def SummarizeResults(results):
    """Prints the most important parts of linear regression results:

    coefficient and p-value per parameter, then R^2 and the standard
    deviations of the dependent variable and the residuals.

    results: RegressionResults object
    """
    # Series.iteritems was removed in pandas 2.0; items() is equivalent.
    for name, param in results.params.items():
        pvalue = results.pvalues[name]
        print('%s   %0.3g   (%.3g)' % (name, param, pvalue))

    try:
        print('R^2 %.4g' % results.rsquared)
        ys = results.model.endog
        print('Std(ys) %.4g' % ys.std())
        print('Std(res) %.4g' % results.resid.std())
    except AttributeError:
        # logit models expose pseudo R^2 instead of rsquared
        print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
    """Runs a simple regression and compare results to thinkstats2 functions.

    Fits totalwgt_lb ~ agepreg both by hand (thinkstats2) and with
    statsmodels, then asserts the two agree.

    live: DataFrame of live births
    """
    # run the regression with thinkstats2 functions
    live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    ages = live_dropna.agepreg
    weights = live_dropna.totalwgt_lb
    inter, slope = thinkstats2.LeastSquares(ages, weights)
    res = thinkstats2.Residuals(ages, weights, inter, slope)
    r2 = thinkstats2.CoefDetermination(weights, res)

    # run the regression with statsmodels
    formula = 'totalwgt_lb ~ agepreg'
    model = smf.ols(formula, data=live)
    results = model.fit()
    SummarizeResults(results)

    def AlmostEquals(x, y, tol=1e-6):
        # approximate float comparison used only by the sanity checks below
        return abs(x-y) < tol

    # cross-check: statsmodels must reproduce the hand-rolled estimates
    assert(AlmostEquals(results.params['Intercept'], inter))
    assert(AlmostEquals(results.params['agepreg'], slope))
    assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
    """Prints a pivot table comparing first babies to others.

    Shows mean birth weight and mother's age, grouped by `isfirst`.

    live: DataFrame of live births
    """
    # pivot_table's old `rows` keyword was removed from pandas;
    # `index` is the current name for the same parameter.
    table = pandas.pivot_table(live, index='isfirst',
                               values=['totalwgt_lb', 'agepreg'])
    print(table)
def FormatRow(results, columns):
    """Converts regression results to a list of formatted table cells.

    results: RegressionResults object
    columns: variable names to report, in order

    returns: list of strings -- one cell per column plus a final R^2 cell
    """
    cells = []
    for name in columns:
        coef = results.params.get(name, np.nan)
        pval = results.pvalues.get(name, np.nan)

        if np.isnan(coef):
            # variable absent from this model
            cells.append('--')
        elif pval < 0.001:
            # highly significant: show the coefficient with a star
            cells.append('%0.3g (*)' % coef)
        else:
            cells.append('%0.3g (%0.2g)' % (coef, pval))

    # logit models expose pseudo R^2 instead of rsquared
    try:
        cells.append('%.2g' % results.rsquared)
    except AttributeError:
        cells.append('%.2g' % results.prsquared)

    return cells
def RunModels(live):
    """Runs regressions that predict birth weight.

    Fits four nested OLS models, prints a summary of each, and finishes
    with a LaTeX tabular comparison.

    live: DataFrame of pregnancy records.
        NOTE: mutated in place -- an `agepreg2` column is added.
    """
    columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
    header = ['isfirst', 'agepreg', 'agepreg2']

    rows = []
    formula = 'totalwgt_lb ~ isfirst'
    results = smf.ols(formula, data=live).fit()
    rows.append(FormatRow(results, columns))
    print(formula)
    SummarizeResults(results)

    formula = 'totalwgt_lb ~ agepreg'
    results = smf.ols(formula, data=live).fit()
    rows.append(FormatRow(results, columns))
    print(formula)
    SummarizeResults(results)

    formula = 'totalwgt_lb ~ isfirst + agepreg'
    results = smf.ols(formula, data=live).fit()
    rows.append(FormatRow(results, columns))
    print(formula)
    SummarizeResults(results)

    # add a quadratic age term for the final model
    live['agepreg2'] = live.agepreg**2
    formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
    results = smf.ols(formula, data=live).fit()
    rows.append(FormatRow(results, columns))
    print(formula)
    SummarizeResults(results)

    PrintTabular(rows, header)
def PrintTabular(rows, header):
    """Prints results in LaTeX tabular format.

    rows: list of rows, each a list of formatted cell strings
    header: list of column-name strings
    """
    # header line, framed by \hline rules
    print(r'\hline ' + ' & '.join(header) + r' \\ \hline')

    for cells in rows:
        print(' & '.join(cells) + r' \\')

    print(r'\hline')
def LogisticRegressionExample():
    """Runs a simple example of logistic regression and prints results.

    First computes the likelihood by hand for fixed coefficients, then
    fits the same model with statsmodels for comparison.
    """
    y = np.array([0, 1, 0, 1])
    x1 = np.array([0, 0, 0, 1])
    x2 = np.array([0, 1, 1, 1])

    beta = [-1.5, 2.8, 1.1]

    # log-odds under the chosen coefficients
    log_o = beta[0] + beta[1] * x1 + beta[2] * x2
    print(log_o)

    # odds and probabilities
    o = np.exp(log_o)
    print(o)

    p = o / (o+1)
    print(p)

    # per-row likelihood: p where y==1, (1-p) where y==0
    like = y * p + (1-y) * (1-p)
    print(like)
    print(np.prod(like))

    # same model, fit by statsmodels
    df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
    results = smf.logit('y ~ x1 + x2', data=df).fit()
    print(results.summary())
def RunLogisticModels(live):
    """Runs regressions that predict sex.

    live: DataFrame of pregnancy records
    """
    #live = linear.ResampleRowsWeighted(live)

    df = live[live.prglngth>30]

    # NOTE(review): these assignments write into a slice of `live`, which
    # triggers pandas chained-assignment warnings -- confirm intended.
    df['boy'] = (df.babysex==1).astype(int)
    df['isyoung'] = (df.agepreg<20).astype(int)
    df['isold'] = (df.agepreg<35).astype(int)
    # map end-of-pregnancy month to a 0..3 season code
    df['season'] = (((df.datend+1) % 12) / 3).astype(int)

    # run the simple model
    model = smf.logit('boy ~ agepreg', data=df)
    results = model.fit()
    print('nobs', results.nobs)
    print(type(results))
    SummarizeResults(results)

    # run the complex model
    model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
    results = model.fit()
    print('nobs', results.nobs)
    print(type(results))
    SummarizeResults(results)

    # make the scatter plot
    exog = pandas.DataFrame(model.exog, columns=model.exog_names)
    endog = pandas.DataFrame(model.endog, columns=[model.endog_names])

    xs = exog['agepreg']
    lo = results.fittedvalues
    # convert fitted log-odds to probabilities
    o = np.exp(lo)
    p = o / (o+1)

    #thinkplot.Scatter(xs, p, alpha=0.1)
    #thinkplot.Show()

    # compute accuracy of the 0.5-threshold classifier vs. the base rate
    actual = endog['boy']
    baseline = actual.mean()

    predict = (results.predict() >= 0.5)
    true_pos = predict * actual
    true_neg = (1 - predict) * (1 - actual)

    acc = (sum(true_pos) + sum(true_neg)) / len(actual)
    print(acc, baseline)

    # predict the sex for one hypothetical pregnancy
    columns = ['agepreg', 'hpagelb', 'birthord', 'race']
    new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
    y = results.predict(new)
    print(y)
def main(name, data_dir='.'):
    """Runs all regression examples in sequence.

    name: script name (unused; comes from sys.argv)
    data_dir: directory containing the NSFG data files (unused here)
    """
    # fixed seed so the resampling examples are reproducible
    thinkstats2.RandomSeed(17)
    LogisticRegressionExample()

    live, firsts, others = first.MakeFrames()
    live['isfirst'] = (live.birthord == 1)

    RunLogisticModels(live)

    RunSimpleRegression(live)

    RunModels(live)

    PredictBirthWeight(live)
if __name__ == '__main__':
    import sys
    # argv[0] (the script name) becomes the `name` parameter
    main(*sys.argv)
| gpl-3.0 |
convexopt/gpkit | gpkit/interactive/plotting.py | 2 | 2075 | """Plotting methods"""
import matplotlib.pyplot as plt
import numpy as np
from .plot_sweep import assign_axes
from .. import GPCOLORS
def compare(models, sweeps, posys, tol=0.001):
    """Compares the values of posys over a sweep of several models.

    If posys is of the same length as models, this will plot different
    variables from different models.

    Currently only supports a single sweepvar.

    Example Usage:
    compare([aec, fbc], {"R": (160, 300)},
            ["cost", ("W_{\\rm batt}", "W_{\\rm fuel}")], tol=0.001)
    """
    # autosweep every model over the same sweep range
    sols = [m.autosweep(sweeps, tol, verbosity=0) for m in models]
    posys, axes = assign_axes(sols[0].bst.sweptvar, posys, None)
    for posy, ax in zip(posys, axes):
        for i, sol in enumerate(sols):
            # when one posy was supplied per model, pick the i-th
            if hasattr(posy, "__len__") and len(posy) == len(sols):
                p = posy[i]
            else:
                p = posy
            color = GPCOLORS[i % len(GPCOLORS)]
            if sol._is_cost(p):  # pylint: disable=protected-access
                # cost has autosweep lower/upper bounds; shade between them
                ax.fill_between(sol.sampled_at,
                                sol.cost_lb(), sol.cost_ub(),
                                facecolor=color, edgecolor=color,
                                linewidth=0.75)
            else:
                ax.plot(sol.sampled_at, sol(p), color=color)
def plot_convergence(model):
    """Plots the convergence of a signomial programming model

    Arguments
    ---------
    model: Model
        Signomial programming model that has already been solved

    Returns
    -------
    matplotlib.pyplot Figure
        Plot of cost as functions of SP iteration #
    """
    fig, ax = plt.subplots()
    iterations = []
    costs = []
    # collect the cost at each SP iteration; gps without a result dict
    # (e.g. failed solves) are skipped
    for idx, gp in enumerate(model.program.gps):
        try:
            costs.append(gp.result['cost'])
            iterations.append(idx + 1)
        except TypeError:
            pass
    ax.plot(np.array(iterations, dtype=float),
            np.array(costs, dtype=float), '-o')
    ax.set_xlabel('Iteration')
    ax.set_ylabel('Cost')
    ax.set_xticks(range(1, len(model.program.gps) + 1))
    return fig, ax
| mit |
CanisMajoris/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
    """Tests the hypothesis that a coin is fair."""

    def TestStatistic(self, data):
        """Computes the test statistic: |#heads - #tails|.

        data: pair of (heads, tails) counts
        """
        heads, tails = data
        return abs(heads - tails)

    def RunModel(self):
        """Run the model of the null hypothesis (a fair coin).

        returns: simulated (heads, tails) counts
        """
        heads, tails = self.data
        flips = [random.choice('HT') for _ in range(heads + tails)]
        counts = thinkstats2.Hist(flips)
        return counts['H'], counts['T']
class DiffMeansPermute(thinkstats2.HypothesisTest):
    """Tests a difference in means by permutation."""

    def TestStatistic(self, data):
        """Computes the test statistic: |mean(group1) - mean(group2)|.

        data: pair of sequences
        """
        group1, group2 = data
        delta = group1.mean() - group2.mean()
        return abs(delta)

    def MakeModel(self):
        """Build a model of the null hypothesis by pooling both groups."""
        group1, group2 = self.data
        self.n, self.m = len(group1), len(group2)
        self.pool = np.hstack((group1, group2))

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data -- a random re-split of the pooled values
        """
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
class DiffMeansOneSided(DiffMeansPermute):
    """Tests a one-sided difference in means by permutation."""

    def TestStatistic(self, data):
        """Computes the signed (one-sided) difference in means.

        data: pair of sequences
        """
        group1, group2 = data
        return group1.mean() - group2.mean()
class DiffStdPermute(DiffMeansPermute):
    """Tests a one-sided difference in standard deviation by permutation."""

    def TestStatistic(self, data):
        """Computes the signed difference in standard deviations.

        data: pair of sequences
        """
        group1, group2 = data
        return group1.std() - group2.std()
class CorrelationPermute(thinkstats2.HypothesisTest):
    """Tests correlations by permutation."""

    def TestStatistic(self, data):
        """Computes the test statistic: absolute correlation.

        data: tuple of xs and ys
        """
        xs, ys = data
        return abs(thinkstats2.Corr(xs, ys))

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data -- xs randomly permuted against ys
        """
        xs, ys = self.data
        return np.random.permutation(xs), ys
class DiceTest(thinkstats2.HypothesisTest):
    """Tests whether a six-sided die is fair."""

    def TestStatistic(self, data):
        """Computes the test statistic: total absolute deviation
        of observed from expected frequencies.

        data: list of frequencies
        """
        observed = data
        expected = np.ones(6) * sum(observed) / 6
        return sum(abs(observed - expected))

    def RunModel(self):
        """Run the model of the null hypothesis (a fair die).

        returns: simulated list of frequencies
        """
        faces = [1, 2, 3, 4, 5, 6]
        rolls = np.random.choice(faces, sum(self.data), replace=True)
        return thinkstats2.Hist(rolls).Freqs(faces)
class DiceChiTest(DiceTest):
    """Tests a six-sided die using a chi-squared statistic."""

    def TestStatistic(self, data):
        """Computes the chi-squared statistic.

        data: list of frequencies
        """
        observed = data
        expected = np.ones(6) * sum(observed) / 6
        return sum((observed - expected)**2 / expected)
class PregLengthTest(thinkstats2.HypothesisTest):
    """Tests difference in pregnancy length using a chi-squared statistic."""

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: pair of lists of pregnancy lengths

        returns: total chi-squared of both groups vs. the pooled distribution
        """
        firsts, others = data
        stat = self.ChiSquared(firsts) + self.ChiSquared(others)
        return stat

    def ChiSquared(self, lengths):
        """Computes the chi-squared statistic.

        lengths: sequence of lengths

        returns: float
        """
        hist = thinkstats2.Hist(lengths)
        observed = np.array(hist.Freqs(self.values))
        # expected counts come from the pooled probabilities in MakeModel
        expected = self.expected_probs * len(lengths)
        stat = sum((observed - expected)**2 / expected)
        return stat

    def MakeModel(self):
        """Build a model of the null hypothesis.
        """
        firsts, others = self.data
        self.n = len(firsts)
        # pool both groups; under the null they share one distribution
        self.pool = np.hstack((firsts, others))

        pmf = thinkstats2.Pmf(self.pool)
        # restrict the test to lengths of 35..43 weeks
        self.values = range(35, 44)
        self.expected_probs = np.array(pmf.Probs(self.values))

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data -- a random re-split of the pooled lengths
        """
        np.random.shuffle(self.pool)
        data = self.pool[:self.n], self.pool[self.n:]
        return data
def RunDiceTest():
    """Tests whether a die is fair.
    """
    freqs = [8, 9, 19, 5, 8, 11]

    print('dice test', DiceTest(freqs).PValue(iters=10000))
    print('dice chi test', DiceChiTest(freqs).PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
    """Computes the chance of a false negative based on resampling.

    data: pair of sequences
    num_runs: how many experiments to simulate

    returns: float false negative rate
    """
    group1, group2 = data
    count = 0

    for i in range(num_runs):
        # resample each group with replacement to simulate a new experiment
        sample1 = thinkstats2.Resample(group1)
        sample2 = thinkstats2.Resample(group2)
        ht = DiffMeansPermute((sample1, sample2))
        p_value = ht.PValue(iters=101)
        # p > 5% means the (real) effect went undetected: a false negative
        if p_value > 0.05:
            count += 1

    # true division via the __future__ import at the top of the file
    return count / num_runs
def PrintTest(p_value, ht):
    """Prints results from a hypothesis test.

    p_value: float
    ht: HypothesisTest
    """
    for label, value in (('p-value', p_value),
                         ('actual', ht.actual),
                         ('ts max', ht.MaxTestStat())):
        print(label, '=', value)
def RunTests(data, iters=1000):
    """Runs several tests on the given data.

    Runs two-sided and one-sided permutation tests of the difference in
    means, and a one-sided test of the difference in standard deviation.

    data: pair of sequences
    iters: number of iterations to run
    """
    # test the difference in means
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=iters)
    print('\nmeans permute two-sided')
    PrintTest(p_value, ht)

    # save the sampling distribution of the test statistic
    ht.PlotCdf()
    thinkplot.Save(root='hypothesis1',
                   title='Permutation test',
                   xlabel='difference in means (weeks)',
                   ylabel='CDF',
                   legend=False)

    # test the difference in means one-sided
    ht = DiffMeansOneSided(data)
    p_value = ht.PValue(iters=iters)
    print('\nmeans permute one-sided')
    PrintTest(p_value, ht)

    # test the difference in std
    ht = DiffStdPermute(data)
    p_value = ht.PValue(iters=iters)
    print('\nstd permute one-sided')
    PrintTest(p_value, ht)
def ReplicateTests():
    """Replicates tests with the new NSFG data."""
    live, firsts, others = nsfg2.MakeFrames()

    # compare pregnancy lengths
    print('\nprglngth2')
    data = firsts.prglngth.values, others.prglngth.values
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)

    # compare birth weights (dropna: weight is missing for some rows)
    print('\nbirth weight 2')
    data = (firsts.totalwgt_lb.dropna().values,
            others.totalwgt_lb.dropna().values)
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)

    # test correlation between mother's age and birth weight
    live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    data = live2.agepreg.values, live2.totalwgt_lb.values
    ht = CorrelationPermute(data)
    p_value = ht.PValue()
    print('\nage weight correlation 2')
    PrintTest(p_value, ht)

    # compare pregnancy lengths (chi-squared)
    data = firsts.prglngth.values, others.prglngth.values
    ht = PregLengthTest(data)
    p_value = ht.PValue()
    print('\npregnancy length chi-squared 2')
    PrintTest(p_value, ht)
def main():
    """Runs every hypothesis-test example in the chapter, in order."""
    # fixed seed so the resampling results are reproducible
    thinkstats2.RandomSeed(17)

    # run the coin test
    ct = CoinTest((140, 110))
    pvalue = ct.PValue()
    print('coin test p-value', pvalue)

    # compare pregnancy lengths
    print('\nprglngth')
    live, firsts, others = first.MakeFrames()
    data = firsts.prglngth.values, others.prglngth.values
    RunTests(data)

    # compare birth weights
    print('\nbirth weight')
    data = (firsts.totalwgt_lb.dropna().values,
            others.totalwgt_lb.dropna().values)
    ht = DiffMeansPermute(data)
    p_value = ht.PValue(iters=1000)
    print('means permute two-sided')
    PrintTest(p_value, ht)

    # test correlation
    live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
    data = live2.agepreg.values, live2.totalwgt_lb.values
    ht = CorrelationPermute(data)
    p_value = ht.PValue()
    print('\nage weight correlation')
    print('n=', len(live2))
    PrintTest(p_value, ht)

    # run the dice test
    RunDiceTest()

    # compare pregnancy lengths (chi-squared)
    data = firsts.prglngth.values, others.prglngth.values
    ht = PregLengthTest(data)
    p_value = ht.PValue()
    print('\npregnancy length chi-squared')
    PrintTest(p_value, ht)

    # compute the false negative rate for difference in pregnancy length
    data = firsts.prglngth.values, others.prglngth.values
    neg_rate = FalseNegRate(data)
    print('false neg rate', neg_rate)

    # run the tests with new nsfg data
    ReplicateTests()
if __name__ == "__main__":
    # run the full analysis when invoked as a script
    main()
| gpl-3.0 |
mirestrepo/voxels-at-lems | registration_eval/results/pert_compute_transformation_error.py | 1 | 8494 | #!/usr/bin/env python
# encoding: utf-8
"""
compute_transformation_error.py
Created by Maria Isabel Restrepo on 2012-09-24.
Copyright (c) 2012 . All rights reserved.
This script computes the distances betweeen an estimated similarity transformation and its ground truth
The transformation is used to transform a "source" coordinate system into a "target coordinate system"
To compute the error between the translations, the L2 norm diference translation vectors in the
"source coordinate system" is computed. Since distances are preserved under R and T, only scale is applied.
The rotation error is computed as the half angle between the normalized
quaternions i.e acos(|<q1,q2>|) in [0, pi/2]
"""
import os
import sys
import logging
import argparse
from vpcl_adaptor import *
import numpy as np
from numpy import linalg as LA
import transformations as tf
import math
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
import reg3d
if __name__ == '__main__':
# root_dir = "/Users/isa/Experiments/reg3d_eval/downtown_2006";
# geo_tfile ="/data/lidar_providence/downtown_offset-1-financial-dan-Hs.txt"
# root_dir = "/Users/isa/Experiments/reg3d_eval/capitol_2006";
# geo_tfile ="/data/lidar_providence/capitol/capitol-dan_Hs.txt"
root_dir = "/Users/isa/Experiments/reg3d_eval/res_east_side";
geo_tfile ="/data/lidar_providence/east_side/res_east_side_Hs.txt"
# plot_errors = True
# descriptors = ["FPFH", "SHOT"]
plot_errors = False
descriptors = ["SHOT"]
sigma = [0.05, 0.1, 0.15]
sigma_str = ["005", "01", "015"]
niter = 500
radius = 30
percentile = 99
ntrials = 10
if (plot_errors):
colors = ['magenta','blue','green', 'red', 'black']
markers = ['-o', '--v', '-s', '--^']
ms = [12,12,12,12]
figT = plt.figure()
figR = plt.figure()
axT = figT.add_subplot(111);
axR = figR.add_subplot(111);
figT_detail = plt.figure()
figR_detail = plt.figure()
axT_detail = figT_detail.add_subplot(111);
axR_detail = figR_detail.add_subplot(111);
plt.hold(True);
plt.axis(tight=False);
IA_error_mean= np.zeros((len(sigma), 3));
IA_error_min= np.zeros((len(sigma), 3))
IA_error_max= np.zeros((len(sigma), 3))
IA_error_median= np.zeros((len(sigma), 3))
ICP_error_mean= np.zeros((len(sigma), 3));
ICP_error_min= np.zeros((len(sigma), 3))
ICP_error_max= np.zeros((len(sigma), 3))
ICP_error_median= np.zeros((len(sigma), 3))
for d_idx in range(0, len(descriptors)):
descriptor = descriptors[d_idx];
print "Descriptor: ", descriptor
for sigma_idx in range(0,len(sigma)):
print "***** Iter: " , niter
IA_error = np.zeros((ntrials, 3));
ICP_error = np.zeros((ntrials, 3));
for trial in range(0,ntrials):
trial_root_dir = root_dir + "/pert_" + sigma_str[sigma_idx] + "_" + str(trial)
print "***** Trial: " , trial
IA_error[trial,:], ICP_error[trial,:] = reg3d.transformation_error(root_dir = trial_root_dir,
descriptor_type = descriptor,
percentile = percentile,
nr_iterations = niter,
gt_fname = "Hs-identity.txt",
geo_tfile = geo_tfile)
print "IA:"
print IA_error
print "ICP Error:"
print ICP_error
#Compute mean, max and min
IA_error_mean[sigma_idx, :] = np.mean(IA_error, axis=0)
ICP_error_mean[sigma_idx, :]= np.mean(ICP_error, axis=0)
IA_error_max[sigma_idx, :] = np.max(IA_error, axis=0)
ICP_error_max[sigma_idx, :]= np.max(ICP_error, axis=0)
IA_error_min[sigma_idx, :] = np.min(IA_error, axis=0)
ICP_error_min[sigma_idx, :]= np.min(ICP_error, axis=0)
IA_error_median[sigma_idx, :] = np.median(IA_error, axis=0)
ICP_error_median[sigma_idx, :]= np.median(ICP_error, axis=0)
import code; code.interact(local=locals())
if (plot_errors):
#plot IA, ICP --> missing to to ICP_normals
axT.errorbar(sigma, IA_error_median[:, 2],
yerr=[ IA_error_median[:, 2]-IA_error_min[:, 2],
IA_error_max[:, 2]- IA_error_median[:, 2]],
fmt=markers[2*d_idx], color=colors[2*d_idx],
label=descriptor + " FA", capsize=12, ms = ms[2*d_idx]);
axT.errorbar(sigma, ICP_error_median[:, 2],
yerr=[ ICP_error_median[:, 2]-ICP_error_min[:, 2],
ICP_error_max[:, 2]- ICP_error_median[:, 2]],
fmt=markers[2*d_idx+1], color=colors[2*d_idx+1],
label=descriptor + " FA+ICP", capsize=12, ms=ms[2*d_idx+1])
axR.errorbar(sigma, IA_error_median[:, 1],
yerr=[IA_error_median[:, 1]- IA_error_min[:, 1],
IA_error_max[:, 1]-IA_error_median[:, 1]],
fmt=markers[2*d_idx], color=colors[2*d_idx],
label=descriptor + " FA", capsize=12, ms = ms[2*d_idx]);
axR.errorbar(sigma, ICP_error_median[:, 1],
yerr=[ICP_error_median[:, 1] - ICP_error_min[:, 1],
ICP_error_max[:, 1]- ICP_error_median[:, 1]],
fmt=markers[2*d_idx+1], color=colors[2*d_idx+1],
label=descriptor + " FA+ICP", capsize=12, ms=ms[2*d_idx+1])
#********************Detail Plot***************************
axT_detail.plot(sigma[0:2], IA_error_median[0:2, 2],
markers[2*d_idx], color=colors[2*d_idx],
label=descriptor + " FA");
axT_detail.plot(sigma[0:2], ICP_error_median[0:2, 2],
markers[2*d_idx+1], color=colors[2*d_idx+1],
label=descriptor + " FA+ICP", ms=14)
axR_detail.plot(sigma[0:2], IA_error_median[0:2, 1],
markers[2*d_idx], color=colors[2*d_idx],
label=descriptor + " FA");
axR_detail.plot(sigma[0:2], ICP_error_median[0:2, 1],
markers[2*d_idx+1], color=colors[2*d_idx+1],
label=descriptor + " FA+ICP", ms=14)
# Finalize axis labels and legends, then write all four figures as PDFs.
if plot_errors:
    # Translation-error figure.
    axT.set_xlabel(r'Camera Noise ($\sigma$)', fontsize=20)
    axT.set_ylabel('Error (meters)', fontsize=20)
    axT.legend(loc='upper left', frameon=False)
    figT.savefig(root_dir + "/T_error_pert.pdf",
                 transparent=True, pad_inches=5)

    # Rotation-error figure.
    axR.set_xlabel(r'Camera Noise ($\sigma$)', fontsize=20)
    axR.set_ylabel('Error (degrees)', fontsize=20)
    axR.legend(loc='upper left', frameon=False)
    figR.savefig(root_dir + "/R_error_pert.pdf",
                 transparent=True)

    # ******************** Detail plots ***************************
    figT_detail.savefig(root_dir + "/T_detail_error_pert.pdf", transparent=True)
    figR_detail.savefig(root_dir + "/R_detail_error_pert.pdf", transparent=True)

    plt.show()
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.