repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
ellisonbg/altair | altair/utils/core.py | 1 | 10929 | """
Utility routines
"""
import re
import warnings
import collections
from copy import deepcopy
import sys
import traceback
import six
import pandas as pd
import numpy as np
try:
from pandas.api.types import infer_dtype
except ImportError: # Pandas before 0.20.0
from pandas.lib import infer_dtype
from .schemapi import SchemaBase, Undefined
# Mapping from full vega-lite type names to their single-letter shorthand codes.
TYPECODE_MAP = {'ordinal': 'O',
                'nominal': 'N',
                'quantitative': 'Q',
                'temporal': 'T'}

# Inverse mapping: single-letter shorthand code -> full vega-lite type name.
INV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}
def infer_vegalite_type(data):
    """
    From an array-like input, infer the correct vega typecode
    ('ordinal', 'nominal', 'quantitative', or 'temporal')

    Parameters
    ----------
    data: Numpy array or Pandas Series
    """
    # Infer based on the dtype of the input
    typ = infer_dtype(data)

    # TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py

    # Group pandas' inferred dtype names by the vega-lite type they map to.
    quantitative = {'floating', 'mixed-integer-float', 'integer',
                    'mixed-integer', 'complex'}
    nominal = {'string', 'bytes', 'categorical', 'boolean', 'mixed', 'unicode'}
    temporal = {'datetime', 'datetime64', 'timedelta',
                'timedelta64', 'date', 'time', 'period'}

    if typ in quantitative:
        return 'quantitative'
    if typ in nominal:
        return 'nominal'
    if typ in temporal:
        return 'temporal'
    # Unknown dtype: warn and fall back to the most permissive type.
    warnings.warn("I don't know how to infer vegalite type from '{0}'. "
                  "Defaulting to nominal.".format(typ))
    return 'nominal'
def sanitize_dataframe(df):
    """Sanitize a DataFrame to prepare it for serialization.

    * Make a copy
    * Raise ValueError if it has a hierarchical index.
    * Convert categoricals to strings.
    * Convert np.bool_ dtypes to Python bool objects
    * Convert np.int dtypes to Python int objects
    * Convert floats to objects and replace NaNs by None.
    * Convert DateTime dtypes into appropriate string representations
    """
    df = df.copy()

    # BUG FIX: ``pd.core.index.MultiIndex`` and ``Series.iteritems`` were
    # removed in pandas 2.0; use the stable public names instead.
    if isinstance(df.index, pd.MultiIndex):
        raise ValueError('Hierarchical indices not supported')
    if isinstance(df.columns, pd.MultiIndex):
        raise ValueError('Hierarchical indices not supported')

    def to_list_if_array(val):
        # numpy arrays hidden inside object columns are not JSON serializable.
        if isinstance(val, np.ndarray):
            return val.tolist()
        else:
            return val

    for col_name, dtype in df.dtypes.items():
        if str(dtype) == 'category':
            # XXXX: work around bug in to_json for categorical types
            # https://github.com/pydata/pandas/issues/10778
            df[col_name] = df[col_name].astype(str)
        elif str(dtype) == 'bool':
            # convert numpy bools to objects; np.bool is not JSON serializable
            df[col_name] = df[col_name].astype(object)
        elif np.issubdtype(dtype, np.integer):
            # convert integers to objects; np.int is not JSON serializable
            df[col_name] = df[col_name].astype(object)
        elif np.issubdtype(dtype, np.floating):
            # For floats, convert nan->None: np.float is not JSON serializable
            col = df[col_name].astype(object)
            df[col_name] = col.where(col.notnull(), None)
        elif str(dtype).startswith('datetime'):
            # Convert datetimes to strings
            # astype(str) will choose the appropriate resolution
            df[col_name] = df[col_name].astype(str).replace('NaT', '')
        elif dtype == object:
            # Convert numpy arrays saved as objects to lists.
            # BUG FIX: Series.apply's ``convert_dtype`` keyword is deprecated
            # (removed in pandas 3.0); cast back to object explicitly instead.
            col = df[col_name].apply(to_list_if_array).astype(object)
            df[col_name] = col.where(col.notnull(), None)
    return df
def _parse_shorthand(shorthand):
    """
    Parse the shorthand expression for aggregation, field, and type.

    These are of the form:
    - "col_name"
    - "col_name:O"
    - "average(col_name)"
    - "average(col_name):O"

    Parameters
    ----------
    shorthand: str
        Shorthand string

    Returns
    -------
    D : dict
        Dictionary containing the field, aggregate, and typecode
    """
    if not shorthand:
        return {}

    # List taken from vega-lite v2 AggregateOp
    valid_aggregates = ["argmax", "argmin", "average", "count", "distinct",
                        "max", "mean", "median", "min", "missing", "q1", "q3",
                        "ci0", "ci1", "stderr", "stdev", "stdevp", "sum",
                        "valid", "values", "variance", "variancep"]
    valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)

    # build regular expressions
    units = dict(field='(?P<field>.*)',
                 type='(?P<type>{0})'.format('|'.join(valid_typecodes)),
                 count='(?P<aggregate>count)',
                 aggregate='(?P<aggregate>{0})'.format('|'.join(valid_aggregates)))
    patterns = [r'{count}\(\)',
                r'{count}\(\):{type}',
                r'{aggregate}\({field}\):{type}',
                r'{aggregate}\({field}\)',
                r'{field}:{type}',
                r'{field}']
    # BUG FIX: '\A' and '\Z' were written in non-raw strings — invalid string
    # escape sequences that raise SyntaxWarning on modern Python. Use raw
    # strings for the regex anchors.
    regexps = (re.compile(r'\A' + p.format(**units) + r'\Z', re.DOTALL)
               for p in patterns)

    # Find the first matching pattern; match each regex only once instead of
    # twice as before. The bare '{field}' pattern always matches, so this
    # never raises StopIteration.
    match = next(m.groupdict()
                 for m in (regexp.match(shorthand) for regexp in regexps)
                 if m is not None)

    # Handle short form of the type expression
    type_ = match.get('type', None)
    if type_:
        match['type'] = INV_TYPECODE_MAP.get(type_, type_)

    # counts are quantitative by default
    if match == {'aggregate': 'count'}:
        match['type'] = 'quantitative'

    return match
def parse_shorthand(shorthand, data=None):
    """Parse the shorthand expression for aggregation, field, and type.

    These are of the form:
    - "col_name"
    - "col_name:O"
    - "average(col_name)"
    - "average(col_name):O"

    Optionally, a dataframe may be supplied, from which the type
    will be inferred if not specified in the shorthand.

    Parameters
    ----------
    shorthand: str
        Shorthand string of the form "agg(col):typ"
    data : pd.DataFrame (optional)
        Dataframe from which to infer types

    Returns
    -------
    D : dict
        Dictionary which always contains a 'field' key, and additionally
        contains an 'aggregate' and 'type' key depending on the input.

    Examples
    --------
    >>> data = pd.DataFrame({'foo': ['A', 'B', 'A', 'B'],
    ...                      'bar': [1, 2, 3, 4]})

    >>> parse_shorthand('name')
    {'field': 'name'}

    >>> parse_shorthand('average(col)')  # doctest: +SKIP
    {'aggregate': 'average', 'field': 'col'}

    >>> parse_shorthand('foo:O')  # doctest: +SKIP
    {'field': 'foo', 'type': 'ordinal'}

    >>> parse_shorthand('min(foo):Q')  # doctest: +SKIP
    {'aggregate': 'min', 'field': 'foo', 'type': 'quantitative'}

    >>> parse_shorthand('foo', data)  # doctest: +SKIP
    {'field': 'foo', 'type': 'nominal'}

    >>> parse_shorthand('bar', data)  # doctest: +SKIP
    {'field': 'bar', 'type': 'quantitative'}

    >>> parse_shorthand('bar:O', data)  # doctest: +SKIP
    {'field': 'bar', 'type': 'ordinal'}

    >>> parse_shorthand('sum(bar)', data)  # doctest: +SKIP
    {'aggregate': 'sum', 'field': 'bar', 'type': 'quantitative'}

    >>> parse_shorthand('count()', data)  # doctest: +SKIP
    {'aggregate': 'count', 'type': 'quantitative'}
    """
    parsed = _parse_shorthand(shorthand)
    # When a dataframe is given and the shorthand carried no explicit type,
    # fall back to inferring the type from the named column, if present.
    if isinstance(data, pd.DataFrame) and 'type' not in parsed:
        if 'field' in parsed and parsed['field'] in data.columns:
            parsed['type'] = infer_vegalite_type(data[parsed['field']])
    return parsed
def use_signature(Obj):
    """Apply call signature and documentation of Obj to the decorated method"""
    def decorate(f):
        # Expose Obj.__init__'s call-signature through __wrapped__ so that
        # introspection tools see f as mimicking Obj's constructor.
        f.__wrapped__ = Obj.__init__
        f._uses_signature = Obj

        # Merge Obj's docstring into f's: keep f's own doc (if any) as the
        # lead-in, then append everything after Obj's summary line.
        doc_lines = Obj.__doc__.splitlines()
        merged = (f.__doc__ + '\n'.join(doc_lines[1:])) if f.__doc__ \
            else '\n'.join(doc_lines)
        try:
            f.__doc__ = merged
        except AttributeError:
            # __doc__ is not modifiable for classes in Python < 3.3
            pass

        return f
    return decorate
def update_subtraits(obj, attrs, **kwargs):
    """Recursively update sub-traits without overwriting other traits"""
    # TODO: infer keywords from args
    if not kwargs:
        return obj

    # Normalize obj into a plain dict view we can mutate:
    # obj can be a SchemaBase object or a dict
    if obj is Undefined:
        obj = dct = {}
    elif isinstance(obj, SchemaBase):
        dct = obj._kwds
    else:
        dct = obj

    # A bare string means a single-element attribute path.
    if isinstance(attrs, six.string_types):
        attrs = (attrs,)

    if not attrs:
        # Reached the target level: merge the keyword updates in place.
        dct.update(kwargs)
        return obj

    # Descend one level along the attribute path, creating dicts as needed.
    head, rest = attrs[0], attrs[1:]
    trait = dct.get(head, Undefined)
    if trait is Undefined:
        trait = dct[head] = {}
    dct[head] = update_subtraits(trait, rest, **kwargs)
    return obj
def update_nested(original, update, copy=False):
    """Update nested dictionaries

    Parameters
    ----------
    original : dict
        the original (nested) dictionary, which will be updated in-place
    update : dict
        the nested dictionary of updates
    copy : bool, default False
        if True, then copy the original dictionary rather than modifying it

    Returns
    -------
    original : dict
        a reference to the (modified) original dict

    Examples
    --------
    >>> original = {'x': {'b': 2, 'c': 4}}
    >>> update = {'x': {'b': 5, 'd': 6}, 'y': 40}
    >>> update_nested(original, update)  # doctest: +SKIP
    {'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
    >>> original  # doctest: +SKIP
    {'x': {'b': 5, 'c': 4, 'd': 6}, 'y': 40}
    """
    # BUG FIX: the abstract base classes were removed from the top-level
    # ``collections`` namespace in Python 3.10; they live in collections.abc.
    from collections.abc import Mapping

    if copy:
        original = deepcopy(original)
    for key, val in update.items():
        if isinstance(val, Mapping):
            orig_val = original.get(key, {})
            if isinstance(orig_val, Mapping):
                # Both sides are mappings: recurse to merge rather than clobber.
                original[key] = update_nested(orig_val, val)
            else:
                original[key] = val
        else:
            original[key] = val
    return original
def write_file_or_filename(fp, content, mode='w'):
    """Write content to fp, whether fp is a string or a file-like object"""
    # File-like objects are written to directly; strings are treated as paths.
    if not isinstance(fp, six.string_types):
        fp.write(content)
        return
    with open(fp, mode) as f:
        f.write(content)
def display_traceback(in_ipython=True):
    # Capture the currently-handled exception for display.
    exc_info = sys.exc_info()

    ip = None
    if in_ipython:
        # Imported lazily; get_ipython() returns None outside an IPython shell.
        from IPython.core.getipython import get_ipython
        ip = get_ipython()

    if ip is None:
        # Plain-Python fallback: print the traceback to stderr.
        traceback.print_exception(*exc_info)
    else:
        ip.showtraceback(exc_info)
| bsd-3-clause |
IBT-FMI/SAMRI | samri/development.py | 1 | 5858 | # -*- coding: utf-8 -*-
# Development work, e.g. for higher level functions.
# These functions are not intended to work on any machine or pass the tests.
# They are early drafts (e.g. of higher level workflows) intended to be shared among select collaborators or multiple machines of one collaborator.
# Please don't edit functions which are not yours, and only perform imports in local scope.
def vta_full(
        workflow_name='generic',
        ):
    """Draft of the full VTA analysis chain: preprocessing, first-level GLM,
    and responder identification by per-scan significance. The preprocessing
    and GLM stages are currently commented out and kept for reference; only
    the responder-selection stage runs.

    Parameters
    ----------
    workflow_name : str, optional
        Name under which preprocessing/GLM outputs are (or were) filed.
    """
    from labbookdb.report.development import animal_multiselect
    from samri.pipelines import glm
    from samri.pipelines.preprocess import full_prep
    from samri.report.snr import iter_significant_signal
    from samri.utilities import bids_autofind

    # Assuming data converted to BIDS
    bids_base = '~/ni_data/ofM.vta/bids'
    #full_prep(bids_base, '/usr/share/mouse-brain-templates/dsurqec_200micron.nii',
    #	registration_mask='/usr/share/mouse-brain-templates/dsurqec_200micron_mask.nii',
    #	functional_match={'type':['cbv',],},
    #	structural_match={'acquisition':['TurboRARE']},
    #	actual_size=True,
    #	functional_registration_method='composite',
    #	negative_contrast_agent=True,
    #	out_dir='~/ni_data/ofM.vta/preprocessing',
    #	workflow_name=workflow_name,
    #	)
    #glm.l1('~/ni_data/ofM.vta/preprocessing/generic',
    #	out_dir='~/ni_data/ofM.vta/l1',
    #	workflow_name=workflow_name,
    #	habituation="confound",
    #	mask='/usr/share/mouse-brain-templates/dsurqec_200micron_mask.nii',
    #	# We need the workdir to extract the betas
    #	keep_work=True,
    #	)

    # Determining Responders by Significance
    # Locate the per-scan p-statistic maps produced by the level-1 GLM.
    path_template, substitutions = bids_autofind('~/ni_data/ofM.vta/l1/generic/',
        path_template="{bids_dir}/sub-{{subject}}/ses-{{session}}/sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_cbv_pfstat.nii.gz",
        match_regex='.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+)_cbv_pfstat\.nii.gz',
        )
    print(substitutions)
    iter_significant_signal(path_template,
        substitutions=substitutions,
        mask_path='/usr/share/mouse-brain-templates/dsurqec_200micron_mask.nii',
        # NOTE(review): this output path is under 'ofM.dr/vta' while the rest
        # of the workflow uses 'ofM.vta' -- confirm this is intentional.
        save_as='~/ni_data/ofM.dr/vta/generic/total_significance.csv'
        )
def temporal_qc_separate():
    """Plot timecourse quality-control figures for a set of phantom scans,
    writing one PDF per acquisition protocol (development draft with
    hard-coded, machine-specific paths)."""
    import matplotlib.pyplot as plt
    import matplotlib.ticker as plticker
    import numpy as np
    import pandas as pd
    from samri.report.snr import base_metrics
    from samri.plotting.timeseries import multi
    from samri.utilities import bids_substitution_iterator

    # One substitution dict per subject/session/task/acquisition combination.
    substitutions = bids_substitution_iterator(
        ['testSTIM'],
        ['COILphantom'],
        ['CcsI'],
        '/home/chymera/ni_data/phantoms/',
        'bids',
        acquisitions=['EPIalladj','EPIcopyadjNODUM','EPIcopyadj','EPImoveGOP'],
        )
    for i in substitutions:
        # NOTE(review): the path template passed to base_metrics() is not
        # .format()-ed with the substitution dict here (unlike the events path
        # below) -- presumably base_metrics formats it internally; confirm.
        timecourses = base_metrics('{data_dir}/{preprocessing_dir}/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}.nii', i)
        events_df = pd.read_csv('{data_dir}/{preprocessing_dir}/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}_events.tsv'.format(**i), sep='\t')
        # One figure per acquisition, named after the acquisition protocol.
        multi(timecourses,
            designs=[],
            events_dfs=[events_df],
            subplot_titles='acquisition',
            quantitative=False,
            save_as='temp_{acquisition}.pdf'.format(**i),
            samri_style=True,
            ax_size=[16,6],
            unit_ticking=True,
            )
def temporal_qc_al_in_one():
    """Plot timecourse quality-control figures for a set of phantom scans in
    a single combined PDF (development draft with hard-coded paths)."""
    import matplotlib.pyplot as plt
    import matplotlib.ticker as plticker
    import numpy as np
    from samri.report.snr import iter_base_metrics
    from samri.plotting.timeseries import multi
    from samri.utilities import bids_substitution_iterator

    # One substitution dict per subject/session/task/acquisition combination.
    substitutions = bids_substitution_iterator(
        ['testSTIM'],
        ['COILphantom'],
        ['CcsI'],
        '/home/chymera/ni_data/phantoms/',
        'bids',
        acquisitions=['EPIalladj','EPIcopyadjNODUM','EPIcopyadj','EPImoveGOP'],
        )
    # iter_base_metrics resolves the path template over all substitutions,
    # yielding one timecourse per scan.
    timecourses = iter_base_metrics('{data_dir}/{preprocessing_dir}/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}.nii', substitutions)
    # All acquisitions are plotted into one combined document.
    multi(timecourses,
        designs=[],
        events_dfs=[],
        subplot_titles='acquisition',
        quantitative=False,
        save_as="temp_qc.pdf",
        samri_style=True,
        ax_size=[16,6],
        unit_ticking=True,
        )
def reg_cc(
        path = "~/ni_data/ofM.dr/preprocessing/composite",
        template = '/usr/share/mouse-brain-templates/dsurqec_200micron.nii',
        radius=8,
        autofind=False,
        plot=False,
        save = "f_reg_quality",
        metrics = ['CC','GC','MI'],
        ):
    """Compute registration-quality similarity metrics for preprocessed scans
    against a template, writing one CSV per metric (development draft with
    machine-specific default paths).

    Parameters
    ----------
    path : str
        Root of the preprocessed data tree.
    template : str
        Reference template image the scans were registered to.
    radius : int
        Radius or number of histogram bins passed to the similarity measure.
    autofind : bool
        If True, discover files via BIDS auto-detection instead of the
        hard-coded path template and substitution lists below.
    plot : bool
        NOTE(review): currently unused in this draft.
    save : str
        Basename prefix for the per-metric CSV outputs.
    metrics : list of str
        Similarity metrics to evaluate (e.g. 'CC', 'GC', 'MI').
    """
    from samri.utilities import bids_autofind
    from samri.plotting.aggregate import registration_qc
    from samri.report.registration import iter_measure_sim
    from samri.typesetting import inline_anova
    from samri.utilities import bids_substitution_iterator

    if autofind:
        path_template, substitutions = bids_autofind(path,"func")
    else:
        path_template = "{data_dir}/preprocessing/{preprocessing_dir}/sub-{subject}/ses-{session}/func/sub-{subject}_ses-{session}_acq-{acquisition}_task-{task}_cbv.nii.gz"
        substitutions = bids_substitution_iterator(
            ["ofM", "ofMaF", "ofMcF1", "ofMcF2", "ofMpF"],
            ["4001","4007","4008","4011","5692","5694","5699","5700","5704","6255","6262"],
            ["CogB","JogB"],
            "~/ni_data/ofM.dr/",
            "composite",
            acquisitions=['EPI','EPIlowcov'],
            validate_for_template=path_template,
            )
    # One CSV per requested similarity metric.
    for metric in metrics:
        df = iter_measure_sim(path_template, substitutions,
            template,
            metric=metric,
            radius_or_number_of_bins=radius,
            sampling_strategy="Regular",
            sampling_percentage=0.33,
            save_as= save + "_" + metric + ".csv",
            )
| gpl-3.0 |
nelango/ViralityAnalysis | model/lib/nltk/sentiment/util.py | 3 | 30992 | # coding: utf-8
#
# Natural Language Toolkit: Sentiment Analyzer
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility methods for Sentiment Analysis.
"""
from copy import deepcopy
import codecs
import csv
import json
import pickle
import random
import re
import sys
import time
import nltk
from nltk.corpus import CategorizedPlaintextCorpusReader
from nltk.data import load
from nltk.tokenize.casual import EMOTICON_RE
from nltk.twitter.common import outf_writer_compat, extract_fields
#////////////////////////////////////////////////////////////
#{ Regular expressions
#////////////////////////////////////////////////////////////
# Regular expression for negation by Christopher Potts
NEGATION = r"""
(?:
^(?:never|no|nothing|nowhere|noone|none|not|
havent|hasnt|hadnt|cant|couldnt|shouldnt|
wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint
)$
)
|
n't"""
NEGATION_RE = re.compile(NEGATION, re.VERBOSE)
CLAUSE_PUNCT = r'^[.:;!?]$'
CLAUSE_PUNCT_RE = re.compile(CLAUSE_PUNCT)
# Happy and sad emoticons
HAPPY = set([
':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}',
':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D',
'=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P',
'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)',
'<3'
])
SAD = set([
':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<',
':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c',
':c', ':{', '>:\\', ';('
])
def timer(method):
    """
    A timer decorator to measure execution performance of methods.

    :param method: the callable to wrap; its return value is passed through.
    """
    def timed(*args, **kw):
        start = time.time()
        result = method(*args, **kw)
        end = time.time()
        tot_time = end - start
        hours = int(tot_time / 3600)
        mins = int((tot_time / 60) % 60)
        # in Python 2.x round() will return a float, so we convert it to int
        secs = int(round(tot_time % 60))
        if hours == 0 and mins == 0 and secs < 10:
            # BUG FIX: the original format string mixed explicit ({0}) and
            # automatic ({:.3f}) field numbering, which raises ValueError.
            print('[TIMER] {0}(): {1:.3f} seconds'.format(method.__name__, tot_time))
        else:
            print('[TIMER] {0}(): {1}h {2}m {3}s'.format(method.__name__, hours, mins, secs))
        return result
    return timed
#////////////////////////////////////////////////////////////
#{ Feature extractor functions
#////////////////////////////////////////////////////////////
"""
Feature extractor functions are declared outside the SentimentAnalyzer class.
Users should have the possibility to create their own feature extractors
without modifying SentimentAnalyzer.
"""
def extract_unigram_feats(document, unigrams, handle_negation=False):
    """
    Populate a dictionary of unigram features, reflecting the presence/absence in
    the document of each of the tokens in `unigrams`.

    :param document: a list of words/tokens.
    :param unigrams: a list of words/tokens whose presence/absence has to be
        checked in `document`.
    :param handle_negation: if `handle_negation == True` apply `mark_negation`
        method to `document` before checking for unigram presence/absence.
    :return: a dictionary of unigram features {unigram : boolean}.

    >>> words = ['ice', 'police', 'riot']
    >>> document = 'ice is melting due to global warming'.split()
    >>> sorted(extract_unigram_feats(document, words).items())
    [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)]
    """
    if handle_negation:
        document = mark_negation(document)
    # Membership is tested against the set of document tokens.
    tokens = set(document)
    return {'contains({0})'.format(word): word in tokens for word in unigrams}
def extract_bigram_feats(document, bigrams):
    """
    Populate a dictionary of bigram features, reflecting the presence/absence in
    the document of each of the tokens in `bigrams`. This extractor function only
    considers contiguous bigrams obtained by `nltk.bigrams`.

    :param document: a list of words/tokens.
    :param bigrams: a list of bigrams whose presence/absence has to be
        checked in `document`.
    :return: a dictionary of bigram features {bigram : boolean}.

    >>> bigrams = [('global', 'warming'), ('police', 'prevented'), ('love', 'you')]
    >>> document = 'ice is melting due to global warming'.split()
    >>> sorted(extract_bigram_feats(document, bigrams).items())
    [('contains(global - warming)', True), ('contains(love - you)', False),
    ('contains(police - prevented)', False)]
    """
    # Materialize the document's contiguous bigrams once for O(1) membership
    # tests (the result is identical to checking the generator per bigram).
    present = set(nltk.bigrams(document))
    return {
        'contains({0} - {1})'.format(first, second): (first, second) in present
        for first, second in bigrams
    }
#////////////////////////////////////////////////////////////
#{ Helper Functions
#////////////////////////////////////////////////////////////
def mark_negation(document, double_neg_flip=False, shallow=False):
    """
    Append _NEG suffix to words that appear in the scope between a negation
    and a punctuation mark.

    :param document: a list of words/tokens, or a tuple (words, label).
    :param shallow: if True, the method will modify the original document in place.
    :param double_neg_flip: if True, double negation is considered affirmation
        (we activate/deactivate negation scope everytime we find a negation).
    :return: if `shallow == True` the method will modify the original document
        and return it. If `shallow == False` the method will return a modified
        document, leaving the original unmodified.

    >>> sent = "I didn't like this movie . It was bad .".split()
    >>> mark_negation(sent)
    ['I', "didn't", 'like_NEG', 'this_NEG', 'movie_NEG', '.', 'It', 'was', 'bad', '.']
    """
    if not shallow:
        document = deepcopy(document)
    # A labeled document is a (tokens, label) pair; only modify the tokens.
    if document and isinstance(document[0], (tuple, list)):
        doc = document[0]
    else:
        doc = document
    in_scope = False
    for idx, token in enumerate(doc):
        if NEGATION_RE.search(token):
            if in_scope and not double_neg_flip:
                # A negation word inside an already-open scope is itself
                # marked (scope stays open).
                doc[idx] += '_NEG'
            else:
                # Open the scope — or, with double_neg_flip, toggle it.
                in_scope = not in_scope
        elif in_scope:
            if CLAUSE_PUNCT_RE.search(token):
                # Clause punctuation closes the negation scope.
                in_scope = not in_scope
            else:
                doc[idx] += '_NEG'
    return document
def output_markdown(filename, **kwargs):
    """
    Append the results of an analysis to `filename` as a markdown section.
    """
    # Assemble the whole section first, then append it in one write.
    chunks = ['\n*** \n\n']
    chunks.append('{0} \n\n'.format(time.strftime("%d/%m/%Y, %H:%M")))
    for key in sorted(kwargs):
        value = kwargs[key]
        if isinstance(value, dict):
            # Dict values become a sub-list of key: value entries.
            chunks.append(' - **{0}:**\n'.format(key))
            for entry in sorted(value):
                chunks.append(' - {0}: {1} \n'.format(entry, value[entry]))
        elif isinstance(value, list):
            # List values become a sub-list, one bullet per element.
            chunks.append(' - **{0}:**\n'.format(key))
            for entry in value:
                chunks.append(' - {0}\n'.format(entry))
        else:
            chunks.append(' - **{0}:** {1} \n'.format(key, value))
    with codecs.open(filename, 'at') as outfile:
        outfile.write(''.join(chunks))
def save_file(content, filename):
    """
    Store `content` in `filename`. Can be used to store a SentimentAnalyzer.
    """
    print("Saving", filename)
    # protocol=2 keeps the pickle readable from Python 2 as well.
    with codecs.open(filename, 'wb') as fh:
        pickle.dump(content, fh, protocol=2)
def split_train_test(all_instances, n=None):
    """
    Randomly split `n` instances of the dataset into train and test sets.

    :param all_instances: a list of instances (e.g. documents) that will be split.
    :param n: the number of instances to consider (in case we want to use only a
        subset).
    :return: two lists of instances. Train set is 8/10 of the total and test set
        is 2/10 of the total.
    """
    # Fixed seed so repeated runs produce the same split.
    random.seed(12345)
    random.shuffle(all_instances)
    total = len(all_instances)
    if not n or n > total:
        n = total
    cut = int(.8 * n)
    return all_instances[:cut], all_instances[cut:n]
def _show_plot(x_values, y_values, x_labels=None, y_labels=None):
    # Render a red-dot plot of y_values against x_values; the fixed y-limits
    # suggest values in [-1, 1] (sentiment scores) -- confirm with callers.
    # matplotlib is imported lazily because it is an optional dependency.
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError('The plot function requires matplotlib to be installed.'
                         'See http://matplotlib.org/')

    plt.locator_params(axis='y', nbins=3)
    axes = plt.axes()
    axes.yaxis.grid()
    plt.plot(x_values, y_values, 'ro', color='red')
    plt.ylim(ymin=-1.2, ymax=1.2)
    plt.tight_layout(pad=5)
    if x_labels:
        plt.xticks(x_values, x_labels, rotation='vertical')
    if y_labels:
        plt.yticks([-1, 0, 1], y_labels, rotation='horizontal')
    # Pad margins so that markers are not clipped by the axes
    plt.margins(0.2)
    # Blocks in interactive backends until the window is closed.
    plt.show()
#////////////////////////////////////////////////////////////
#{ Parsing and conversion functions
#////////////////////////////////////////////////////////////
def json2csv_preprocess(json_file, outfile, fields, encoding='utf8', errors='replace',
                        gzip_compress=False, skip_retweets=True, skip_tongue_tweets=True,
                        skip_ambiguous_tweets=True, strip_off_emoticons=True, remove_duplicates=True,
                        limit=None):
    """
    Convert json file to csv file, preprocessing each row to obtain a suitable
    dataset for tweets Semantic Analysis.

    :param json_file: the original json file containing tweets.
    :param outfile: the output csv filename.
    :param fields: a list of fields that will be extracted from the json file and
        kept in the output csv file.
    :param encoding: the encoding of the files.
    :param errors: the error handling strategy for the output writer.
    :param gzip_compress: if True, create a compressed GZIP file.
    :param skip_retweets: if True, remove retweets.
    :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P"
        emoticons.
    :param skip_ambiguous_tweets: if True, remove tweets containing both happy
        and sad emoticons.
    :param strip_off_emoticons: if True, strip off emoticons from all tweets.
    :param remove_duplicates: if True, remove tweets appearing more than once.
    :param limit: an integer to set the number of tweets to convert. After the
        limit is reached the conversion will stop. It can be useful to create
        subsets of the original tweets json data.
    """
    with codecs.open(json_file, encoding=encoding) as fp:
        (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
        # write the list of fields as header
        writer.writerow(fields)
        if remove_duplicates == True:
            # Cache of tweet texts already written, for duplicate detection.
            tweets_cache = []
        i = 0
        # The input is one JSON object (tweet) per line.
        for line in fp:
            tweet = json.loads(line)
            row = extract_fields(tweet, fields)
            try:
                # fields.index('text') raises ValueError if there is no 'text'
                # field; in that case all filtering below is skipped and the
                # row is written as-is.
                text = row[fields.index('text')]
                # Remove retweets
                if skip_retweets == True:
                    if re.search(r'\bRT\b', text):
                        continue
                # Remove tweets containing ":P" and ":-P" emoticons
                if skip_tongue_tweets == True:
                    if re.search(r'\:\-?P\b', text):
                        continue
                # Remove tweets containing both happy and sad emoticons
                if skip_ambiguous_tweets == True:
                    all_emoticons = EMOTICON_RE.findall(text)
                    if all_emoticons:
                        if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD):
                            continue
                # Strip off emoticons from all tweets
                if strip_off_emoticons == True:
                    # Also collapse the whitespace left behind by removal.
                    row[fields.index('text')] = re.sub(r'(?!\n)\s+', ' ', EMOTICON_RE.sub('', text))
                # Remove duplicate tweets
                if remove_duplicates == True:
                    if row[fields.index('text')] in tweets_cache:
                        continue
                    else:
                        tweets_cache.append(row[fields.index('text')])
            except ValueError:
                pass
            writer.writerow(row)
            i += 1
            # Stop early once the requested number of tweets is written.
            if limit and i >= limit:
                break
        outf.close()
def parse_tweets_set(filename, label, word_tokenizer=None, sent_tokenizer=None,
                     skip_header=True):
    """
    Parse csv file containing tweets and output data a list of (text, label) tuples.

    :param filename: the input csv filename.
    :param label: the label to be appended to each tweet contained in the csv file.
    :param word_tokenizer: the tokenizer instance that will be used to tokenize
        each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()).
        If no word_tokenizer is specified, tweets will not be tokenized.
    :param sent_tokenizer: the tokenizer that will be used to split each tweet into
        sentences.
    :param skip_header: if True, skip the first line of the csv file (which usually
        contains headers).

    :return: a list of (text, label) tuples.
    """
    tweets = []
    if not sent_tokenizer:
        # Default sentence tokenizer: the pre-trained English Punkt model.
        sent_tokenizer = load('tokenizers/punkt/english.pickle')

    # If we use Python3.x we can proceed using the 'rt' flag
    if sys.version_info[0] == 3:
        with codecs.open(filename, 'rt') as csvfile:
            reader = csv.reader(csvfile)
            if skip_header == True:
                next(reader, None)  # skip the header
            i = 0
            # Each row is expected to be (tweet_id, text).
            for tweet_id, text in reader:
                # text = text[1]
                i += 1
                sys.stdout.write('Loaded {0} tweets\r'.format(i))
                # Apply sentence and word tokenizer to text
                if word_tokenizer:
                    tweet = [w for sent in sent_tokenizer.tokenize(text)
                             for w in word_tokenizer.tokenize(sent)]
                else:
                    tweet = text
                tweets.append((tweet, label))
    # If we use Python2.x we need to handle encoding problems
    elif sys.version_info[0] < 3:
        with codecs.open(filename) as csvfile:
            reader = csv.reader(csvfile)
            if skip_header == True:
                next(reader, None)  # skip the header
            i = 0
            for row in reader:
                # Decode every cell explicitly; the text is the second column.
                unicode_row = [x.decode('utf8') for x in row]
                text = unicode_row[1]
                i += 1
                sys.stdout.write('Loaded {0} tweets\r'.format(i))
                # Apply sentence and word tokenizer to text
                if word_tokenizer:
                    tweet = [w.encode('utf8') for sent in sent_tokenizer.tokenize(text)
                             for w in word_tokenizer.tokenize(sent)]
                else:
                    tweet = text
                tweets.append((tweet, label))

    print("Loaded {0} tweets".format(i))
    return tweets
#////////////////////////////////////////////////////////////
#{ Demos
#////////////////////////////////////////////////////////////
def demo_tweets(trainer, n_instances=None, output=None):
    """
    Train and test Naive Bayes classifier on 10000 tweets, tokenized using
    TweetTokenizer.
    Features are composed of:
        - 1000 most frequent unigrams
        - 100 top bigrams (using BigramAssocMeasures.pmi)

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total tweets that have to be used for
        training and testing. Tweets will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.tokenize import TweetTokenizer
    # NOTE(review): upstream NLTK imports SentimentAnalyzer from
    # nltk.sentiment -- confirm 'sentiment_analyzer' resolves in this project.
    from sentiment_analyzer import SentimentAnalyzer
    from nltk.corpus import twitter_samples, stopwords

    # Different customizations for the TweetTokenizer
    tokenizer = TweetTokenizer(preserve_case=False)
    # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True)
    # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True)

    if n_instances is not None:
        # Half of the instances come from each class.
        n_instances = int(n_instances/2)

    fields = ['id', 'text']
    # Convert the bundled twitter_samples JSON into filtered CSVs.
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances)

    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances)

    neg_docs = parse_tweets_set(negative_csv, label='neg', word_tokenizer=tokenizer)
    pos_docs = parse_tweets_set(positive_csv, label='pos', word_tokenizer=tokenizer)

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_tweets = train_pos_docs+train_neg_docs
    testing_tweets = test_pos_docs+test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    # stopwords = stopwords.words('english')
    # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords]
    all_words = [word for word in sentim_analyzer.all_words(training_tweets)]

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Add bigram collocation features
    bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats([tweet[0] for tweet in training_tweets],
        top_n=100, min_freq=12)
    sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats)

    training_set = sentim_analyzer.apply_features(training_tweets)
    test_set = sentim_analyzer.apply_features(testing_tweets)

    classifier = sentim_analyzer.train(trainer, training_set)
    # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        # Not all classifiers expose feature introspection.
        print('Your classifier does not provide a show_most_informative_features() method.')

    results = sentim_analyzer.evaluate(test_set)

    if output:
        # Report the run (dataset, classifier, features, scores) as markdown.
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='labeled_tweets', Classifier=type(classifier).__name__,
                        Tokenizer=tokenizer.__class__.__name__, Feats=extr,
                        Results=results, Instances=n_instances)
def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train classifier on all instances of the Movie Reviews dataset.
    The corpus has been preprocessed using the default sentence tokenizer and
    WordPunctTokenizer.
    Features are composed of:
        - most frequent unigrams

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total reviews that have to be used for
        training and testing. Reviews will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.corpus import movie_reviews
    # NOTE(review): upstream NLTK imports SentimentAnalyzer from
    # nltk.sentiment -- confirm 'sentiment_analyzer' resolves in this project.
    from sentiment_analyzer import SentimentAnalyzer

    if n_instances is not None:
        # Half of the instances come from each class.
        n_instances = int(n_instances/2)

    # Each document is (list_of_tokens, label).
    pos_docs = [(list(movie_reviews.words(pos_id)), 'pos') for pos_id in movie_reviews.fileids('pos')[:n_instances]]
    neg_docs = [(list(movie_reviews.words(neg_id)), 'neg') for neg_id in movie_reviews.fileids('neg')[:n_instances]]

    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_docs = train_pos_docs+train_neg_docs
    testing_docs = test_pos_docs+test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words = sentim_analyzer.all_words(training_docs)

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        # Not all classifiers expose feature introspection.
        print('Your classifier does not provide a show_most_informative_features() method.')

    results = sentim_analyzer.evaluate(test_set)

    if output:
        # Report the run (dataset, classifier, features, scores) as markdown.
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='Movie_reviews', Classifier=type(classifier).__name__,
                        Tokenizer='WordPunctTokenizer', Feats=extr, Results=results,
                        Instances=n_instances)
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None):
    """
    Train and test a classifier on instances of the Subjective Dataset by Pang and
    Lee. The dataset is made of 5000 subjective and 5000 objective sentences.
    All tokens (words and punctuation marks) are separated by a whitespace, so
    we use the basic WhitespaceTokenizer to parse the data.

    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file.
    :param n_instances: the number of total sentences that have to be used for
        training and testing. Sentences will be equally split between positive
        and negative.
    :param output: the output file where results have to be reported.
    :return: the trained SentimentAnalyzer.
    """
    from sentiment_analyzer import SentimentAnalyzer
    from nltk.corpus import subjectivity

    if n_instances is not None:
        # Half of the requested instances come from each class.
        n_instances = int(n_instances/2)

    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_subj_docs, test_subj_docs = split_train_test(subj_docs)
    train_obj_docs, test_obj_docs = split_train_test(obj_docs)
    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs

    sentim_analyzer = SentimentAnalyzer()
    # mark_negation tags tokens that occur in a negated context so that
    # negation is reflected in the unigram features.
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])

    # Add simple unigram word features handling negation.
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets.
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')

    results = sentim_analyzer.evaluate(test_set)

    # Fixed: test truthiness directly instead of comparing `== True` (PEP 8).
    if save_analyzer:
        save_file(sentim_analyzer, 'sa_subjectivity.pickle')

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__,
                        Tokenizer='WhitespaceTokenizer', Feats=extr,
                        Instances=n_instances, Results=results)
    return sentim_analyzer
def demo_sent_subjectivity(text):
    """
    Classify a single sentence as subjective or objective using a stored
    SentimentAnalyzer.

    :param text: a sentence whose subjectivity has to be classified.
    """
    from nltk.classify import NaiveBayesClassifier
    from nltk.tokenize import regexp

    tokenizer = regexp.WhitespaceTokenizer()
    # Reuse a previously pickled analyzer when available; otherwise train a
    # fresh one (and save it) via demo_subjectivity.
    try:
        analyzer = load('sa_subjectivity.pickle')
    except LookupError:
        print('Cannot find the sentiment analyzer you want to load.')
        print('Training a new one using NaiveBayesClassifier.')
        analyzer = demo_subjectivity(NaiveBayesClassifier.train, True)

    # Tokenize and convert to lower case before classifying.
    tokens = [token.lower() for token in tokenizer.tokenize(text)]
    print(analyzer.classify(tokens))
def demo_liu_hu_lexicon(sentence, plot=False):
    """
    Basic example of sentiment classification using Liu and Hu opinion lexicon.
    This function simply counts the number of positive, negative and neutral words
    in the sentence and classifies it depending on which polarity is more represented.
    Words that do not appear in the lexicon are considered as neutral.

    :param sentence: a sentence whose polarity has to be classified.
    :param plot: if True, plot a visual representation of the sentence polarity.
    """
    from nltk.corpus import opinion_lexicon
    from nltk.tokenize import treebank

    tokenizer = treebank.TreebankWordTokenizer()
    tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)]

    # Perf fix: the original tested membership against
    # opinion_lexicon.positive()/negative() inside the loop, i.e. an O(V)
    # list scan per token. Build the sets once for O(1) lookups.
    positive_words = set(opinion_lexicon.positive())
    negative_words = set(opinion_lexicon.negative())

    pos_words = 0
    neg_words = 0
    x = list(range(len(tokenized_sent)))  # x axis for the plot
    y = []
    for word in tokenized_sent:
        if word in positive_words:
            pos_words += 1
            y.append(1)  # positive
        elif word in negative_words:
            neg_words += 1
            y.append(-1)  # negative
        else:
            y.append(0)  # neutral

    if pos_words > neg_words:
        print('Positive')
    elif pos_words < neg_words:
        print('Negative')
    else:
        # Equal counts (including all-neutral sentences).
        print('Neutral')

    # Fixed: test truthiness directly instead of comparing `== True` (PEP 8).
    if plot:
        _show_plot(x, y, x_labels=tokenized_sent, y_labels=['Negative', 'Neutral', 'Positive'])
def demo_vader_instance(text):
    """
    Output polarity scores for a text using Vader approach.

    :param text: a text whose polarity has to be evaluated.
    """
    from vader import SentimentIntensityAnalyzer

    analyzer = SentimentIntensityAnalyzer()
    scores = analyzer.polarity_scores(text)
    print(scores)
def demo_vader_tweets(n_instances=None, output=None):
    """
    Classify 10000 positive and negative tweets using Vader approach.

    :param n_instances: the number of total tweets that have to be classified.
    :param output: the output file where results have to be reported.
    """
    from collections import defaultdict
    from nltk.corpus import twitter_samples
    from vader import SentimentIntensityAnalyzer
    from nltk.metrics import (accuracy as eval_accuracy, precision as eval_precision,
                              recall as eval_recall, f_measure as eval_f_measure)

    if n_instances is not None:
        # Half of the requested instances come from each polarity.
        n_instances = int(n_instances/2)

    fields = ['id', 'text']
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, strip_off_emoticons=False,
                        limit=n_instances)
    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, strip_off_emoticons=False,
                        limit=n_instances)

    pos_docs = parse_tweets_set(positive_csv, label='pos')
    neg_docs = parse_tweets_set(negative_csv, label='neg')
    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    # Vader is lexicon-based, so only the test split is actually evaluated;
    # the unused training split (and a dead `num` counter) were removed.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)
    testing_tweets = test_pos_docs + test_neg_docs

    vader_analyzer = SentimentIntensityAnalyzer()

    gold_results = defaultdict(set)
    test_results = defaultdict(set)
    acc_gold_results = []
    acc_test_results = []
    labels = set()
    for i, (text, label) in enumerate(testing_tweets):
        labels.add(label)
        gold_results[label].add(i)
        acc_gold_results.append(label)
        score = vader_analyzer.polarity_scores(text)['compound']
        # A compound score of exactly 0 counts as negative (same as original).
        observed = 'pos' if score > 0 else 'neg'
        acc_test_results.append(observed)
        test_results[observed].add(i)

    metrics_results = {}
    # Accuracy is label-independent; the original recomputed the identical
    # value inside the per-label loop. Compute it once.
    metrics_results['Accuracy'] = eval_accuracy(acc_gold_results, acc_test_results)
    for label in labels:
        precision_score = eval_precision(gold_results[label], test_results[label])
        metrics_results['Precision [{0}]'.format(label)] = precision_score
        recall_score = eval_recall(gold_results[label], test_results[label])
        metrics_results['Recall [{0}]'.format(label)] = recall_score
        f_measure_score = eval_f_measure(gold_results[label], test_results[label])
        metrics_results['F-measure [{0}]'.format(label)] = f_measure_score

    for result in sorted(metrics_results):
        print('{0}: {1}'.format(result, metrics_results[result]))

    if output:
        output_markdown(output, Approach='Vader', Dataset='labeled_tweets',
                        Instances=n_instances, Results=metrics_results)
if __name__ == '__main__':
    from nltk.classify import NaiveBayesClassifier, MaxentClassifier
    from nltk.classify.scikitlearn import SklearnClassifier
    from sklearn.svm import LinearSVC
    # Trainer callables that can be passed to the demo_* functions above.
    naive_bayes = NaiveBayesClassifier.train
    svm = SklearnClassifier(LinearSVC()).train
    maxent = MaxentClassifier.train
    # Only the Naive Bayes tweet demo runs by default; uncomment a line below
    # to exercise the other demos.
    demo_tweets(naive_bayes)
    # demo_movie_reviews(svm)
    # demo_subjectivity(svm)
    # demo_sent_subjectivity("she's an artist , but hasn't picked up a brush in a year . ")
    # demo_liu_hu_lexicon("This movie was actually neither that funny, nor super witty.", plot=True)
    # demo_vader_instance("This movie was actually neither that funny, nor super witty.")
    # demo_vader_tweets()
| mit |
ChinaQuants/bokeh | bokeh/tests/test_sources.py | 26 | 3245 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource, ServerDataSource
class TestColumnDataSourcs(unittest.TestCase):
    """Unit tests for ColumnDataSource construction and column management.

    NOTE(review): the class name has a typo ("Sourcs"); it is kept unchanged
    so external references and test selection by name keep working.
    """
    # Fixed throughout: `assertEquals` is a deprecated alias of `assertEqual`
    # (removed in Python 3.12); the file already used `assertEqual` in the
    # pandas tests, so this also makes the class internally consistent.

    def test_basic(self):
        ds = ColumnDataSource()
        self.assertTrue(isinstance(ds, DataSource))

    def test_init_dict_arg(self):
        data = dict(a=[1], b=[2])
        ds = ColumnDataSource(data)
        self.assertEqual(ds.data, data)
        self.assertEqual(set(ds.column_names), set(data.keys()))

    def test_init_dict_data_kwarg(self):
        data = dict(a=[1], b=[2])
        ds = ColumnDataSource(data=data)
        self.assertEqual(ds.data, data)
        self.assertEqual(set(ds.column_names), set(data.keys()))

    @skipIf(not is_pandas, "pandas not installed")
    def test_init_pandas_arg(self):
        data = dict(a=[1, 2], b=[2, 3])
        df = pd.DataFrame(data)
        ds = ColumnDataSource(df)
        self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
        for key in data.keys():
            self.assertEqual(list(df[key]), data[key])
        # The DataFrame index is carried over as an extra column.
        self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))

    @skipIf(not is_pandas, "pandas not installed")
    def test_init_pandas_data_kwarg(self):
        data = dict(a=[1, 2], b=[2, 3])
        df = pd.DataFrame(data)
        ds = ColumnDataSource(data=df)
        self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
        for key in data.keys():
            self.assertEqual(list(df[key]), data[key])
        self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))

    def test_add_with_name(self):
        ds = ColumnDataSource()
        name = ds.add([1, 2, 3], name="foo")
        self.assertEqual(name, "foo")
        name = ds.add([4, 5, 6], name="bar")
        self.assertEqual(name, "bar")

    def test_add_without_name(self):
        # Unnamed columns get auto-generated "Series N" names.
        ds = ColumnDataSource()
        name = ds.add([1, 2, 3])
        self.assertEqual(name, "Series 0")
        name = ds.add([4, 5, 6])
        self.assertEqual(name, "Series 1")

    def test_add_with_and_without_name(self):
        ds = ColumnDataSource()
        name = ds.add([1, 2, 3], "foo")
        self.assertEqual(name, "foo")
        name = ds.add([4, 5, 6])
        self.assertEqual(name, "Series 1")

    def test_remove_exists(self):
        ds = ColumnDataSource()
        name = ds.add([1, 2, 3], "foo")
        assert name
        ds.remove("foo")
        self.assertEqual(ds.column_names, [])

    def test_remove_exists2(self):
        # Removing a missing column warns instead of raising.
        with warnings.catch_warnings(record=True) as w:
            ds = ColumnDataSource()
            ds.remove("foo")
            self.assertEqual(ds.column_names, [])
            self.assertEqual(len(w), 1)
            self.assertEqual(w[0].category, UserWarning)
            self.assertEqual(str(w[0].message), "Unable to find column 'foo' in data source")
class TestServerDataSources(unittest.TestCase):
    """Unit tests for the ServerDataSource model."""
    def test_basic(self):
        # A ServerDataSource must be a DataSource subtype.
        ds = ServerDataSource()
        self.assertTrue(isinstance(ds, DataSource))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
paladin74/neural-network-animation | matplotlib/patches.py | 10 | 142681 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
# (see the for-loop over patch class names below the Patch class).
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
class Patch(artist.Artist):
    """
    A patch is a 2D artist with a face color and an edge color.
    If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
    are *None*, they default to their rc params setting.
    """
    zorder = 1
    # Accepted values for set_capstyle / set_joinstyle.
    validCap = ('butt', 'round', 'projecting')
    validJoin = ('miter', 'round', 'bevel')

    def __str__(self):
        return str(self.__class__).split('.')[-1]

    def __init__(self,
                 edgecolor=None,
                 facecolor=None,
                 color=None,
                 linewidth=None,
                 linestyle=None,
                 antialiased=None,
                 hatch=None,
                 fill=True,
                 capstyle=None,
                 joinstyle=None,
                 **kwargs):
        """
        The following kwarg properties are supported
        %(Patch)s
        """
        artist.Artist.__init__(self)
        # Fall back to rc params / fixed defaults for unspecified styling.
        if linewidth is None:
            linewidth = mpl.rcParams['patch.linewidth']
        if linestyle is None:
            linestyle = "solid"
        if capstyle is None:
            capstyle = 'butt'
        if joinstyle is None:
            joinstyle = 'miter'
        if antialiased is None:
            antialiased = mpl.rcParams['patch.antialiased']
        self._fill = True  # needed for set_facecolor call
        if color is not None:
            if (edgecolor is not None or
                    facecolor is not None):
                import warnings
                # NOTE(review): the two string pieces concatenate without a
                # space ("overridethe") — message text left as-is here.
                warnings.warn("Setting the 'color' property will override"
                              "the edgecolor or facecolor properties. ")
            self.set_color(color)
        else:
            self.set_edgecolor(edgecolor)
            self.set_facecolor(facecolor)
        self.set_linewidth(linewidth)
        self.set_linestyle(linestyle)
        self.set_antialiased(antialiased)
        self.set_hatch(hatch)
        self.set_fill(fill)
        self.set_capstyle(capstyle)
        self.set_joinstyle(joinstyle)
        self._combined_transform = transforms.IdentityTransform()
        if len(kwargs):
            self.update(kwargs)

    def get_verts(self):
        """
        Return a copy of the vertices used in this patch
        If the patch contains Bezier curves, the curves will be
        interpolated by line segments. To access the curves as
        curves, use :meth:`get_path`.
        """
        trans = self.get_transform()
        path = self.get_path()
        polygons = path.to_polygons(trans)
        if len(polygons):
            return polygons[0]
        return []

    def contains(self, mouseevent, radius=None):
        """Test whether the mouse event occurred in the patch.
        Returns T/F, {}
        """
        # This is a general version of contains that should work on any
        # patch with a path. However, patches that have a faster
        # algebraic solution to hit-testing should override this
        # method.
        if six.callable(self._contains):
            return self._contains(self, mouseevent)
        if radius is None:
            # Pad the hit test by the stroke width.
            radius = self.get_linewidth()
        inside = self.get_path().contains_point(
            (mouseevent.x, mouseevent.y), self.get_transform(), radius)
        return inside, {}

    def contains_point(self, point, radius=None):
        """
        Returns *True* if the given point is inside the path
        (transformed with its transform attribute).
        """
        if radius is None:
            radius = self.get_linewidth()
        return self.get_path().contains_point(point,
                                              self.get_transform(),
                                              radius)

    def update_from(self, other):
        """
        Updates this :class:`Patch` from the properties of *other*.
        """
        artist.Artist.update_from(self, other)
        self.set_edgecolor(other.get_edgecolor())
        self.set_facecolor(other.get_facecolor())
        self.set_fill(other.get_fill())
        self.set_hatch(other.get_hatch())
        self.set_linewidth(other.get_linewidth())
        self.set_linestyle(other.get_linestyle())
        self.set_transform(other.get_data_transform())
        self.set_figure(other.get_figure())
        self.set_alpha(other.get_alpha())

    def get_extents(self):
        """
        Return a :class:`~matplotlib.transforms.Bbox` object defining
        the axis-aligned extents of the :class:`Patch`.
        """
        return self.get_path().get_extents(self.get_transform())

    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the :class:`Patch`.
        """
        # Patch-space -> data-space transform composed with the artist
        # transform.
        return self.get_patch_transform() + artist.Artist.get_transform(self)

    def get_data_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` instance which
        maps data coordinates to physical coordinates.
        """
        return artist.Artist.get_transform(self)

    def get_patch_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` instance which
        takes patch coordinates to data coordinates.
        For example, one may define a patch of a circle which represents a
        radius of 5 by providing coordinates for a unit circle, and a
        transform which scales the coordinates (the patch coordinate) by 5.
        """
        return transforms.IdentityTransform()

    def get_antialiased(self):
        """
        Returns True if the :class:`Patch` is to be drawn with antialiasing.
        """
        return self._antialiased
    get_aa = get_antialiased

    def get_edgecolor(self):
        """
        Return the edge color of the :class:`Patch`.
        """
        return self._edgecolor
    get_ec = get_edgecolor

    def get_facecolor(self):
        """
        Return the face color of the :class:`Patch`.
        """
        return self._facecolor
    get_fc = get_facecolor

    def get_linewidth(self):
        """
        Return the line width in points.
        """
        return self._linewidth
    get_lw = get_linewidth

    def get_linestyle(self):
        """
        Return the linestyle. Will be one of ['solid' | 'dashed' |
        'dashdot' | 'dotted']
        """
        return self._linestyle
    get_ls = get_linestyle

    def set_antialiased(self, aa):
        """
        Set whether to use antialiased rendering
        ACCEPTS: [True | False] or None for default
        """
        if aa is None:
            aa = mpl.rcParams['patch.antialiased']
        self._antialiased = aa

    def set_aa(self, aa):
        """alias for set_antialiased"""
        return self.set_antialiased(aa)

    def set_edgecolor(self, color):
        """
        Set the patch edge color
        ACCEPTS: mpl color spec, or None for default, or 'none' for no color
        """
        if color is None:
            color = mpl.rcParams['patch.edgecolor']
        # Keep the original spec so alpha changes can be re-applied later.
        self._original_edgecolor = color
        self._edgecolor = colors.colorConverter.to_rgba(color, self._alpha)

    def set_ec(self, color):
        """alias for set_edgecolor"""
        return self.set_edgecolor(color)

    def set_facecolor(self, color):
        """
        Set the patch face color
        ACCEPTS: mpl color spec, or None for default, or 'none' for no color
        """
        if color is None:
            color = mpl.rcParams['patch.facecolor']
        self._original_facecolor = color  # save: otherwise changing _fill
                                          # may lose alpha information
        self._facecolor = colors.colorConverter.to_rgba(color, self._alpha)
        if not self._fill:
            # Unfilled patches carry a fully-transparent face color.
            self._facecolor = list(self._facecolor)
            self._facecolor[3] = 0

    def set_fc(self, color):
        """alias for set_facecolor"""
        return self.set_facecolor(color)

    def set_color(self, c):
        """
        Set both the edgecolor and the facecolor.
        ACCEPTS: matplotlib color spec
        .. seealso::
            :meth:`set_facecolor`, :meth:`set_edgecolor`
               For setting the edge or face color individually.
        """
        self.set_facecolor(c)
        self.set_edgecolor(c)

    def set_alpha(self, alpha):
        """
        Set the alpha tranparency of the patch.
        ACCEPTS: float or None
        """
        if alpha is not None:
            try:
                float(alpha)
            except TypeError:
                raise TypeError('alpha must be a float or None')
        artist.Artist.set_alpha(self, alpha)
        # Re-resolve the RGBA colors so they pick up the new alpha.
        self.set_facecolor(self._original_facecolor)  # using self._fill and
                                                      # self._alpha
        self.set_edgecolor(self._original_edgecolor)

    def set_linewidth(self, w):
        """
        Set the patch linewidth in points
        ACCEPTS: float or None for default
        """
        if w is None:
            w = mpl.rcParams['patch.linewidth']
        self._linewidth = w

    def set_lw(self, lw):
        """alias for set_linewidth"""
        return self.set_linewidth(lw)

    def set_linestyle(self, ls):
        """
        Set the patch linestyle
        ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
        """
        if ls is None:
            ls = "solid"
        self._linestyle = ls

    def set_ls(self, ls):
        """alias for set_linestyle"""
        return self.set_linestyle(ls)

    def set_fill(self, b):
        """
        Set whether to fill the patch
        ACCEPTS: [True | False]
        """
        self._fill = bool(b)
        # Re-apply the face color so its alpha reflects the new fill state.
        self.set_facecolor(self._original_facecolor)

    def get_fill(self):
        'return whether fill is set'
        return self._fill

    # Make fill a property so as to preserve the long-standing
    # but somewhat inconsistent behavior in which fill was an
    # attribute.
    fill = property(get_fill, set_fill)

    def set_capstyle(self, s):
        """
        Set the patch capstyle
        ACCEPTS: ['butt' | 'round' | 'projecting']
        """
        s = s.lower()
        if s not in self.validCap:
            raise ValueError('set_capstyle passed "%s";\n' % (s,)
                             + 'valid capstyles are %s' % (self.validCap,))
        self._capstyle = s

    def get_capstyle(self):
        "Return the current capstyle"
        return self._capstyle

    def set_joinstyle(self, s):
        """
        Set the patch joinstyle
        ACCEPTS: ['miter' | 'round' | 'bevel']
        """
        s = s.lower()
        if s not in self.validJoin:
            raise ValueError('set_joinstyle passed "%s";\n' % (s,)
                             + 'valid joinstyles are %s' % (self.validJoin,))
        self._joinstyle = s

    def get_joinstyle(self):
        "Return the current joinstyle"
        return self._joinstyle

    def set_hatch(self, hatch):
        """
        Set the hatching pattern
        *hatch* can be one of::
          /   - diagonal hatching
          \   - back diagonal
          |   - vertical
          -   - horizontal
          +   - crossed
          x   - crossed diagonal
          o   - small circle
          O   - large circle
          .   - dots
          *   - stars
        Letters can be combined, in which case all the specified
        hatchings are done. If same letter repeats, it increases the
        density of hatching of that pattern.
        Hatching is supported in the PostScript, PDF, SVG and Agg
        backends only.
        ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
        """
        self._hatch = hatch

    def get_hatch(self):
        'Return the current hatching pattern'
        return self._hatch

    @allow_rasterization
    def draw(self, renderer):
        'Draw the :class:`Patch` to the given *renderer*.'
        if not self.get_visible():
            return
        renderer.open_group('patch', self.get_gid())
        gc = renderer.new_gc()
        gc.set_foreground(self._edgecolor, isRGBA=True)
        lw = self._linewidth
        if self._edgecolor[3] == 0:
            # Fully transparent edge: skip stroking entirely.
            lw = 0
        gc.set_linewidth(lw)
        gc.set_linestyle(self._linestyle)
        gc.set_capstyle(self._capstyle)
        gc.set_joinstyle(self._joinstyle)
        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)
        gc.set_url(self._url)
        gc.set_snap(self.get_snap())
        rgbFace = self._facecolor
        if rgbFace[3] == 0:
            rgbFace = None  # (some?) renderers expect this as no-fill signal
        gc.set_alpha(self._alpha)
        if self._hatch:
            gc.set_hatch(self._hatch)
        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())
        path = self.get_path()
        transform = self.get_transform()
        # Split the transform so the renderer can apply the affine part
        # itself.
        tpath = transform.transform_path_non_affine(path)
        affine = transform.get_affine()
        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)
        renderer.draw_path(gc, tpath, affine, rgbFace)
        gc.restore()
        renderer.close_group('patch')

    def get_path(self):
        """
        Return the path of this patch
        """
        raise NotImplementedError('Derived must override')

    def get_window_extent(self, renderer=None):
        return self.get_path().get_extents(self.get_transform())
# Register the Patch kwarg table under every patch-class name so the class
# docstrings below can interpolate it (e.g. via %(Rectangle)s).
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
          'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
          'FancyBboxPatch', 'Patch'):
    docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
    def __str__(self):
        return "Shadow(%s)" % (str(self.patch))

    @docstring.dedent_interpd
    def __init__(self, patch, ox, oy, props=None, **kwargs):
        """
        Create a shadow of the given *patch* offset by *ox*, *oy*.
        *props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
        but darkened.
        kwargs are
        %(Patch)s
        """
        Patch.__init__(self)
        self.patch = patch
        self.props = props
        # Offsets are stored in points and converted to pixels at draw time.
        self._ox, self._oy = ox, oy
        self._shadow_transform = transforms.Affine2D()
        self._update()

    def _update(self):
        # Copy the shadowed patch's properties, then either apply the explicit
        # *props* override or darken the shadowed patch's face color.
        self.update_from(self.patch)
        if self.props is not None:
            self.update(self.props)
        else:
            r, g, b, a = colors.colorConverter.to_rgba(
                self.patch.get_facecolor())
            rho = 0.3  # darkening factor applied to each channel
            r = rho * r
            g = rho * g
            b = rho * b
            self.set_facecolor((r, g, b, 0.5))
            self.set_edgecolor((r, g, b, 0.5))
            self.set_alpha(0.5)

    def _update_transform(self, renderer):
        # Convert the point offsets to pixels for the current renderer DPI.
        ox = renderer.points_to_pixels(self._ox)
        oy = renderer.points_to_pixels(self._oy)
        self._shadow_transform.clear().translate(ox, oy)

    def _get_ox(self):
        return self._ox

    def _set_ox(self, ox):
        self._ox = ox

    def _get_oy(self):
        return self._oy

    def _set_oy(self, oy):
        self._oy = oy

    def get_path(self):
        # The shadow shares the shadowed patch's path...
        return self.patch.get_path()

    def get_patch_transform(self):
        # ...and its transform, composed with the pixel offset.
        return self.patch.get_patch_transform() + self._shadow_transform

    def draw(self, renderer):
        self._update_transform(renderer)
        Patch.draw(self, renderer)
class Rectangle(Patch):
    """
    Draw a rectangle with lower left at *xy* = (*x*, *y*) with
    specified *width* and *height*.
    """
    def __str__(self):
        return self.__class__.__name__ \
            + "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height, angle=0.0, **kwargs):
        """
        *angle*
          rotation in degrees (anti-clockwise)
        *fill* is a boolean indicating whether to fill the rectangle
        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self._x = xy[0]
        self._y = xy[1]
        self._width = width
        self._height = height
        self._angle = angle
        # Note: This cannot be calculated until this is added to an Axes
        self._rect_transform = transforms.IdentityTransform()

    def get_path(self):
        """
        Return the vertices of the rectangle
        """
        # The actual geometry comes from the patch transform applied to the
        # unit rectangle.
        return Path.unit_rectangle()

    def _update_patch_transform(self):
        """NOTE: This cannot be called until after this has been added
        to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
        not directly access the transformation member variable.
        """
        x = self.convert_xunits(self._x)
        y = self.convert_yunits(self._y)
        width = self.convert_xunits(self._width)
        height = self.convert_yunits(self._height)
        bbox = transforms.Bbox.from_bounds(x, y, width, height)
        rot_trans = transforms.Affine2D()
        # Rotation is about the lower-left corner.
        rot_trans.rotate_deg_around(x, y, self._angle)
        self._rect_transform = transforms.BboxTransformTo(bbox)
        self._rect_transform += rot_trans

    def get_patch_transform(self):
        self._update_patch_transform()
        return self._rect_transform

    def contains(self, mouseevent):
        # special case the degenerate rectangle
        if self._width == 0 or self._height == 0:
            return False, {}
        x, y = self.get_transform().inverted().transform_point(
            (mouseevent.x, mouseevent.y))
        return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}

    def get_x(self):
        "Return the left coord of the rectangle"
        return self._x

    def get_y(self):
        "Return the bottom coord of the rectangle"
        return self._y

    def get_xy(self):
        "Return the left and bottom coords of the rectangle"
        return self._x, self._y

    def get_width(self):
        "Return the width of the rectangle"
        return self._width

    def get_height(self):
        "Return the height of the rectangle"
        return self._height

    def set_x(self, x):
        """
        Set the left coord of the rectangle
        ACCEPTS: float
        """
        self._x = x

    def set_y(self, y):
        """
        Set the bottom coord of the rectangle
        ACCEPTS: float
        """
        self._y = y

    def set_xy(self, xy):
        """
        Set the left and bottom coords of the rectangle
        ACCEPTS: 2-item sequence
        """
        self._x, self._y = xy

    def set_width(self, w):
        """
        Set the width of the rectangle
        ACCEPTS: float
        """
        self._width = w

    def set_height(self, h):
        """
        Set the height of the rectangle
        ACCEPTS: float
        """
        self._height = h

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle: l,b,w,h
        ACCEPTS: (left, bottom, width, height)
        """
        # Accept either a single (l, b, w, h) sequence or four separate
        # arguments.  Fixed: the original tested ``len(args) == 0`` and then
        # indexed ``args[0]``, which raised IndexError when called with no
        # arguments and failed to unpack when a single 4-tuple was passed.
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h

    def get_bbox(self):
        return transforms.Bbox.from_bounds(self._x, self._y,
                                           self._width, self._height)

    xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
    """
    A regular polygon patch.
    """
    def __str__(self):
        return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])

    @docstring.dedent_interpd
    def __init__(self, xy, numVertices, radius=5, orientation=0,
                 **kwargs):
        """
        Constructor arguments:
        *xy*
          A length 2 tuple (*x*, *y*) of the center.
        *numVertices*
          the number of vertices.
        *radius*
          The distance from the center to each of the vertices.
        *orientation*
          rotates the polygon (in radians).
        Valid kwargs are:
        %(Patch)s
        """
        self._xy = xy
        self._numVertices = numVertices
        self._orientation = orientation
        self._radius = radius
        # Geometry is a unit regular polygon; scale/rotation/translation are
        # applied via the patch transform below.
        self._path = Path.unit_regular_polygon(numVertices)
        self._poly_transform = transforms.Affine2D()
        self._update_transform()
        Patch.__init__(self, **kwargs)

    def _update_transform(self):
        self._poly_transform.clear() \
            .scale(self.radius) \
            .rotate(self.orientation) \
            .translate(*self.xy)

    def _get_xy(self):
        return self._xy

    def _set_xy(self, xy):
        # Setters rebuild the transform so the change takes effect.
        self._xy = xy
        self._update_transform()
    xy = property(_get_xy, _set_xy)

    def _get_orientation(self):
        return self._orientation

    def _set_orientation(self, orientation):
        self._orientation = orientation
        self._update_transform()
    orientation = property(_get_orientation, _set_orientation)

    def _get_radius(self):
        return self._radius

    def _set_radius(self, radius):
        self._radius = radius
        self._update_transform()
    radius = property(_get_radius, _set_radius)

    def _get_numvertices(self):
        return self._numVertices

    def _set_numvertices(self, numVertices):
        # NOTE(review): unlike the other setters, this does not rebuild
        # self._path, so the drawn polygon keeps its old vertex count.
        self._numVertices = numVertices
    numvertices = property(_get_numvertices, _set_numvertices)

    def get_path(self):
        return self._path

    def get_patch_transform(self):
        self._update_transform()
        return self._poly_transform
class PathPatch(Patch):
    """
    A general polycurve path patch.
    """
    def __str__(self):
        return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])

    @docstring.dedent_interpd
    def __init__(self, path, **kwargs):
        """
        *path* is a :class:`matplotlib.path.Path` object.
        Valid kwargs are:
        %(Patch)s
        .. seealso::
            :class:`Patch`
                For additional kwargs
        """
        Patch.__init__(self, **kwargs)
        # The supplied path is used verbatim (no copy is made).
        self._path = path

    def get_path(self):
        return self._path
class Polygon(Patch):
    """
    A general polygon patch.
    """
    def __str__(self):
        return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])

    @docstring.dedent_interpd
    def __init__(self, xy, closed=True, **kwargs):
        """
        *xy* is a numpy array with shape Nx2.
        If *closed* is *True*, the polygon will be closed so the
        starting and ending points are the same.
        Valid kwargs are:
        %(Patch)s
        .. seealso::
            :class:`Patch`
                For additional kwargs
        """
        Patch.__init__(self, **kwargs)
        self._closed = closed
        self.set_xy(xy)

    def get_path(self):
        """
        Get the path of the polygon
        Returns
        -------
        path : Path
            The :class:`~matplotlib.path.Path` object for
            the polygon
        """
        return self._path

    def get_closed(self):
        """
        Returns if the polygon is closed
        Returns
        -------
        closed : bool
            If the path is closed
        """
        return self._closed

    def set_closed(self, closed):
        """
        Set if the polygon is closed
        Parameters
        ----------
        closed : bool
            True if the polygon is closed
        """
        # Avoid rebuilding the path when the flag does not change.
        if self._closed == bool(closed):
            return
        self._closed = bool(closed)
        self.set_xy(self.get_xy())

    def get_xy(self):
        """
        Get the vertices of the path
        Returns
        -------
        vertices : numpy array
            The coordinates of the vertices as a Nx2
            ndarray.
        """
        return self._path.vertices

    def set_xy(self, xy):
        """
        Set the vertices of the polygon
        Parameters
        ----------
        xy : numpy array or iterable of pairs
            The coordinates of the vertices as a Nx2
            ndarray or iterable of pairs.
        """
        xy = np.asarray(xy)
        if self._closed:
            # Ensure first == last vertex so the path closes on itself.
            if len(xy) and (xy[0] != xy[-1]).any():
                xy = np.concatenate([xy, [xy[0]]])
        else:
            # Drop a duplicated terminal vertex for an open polygon.
            if len(xy) > 2 and (xy[0] == xy[-1]).all():
                xy = xy[:-1]
        self._path = Path(xy, closed=self._closed)

    _get_xy = get_xy
    _set_xy = set_xy
    xy = property(
        get_xy, set_xy, None,
        """Set/get the vertices of the polygon. This property is
           provided for backward compatibility with matplotlib 0.91.x
           only. New code should use
           :meth:`~matplotlib.patches.Polygon.get_xy` and
           :meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
    """
    Wedge shaped patch.
    """
    def __str__(self):
        return "Wedge(%g,%g)" % (self.theta1, self.theta2)

    @docstring.dedent_interpd
    def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
        """
        Draw a wedge centered at *x*, *y* center with radius *r* that
        sweeps *theta1* to *theta2* (in degrees).  If *width* is given,
        then a partial wedge is drawn from inner radius *r* - *width*
        to outer radius *r*.

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self.center = center
        self.r, self.width = r, width
        self.theta1, self.theta2 = theta1, theta2
        self._patch_transform = transforms.IdentityTransform()
        self._recompute_path()

    def _recompute_path(self):
        # A complete annulus needs no connecting segment between the
        # inner and outer rings; a partial one joins them with a line.
        if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
            theta1, theta2 = 0, 360
            connector = Path.MOVETO
        else:
            theta1, theta2 = self.theta1, self.theta2
            connector = Path.LINETO

        # Outer ring, computed on the unit circle.
        arc = Path.arc(theta1, theta2)

        if self.width is not None:
            # Partial annulus: the outer ring followed by a reversed
            # inner ring scaled by (r - width) / r.
            outer = arc.vertices
            inner = arc.vertices[::-1] * float(self.r - self.width) / self.r
            verts = np.vstack([outer, inner, outer[0, :], (0, 0)])
            codes = np.hstack([arc.codes, arc.codes,
                               connector, Path.CLOSEPOLY])
            codes[len(arc.codes)] = connector
        else:
            # Plain wedge: close through the center point.
            verts = np.vstack([arc.vertices,
                               [(0, 0), arc.vertices[0, :], (0, 0)]])
            codes = np.hstack([arc.codes,
                               [connector, connector, Path.CLOSEPOLY]])

        # Scale to the wedge radius and shift to the final location.
        verts *= self.r
        verts += np.asarray(self.center)
        self._path = Path(verts, codes)

    def set_center(self, center):
        self._path = None  # invalidated; rebuilt lazily in get_path
        self.center = center

    def set_radius(self, radius):
        self._path = None  # invalidated; rebuilt lazily in get_path
        self.r = radius

    def set_theta1(self, theta1):
        self._path = None  # invalidated; rebuilt lazily in get_path
        self.theta1 = theta1

    def set_theta2(self, theta2):
        self._path = None  # invalidated; rebuilt lazily in get_path
        self.theta2 = theta2

    def set_width(self, width):
        self._path = None  # invalidated; rebuilt lazily in get_path
        self.width = width

    def get_path(self):
        if self._path is None:
            self._recompute_path()
        return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
    """
    An arrow patch.
    """
    def __str__(self):
        return "Arrow()"

    # Unit-length arrow pointing along +x; each instance scales,
    # rotates, and translates it via the patch transform.
    _path = Path([
        [0.0, 0.1], [0.0, -0.1],
        [0.8, -0.1], [0.8, -0.3],
        [1.0, 0.0], [0.8, 0.3],
        [0.8, 0.1], [0.0, 0.1]],
        closed=True)

    @docstring.dedent_interpd
    def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
        """
        Draws an arrow, starting at (*x*, *y*), direction and length
        given by (*dx*, *dy*) the width of the arrow is scaled by *width*.

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        norm = np.sqrt(dx ** 2 + dy ** 2) or 1  # account for div by zero
        cos_t = float(dx) / norm
        sin_t = float(dy) / norm
        # Stretch the unit arrow, rotate it to (dx, dy), then move the
        # tail to (x, y).
        stretch = transforms.Affine2D().scale(norm, width)
        rotate = transforms.Affine2D.from_values(cos_t, sin_t,
                                                 -sin_t, cos_t, 0.0, 0.0)
        shift = transforms.Affine2D().translate(x, y)
        self._patch_transform = (stretch + rotate + shift).frozen()

    def get_path(self):
        return self._path

    def get_patch_transform(self):
        return self._patch_transform
class FancyArrow(Polygon):
    """
    Like Arrow, but lets you set head width and head height independently.
    """
    def __str__(self):
        return "FancyArrow()"
    @docstring.dedent_interpd
    def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
                 head_width=None, head_length=None, shape='full', overhang=0,
                 head_starts_at_zero=False, **kwargs):
        """
        Constructor arguments
        *width*: float (default: 0.001)
            width of full arrow tail
        *length_includes_head*: [True | False] (default: False)
            True if head is to be counted in calculating the length.
        *head_width*: float or None (default: 20*width)
            total width of the full arrow head
        *head_length*: float or None (default: 1.5 * head_width)
            length of arrow head
        *shape*: ['full', 'left', 'right'] (default: 'full')
            draw the left-half, right-half, or full arrow
        *overhang*: float (default: 0)
            fraction that the arrow is swept back (0 overhang means
            triangular shape). Can be negative or greater than one.
        *head_starts_at_zero*: [True | False] (default: False)
            if True, the head starts being drawn at coordinate 0
            instead of ending at coordinate 0.
        Other valid kwargs (inherited from :class:`Patch`) are:
        %(Patch)s
        """
        # NOTE(review): the docstring previously claimed head_width
        # defaults to 3*width, but the code below uses 20*width; the
        # docstring has been corrected to match the actual behavior.
        if head_width is None:
            head_width = 20 * width
        if head_length is None:
            head_length = 1.5 * head_width
        distance = np.sqrt(dx ** 2 + dy ** 2)
        if length_includes_head:
            length = distance
        else:
            length = distance + head_length
        if not length:
            verts = []  # display nothing if empty
        else:
            # start by drawing horizontal arrow, point at (0,0)
            hw, hl, hs, lw = head_width, head_length, overhang, width
            left_half_arrow = np.array([
                [0.0, 0.0],                  # tip
                [-hl, -hw / 2.0],             # leftmost
                [-hl * (1 - hs), -lw / 2.0],  # meets stem
                [-length, -lw / 2.0],          # bottom left
                [-length, 0],
            ])
            #if we're not including the head, shift up by head length
            if not length_includes_head:
                left_half_arrow += [head_length, 0]
            #if the head starts at 0, shift up by another head length
            if head_starts_at_zero:
                left_half_arrow += [head_length / 2.0, 0]
            #figure out the shape, and complete accordingly
            if shape == 'left':
                coords = left_half_arrow
            else:
                # Mirror across the stem axis to get the right half.
                right_half_arrow = left_half_arrow * [1, -1]
                if shape == 'right':
                    coords = right_half_arrow
                elif shape == 'full':
                    # The half-arrows contain the midpoint of the stem,
                    # which we can omit from the full arrow. Including it
                    # twice caused a problem with xpdf.
                    coords = np.concatenate([left_half_arrow[:-1],
                                             right_half_arrow[-2::-1]])
                else:
                    raise ValueError("Got unknown shape: %s" % shape)
            # Rotate the horizontal arrow onto (dx, dy) and translate
            # so the tip lands at (x + dx, y + dy).
            cx = float(dx) / distance
            sx = float(dy) / distance
            M = np.array([[cx, sx], [-sx, cx]])
            verts = np.dot(coords, M) + (x + dx, y + dy)
        Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
# Register FancyArrow's kwarg documentation for %(FancyArrow)s
# interpolation.  (This statement was previously duplicated; the update
# is idempotent, so a single call suffices.)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
class YAArrow(Patch):
    """
    Yet another arrow class.
    This is an arrow that is defined in display space and has a tip at
    *x1*, *y1* and a base at *x2*, *y2*.
    """
    def __str__(self):
        return "YAArrow()"
    @docstring.dedent_interpd
    def __init__(self, figure, xytip, xybase,
                 width=4, frac=0.1, headwidth=12, **kwargs):
        """
        Constructor arguments:
        *xytip*
            (*x*, *y*) location of arrow tip
        *xybase*
            (*x*, *y*) location the arrow base mid point
        *figure*
            The :class:`~matplotlib.figure.Figure` instance
            (fig.dpi)
        *width*
            The width of the arrow in points
        *frac*
            The fraction of the arrow length occupied by the head
        *headwidth*
            The width of the base of the arrow head in points
        Valid kwargs are:
        %(Patch)s
        """
        self.xytip = xytip
        self.xybase = xybase
        self.width = width
        self.frac = frac
        self.headwidth = headwidth
        Patch.__init__(self, **kwargs)
        # Set self.figure after Patch.__init__, since it sets self.figure to
        # None
        self.figure = figure
    def get_path(self):
        # Since this is dpi dependent, we need to recompute the path
        # every time.
        # the base vertices
        x1, y1 = self.xytip
        x2, y2 = self.xybase
        # Half-widths of the stem (k1) and head base (k2) in pixels;
        # points are converted via dpi / 72.
        k1 = self.width * self.figure.dpi / 72. / 2.
        k2 = self.headwidth * self.figure.dpi / 72. / 2.
        xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
        # a point on the segment 20% of the distance from the tip to the base
        theta = math.atan2(y2 - y1, x2 - x1)
        r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
        xm = x1 + self.frac * r * math.cos(theta)
        ym = y1 + self.frac * r * math.sin(theta)
        # Stem corners at the head/stem junction (k1 wide) and the
        # head's base corners (k2 wide).
        xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
        xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
        # Walk the outline: base edge, up one side, around the head
        # through the tip, and back down the other side.
        xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
        ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
        return Path(list(zip(xs, ys)), closed=True)
    def get_patch_transform(self):
        # Vertices are computed directly in display space in get_path.
        return transforms.IdentityTransform()
    def getpoints(self, x1, y1, x2, y2, k):
        """
        For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
        return the points on the line that is perpendicular to the
        line and intersects (*x2*, *y2*) and the distance from (*x2*,
        *y2*) of the returned points is *k*.
        """
        x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
        # Degenerate slopes: handle horizontal and vertical segments
        # directly to avoid division by zero below.
        if y2 - y1 == 0:
            return x2, y2 + k, x2, y2 - k
        elif x2 - x1 == 0:
            return x2 + k, y2, x2 - k, y2
        # Solve the quadratic for points on the perpendicular line
        # (slope pm) through (x2, y2) at distance k.
        m = (y2 - y1) / (x2 - x1)
        pm = -1. / m
        a = 1
        b = -2 * y2
        c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
        y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
        x3a = (y3a - y2) / pm + x2
        y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
        x3b = (y3b - y2) / pm + x2
        return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
    """
    A polygon-approximation of a circle patch.
    """
    def __str__(self):
        return "CirclePolygon(%d,%d)" % self.center

    @docstring.dedent_interpd
    def __init__(self, xy, radius=5,
                 resolution=20,  # the number of vertices
                 **kwargs):
        """
        Create a circle at *xy* = (*x*, *y*) with given *radius*.

        This circle is approximated by a regular polygon with
        *resolution* sides.  For a smoother circle drawn with splines,
        see :class:`~matplotlib.patches.Circle`.

        Valid kwargs are:
        %(Patch)s
        """
        # Delegate to RegularPolygon: a circle approximation is simply
        # a regular polygon with `resolution` vertices and no rotation.
        RegularPolygon.__init__(self, xy,
                                resolution,
                                radius,
                                orientation=0,
                                **kwargs)
class Ellipse(Patch):
    """
    A scale-free ellipse.
    """
    def __str__(self):
        return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
                                         self.width, self.height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height, angle=0.0, **kwargs):
        """
        *xy*
          center of ellipse

        *width*
          total length (diameter) of horizontal axis

        *height*
          total length (diameter) of vertical axis

        *angle*
          rotation in degrees (anti-clockwise)

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)
        self.center = xy
        self.width, self.height = width, height
        self.angle = angle
        self._path = Path.unit_circle()
        # Note: This cannot be calculated until this is added to an Axes
        self._patch_transform = transforms.IdentityTransform()

    def _recompute_transform(self):
        """NOTE: This cannot be called until after this has been added
        to an Axes, otherwise unit conversion will fail.  This makes
        it very important to call the accessor method and not directly
        access the transformation member variable.
        """
        center = (self.convert_xunits(self.center[0]),
                  self.convert_yunits(self.center[1]))
        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        # Affine2D operations mutate in place, so build step by step:
        # scale the unit circle to the half-axes, rotate, then center.
        trans = transforms.Affine2D()
        trans.scale(width * 0.5, height * 0.5)
        trans.rotate_deg(self.angle)
        trans.translate(*center)
        self._patch_transform = trans

    def get_path(self):
        """
        Return the path of the ellipse (the unit circle; the patch
        transform maps it onto the ellipse).
        """
        return self._path

    def get_patch_transform(self):
        self._recompute_transform()
        return self._patch_transform

    def contains(self, ev):
        if ev.x is None or ev.y is None:
            return False, {}
        # Map the event into the unit-circle frame and test membership.
        x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
        return (x * x + y * y) <= 1.0, {}
class Circle(Ellipse):
    """
    A circle patch.
    """
    def __str__(self):
        center_x, center_y = self.center[0], self.center[1]
        return "Circle((%g,%g),r=%g)" % (center_x, center_y, self.radius)

    @docstring.dedent_interpd
    def __init__(self, xy, radius=5, **kwargs):
        """
        Create true circle at center *xy* = (*x*, *y*) with given
        *radius*.  Unlike :class:`~matplotlib.patches.CirclePolygon`
        which is a polygonal approximation, this uses Bézier splines
        and is much closer to a scale-free circle.

        Valid kwargs are:
        %(Patch)s
        """
        self.radius = radius
        Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)

    def set_radius(self, radius):
        """
        Set the radius of the circle

        ACCEPTS: float
        """
        # A circle is an ellipse whose two diameters are equal.
        self.width = self.height = 2 * radius

    def get_radius(self):
        'return the radius of the circle'
        return self.width / 2.

    radius = property(get_radius, set_radius)
class Arc(Ellipse):
    """
    An elliptical arc.  Because it performs various optimizations, it
    can not be filled.

    The arc must be used in an :class:`~matplotlib.axes.Axes`
    instance---it can not be added directly to a
    :class:`~matplotlib.figure.Figure`---because it is optimized to
    only render the segments that are inside the axes bounding box
    with high resolution.
    """
    def __str__(self):
        return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
                                     self.width, self.height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height, angle=0.0,
                 theta1=0.0, theta2=360.0, **kwargs):
        """
        The following args are supported:

        *xy*
          center of ellipse

        *width*
          length of horizontal axis

        *height*
          length of vertical axis

        *angle*
          rotation in degrees (anti-clockwise)

        *theta1*
          starting angle of the arc in degrees

        *theta2*
          ending angle of the arc in degrees

        If *theta1* and *theta2* are not provided, the arc will form a
        complete ellipse.

        Valid kwargs are:

        %(Patch)s
        """
        # Arcs are stroke-only by design; an explicit fill request is
        # an error, and the default is forced to False.
        fill = kwargs.setdefault('fill', False)
        if fill:
            raise ValueError("Arc objects can not be filled")
        Ellipse.__init__(self, xy, width, height, angle, **kwargs)
        self.theta1 = theta1
        self.theta2 = theta2
        self._path = Path.arc(self.theta1, self.theta2)

    @allow_rasterization
    def draw(self, renderer):
        """
        Ellipses are normally drawn using an approximation that uses
        eight cubic bezier splines.  The error of this approximation
        is 1.89818e-6, according to this unverified source:

          Lancaster, Don.  Approximating a Circle or an Ellipse Using
          Four Bezier Cubic Splines.

          http://www.tinaja.com/glib/ellipse4.pdf

        There is a use case where very large ellipses must be drawn
        with very high accuracy, and it is too expensive to render the
        entire ellipse with enough segments (either splines or line
        segments).  Therefore, in the case where either radius of the
        ellipse is large enough that the error of the spline
        approximation will be visible (greater than one pixel offset
        from the ideal), a different technique is used.

        In that case, only the visible parts of the ellipse are drawn,
        with each visible arc using a fixed number of spline segments
        (8).  The algorithm proceeds as follows:

        1. The points where the ellipse intersects the axes bounding
           box are located.  (This is done be performing an inverse
           transformation on the axes bbox such that it is relative
           to the unit circle -- this makes the intersection
           calculation much easier than doing rotated ellipse
           intersection directly).

           This uses the "line intersecting a circle" algorithm
           from:

               Vince, John.  Geometry for Computer Graphics: Formulae,
               Examples & Proofs.  London: Springer-Verlag, 2005.

        2. The angles of each of the intersection points are
           calculated.

        3. Proceeding counterclockwise starting in the positive
           x-direction, each of the visible arc-segments between the
           pairs of vertices are drawn using the bezier arc
           approximation technique implemented in
           :meth:`matplotlib.path.Path.arc`.
        """
        if not hasattr(self, 'axes'):
            raise RuntimeError('Arcs can only be used in Axes instances')

        self._recompute_transform()

        # Get the width and height in pixels
        width = self.convert_xunits(self.width)
        height = self.convert_yunits(self.height)
        width, height = self.get_transform().transform_point(
            (width, height))
        inv_error = (1.0 / 1.89818e-6) * 0.5

        if width < inv_error and height < inv_error:
            # The spline approximation error stays below one pixel:
            # the precomputed full-arc path can be drawn directly.
            return Patch.draw(self, renderer)

        def iter_circle_intersect_on_line(x0, y0, x1, y1):
            # Yield the intersection point(s) of the unit circle with
            # the infinite line through (x0, y0) and (x1, y1).
            dx = x1 - x0
            dy = y1 - y0
            dr2 = dx * dx + dy * dy
            D = x0 * y1 - x1 * y0
            D2 = D * D
            discrim = dr2 - D2

            # Single (tangential) intersection
            if discrim == 0.0:
                x = (D * dy) / dr2
                y = (-D * dx) / dr2
                yield x, y
            elif discrim > 0.0:
                # The definition of "sign" here is different from
                # np.sign: we never want to get 0.0
                if dy < 0.0:
                    sign_dy = -1.0
                else:
                    sign_dy = 1.0
                sqrt_discrim = np.sqrt(discrim)
                for sign in (1., -1.):
                    x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
                    y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
                    yield x, y

        def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
            # As above, but restricted (with a small tolerance) to the
            # segment between the two endpoints.
            epsilon = 1e-9
            if x1 < x0:
                x0e, x1e = x1, x0
            else:
                x0e, x1e = x0, x1
            if y1 < y0:
                y0e, y1e = y1, y0
            else:
                y0e, y1e = y0, y1
            x0e -= epsilon
            y0e -= epsilon
            x1e += epsilon
            y1e += epsilon
            for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
                if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
                    yield x, y

        # Transforms the axes box_path so that it is relative to the unit
        # circle in the same way that it is relative to the desired
        # ellipse.
        box_path = Path.unit_rectangle()
        box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
            self.get_transform().inverted()
        box_path = box_path.transformed(box_path_transform)

        PI = np.pi
        TWOPI = PI * 2.0
        RAD2DEG = 180.0 / PI
        DEG2RAD = PI / 180.0
        theta1 = self.theta1
        theta2 = self.theta2
        thetas = {}
        # For each of the point pairs, there is a line segment
        for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
            x0, y0 = p0
            x1, y1 = p1
            for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
                theta = np.arccos(x)
                if y < 0:
                    theta = TWOPI - theta
                # Convert radians to angles
                theta *= RAD2DEG
                if theta > theta1 and theta < theta2:
                    thetas[theta] = None

        thetas = list(six.iterkeys(thetas))
        thetas.sort()
        thetas.append(theta2)

        last_theta = theta1
        theta1_rad = theta1 * DEG2RAD
        inside = box_path.contains_point((np.cos(theta1_rad),
                                          np.sin(theta1_rad)))

        # save original path
        path_original = self._path
        for theta in thetas:
            if inside:
                # BUGFIX: the 8-spline sub-arc must be assigned to
                # self._path before drawing -- Patch.draw renders
                # self._path, and the result was previously discarded,
                # so the stale full-resolution arc was drawn instead.
                self._path = Path.arc(last_theta, theta, 8)
                Patch.draw(self, renderer)
                inside = False
            else:
                inside = True
            last_theta = theta
        # restore original path
        self._path = path_original
def bbox_artist(artist, renderer, props=None, fill=True):
    """
    This is a debug function to draw a rectangle around the bounding
    box returned by
    :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
    to test whether the artist is returning the correct bbox.

    *props* is a dict of rectangle props with the additional property
    'pad' that sets the padding around the bbox in points.
    """
    if props is None:
        props = {}
    # Copy so popping 'pad' does not alter the caller's dict.
    props = props.copy()
    pad = renderer.points_to_pixels(props.pop('pad', 4))
    left, bottom, width, height = artist.get_window_extent(renderer).bounds
    # Grow the box by half the padding on every side.
    rect = Rectangle(xy=(left - pad / 2., bottom - pad / 2.),
                     width=width + pad,
                     height=height + pad,
                     fill=fill,
                     )
    rect.set_transform(transforms.IdentityTransform())
    rect.set_clip_on(False)
    rect.update(props)
    rect.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
    """
    This is a debug function to draw a rectangle around the bounding
    box returned by
    :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
    to test whether the artist is returning the correct bbox.
    """
    left, bottom, width, height = bbox.bounds
    rect = Rectangle(xy=(left, bottom),
                     width=width,
                     height=height,
                     edgecolor=color,
                     fill=False,
                     )
    if trans is not None:
        rect.set_transform(trans)
    rect.set_clip_on(False)
    rect.draw(renderer)
def _pprint_table(_table, leadingspace=2):
    """
    Given the list of list of strings, return a string of REST table format.
    """
    pad = ' ' * leadingspace if leadingspace else ''

    # Gather cells column-wise (rows may be ragged: extra cells beyond
    # the header's column count are ignored) and compute column widths.
    ncols = len(_table[0])
    columns = [[] for _ in range(ncols)]
    for row in _table:
        for column, cell in zip(columns, row):
            column.append(cell)
    col_len = [max(len(cell) for cell in column) for column in columns]

    ruler = pad + ' '.join('=' * width for width in col_len)

    def _format_row(row):
        return pad + ' '.join(cell.ljust(width)
                              for cell, width in zip(row, col_len))

    lines = ['', ruler, _format_row(_table[0]), ruler]
    lines.extend(_format_row(row) for row in _table[1:])
    lines.append(ruler)
    lines.append('')
    return "\n".join(lines)
def _pprint_styles(_styles):
    """
    A helper function for the _Style class.  Given the dictionary of
    (stylename : styleclass), return a formatted string listing all the
    styles. Used to update the documentation.
    """
    import inspect

    # inspect.getargspec was deprecated for years and removed in
    # Python 3.11; prefer getfullargspec (its first four fields are
    # identical) and fall back for very old interpreters.
    try:
        get_argspec = inspect.getfullargspec
    except AttributeError:  # Python 2
        get_argspec = inspect.getargspec

    _table = [["Class", "Name", "Attrs"]]
    for name, cls in sorted(_styles.items()):
        argspec = get_argspec(cls.__init__)
        args, defaults = argspec[0], argspec[3]
        if defaults:
            # Pair each defaulted argument (skipping self) with its
            # default value.
            args = [(argname, argdefault)
                    for argname, argdefault in zip(args[1:], defaults)]
        else:
            args = None
        if args is None:
            argstr = 'None'
        else:
            argstr = ",".join([("%s=%s" % (an, av))
                               for an, av
                               in args])
        # adding ``quotes`` since - and | have special meaning in reST
        _table.append([cls.__name__, "``%s``" % name, argstr])
    return _pprint_table(_table)
class _Style(object):
    """
    A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclass of it, and it
    provides some helper functions.
    """
    def __new__(cls, stylename, **kw):
        """
        return the instance of the subclass with the given style name.
        """
        # The "class" should have the _style_list attribute, which is a
        # dictionary of stylename -> style class pairs.
        tokens = stylename.replace(" ", "").split(",")
        key = tokens[0].lower()
        try:
            style_cls = cls._style_list[key]
        except KeyError:
            raise ValueError("Unknown style : %s" % stylename)
        # Remaining tokens are "name=value" attribute overrides; values
        # must parse as floats.
        try:
            pairs = [token.split("=") for token in tokens[1:]]
            style_args = dict((k, float(v)) for k, v in pairs)
        except ValueError:
            raise ValueError("Incorrect style argument : %s" % stylename)
        # Explicit keyword arguments win over the parsed string.
        style_args.update(kw)
        return style_cls(**style_args)

    @classmethod
    def get_styles(klass):
        """
        A class method which returns a dictionary of available styles.
        """
        return klass._style_list

    @classmethod
    def pprint_styles(klass):
        """
        A class method which returns a string of the available styles.
        """
        return _pprint_styles(klass._style_list)

    @classmethod
    def register(klass, name, style):
        """
        Register a new style.
        """
        if not issubclass(style, klass._Base):
            raise ValueError("%s must be a subclass of %s" % (style,
                                                              klass._Base))
        klass._style_list[name] = style
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
boxstyle classes, which are used for :class:`FancyBoxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
Following boxstyle classes are defined.
%(AvailableBoxstyles)s
An instance of any boxstyle class is an callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
drawn. *mutation_scale* determines the overall size of the
mutation (by which I mean the transformation of the rectangle to
the fancy box). *mutation_aspect* determines the aspect-ratio of
the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
initializtion.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
The transmute method is a very core of the
:class:`BboxTransmuter` class and must be overriden in the
subclasses. It receives the location and size of the
rectangle, and the mutation_size, with which the amount of
padding and etc. will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
- *aspect_ratio* : aspect-ration for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
# and take care of the aspect.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
# because we have decided to nest thes classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, \
height + 2. * pad,
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
#self.pad = pad
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of the roudning corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, \
height + 2. * pad,
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding size of edges. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# roudning size. Use a half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = width + 2. * pad - 2 * dr, \
height + 2. * pad - 2 * dr,
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. pad* if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = width + 2. * pad - tooth_size, \
height + 2. * pad - tooth_size,
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = list(zip(bottom_saw_x, bottom_saw_y)) + \
list(zip(right_saw_x, right_saw_y)) + \
list(zip(top_saw_x, top_saw_y)) + \
list(zip(left_saw_x, left_saw_y)) + \
[(bottom_saw_x[0], bottom_saw_y[0])]
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
    """Return the closed sawtooth :class:`Path` for the given box."""
    vertices = self._get_sawtooth_vertices(x0, y0, width,
                                           height, mutation_size)
    return Path(vertices, closed=True)
# Register under the name accepted by BoxStyle("sawtooth", ...).
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
    """A box with a rounded-tooth (wavy) outline."""

    def __init__(self, pad=0.3, tooth_size=None):
        """
        *pad*
            amount of padding

        *tooth_size*
            size of the tooth; defaults to *pad* * 0.5 if None
        """
        super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)

    def transmute(self, x0, y0, width, height, mutation_size):
        """Return a :class:`Path` tracing the sawtooth vertices with
        quadratic Bezier segments, producing rounded teeth."""
        vertices = self._get_sawtooth_vertices(x0, y0,
                                               width, height,
                                               mutation_size)
        # Repeat the first vertex so the Bezier outline closes cleanly.
        vertices = np.concatenate([np.array(vertices),
                                   [vertices[0]]], axis=0)
        n_curves = (len(vertices) - 1) // 2
        codes = ([Path.MOVETO]
                 + [Path.CURVE3, Path.CURVE3] * n_curves
                 + [Path.CLOSEPOLY])
        return Path(vertices, codes)

_style_list["roundtooth"] = Roundtooth
if __doc__:  # __doc__ could be None if -OO optimization is enabled
    # Substitute the table of registered box styles into the class
    # docstring's %(AvailableBoxstyles)s placeholder.
    __doc__ = cbook.dedent(__doc__) % \
        {"AvailableBoxstyles": _pprint_styles(_style_list)}
# Make the box-style table available for %(AvailableBoxstyles)s
# interpolation in other docstrings in this module.
docstring.interpd.update(
    AvailableBoxstyles=_pprint_styles(BoxStyle._style_list))
class FancyBboxPatch(Patch):
    """
    Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
    *y*) with specified width and height.

    :class:`FancyBboxPatch` class is similar to :class:`Rectangle`
    class, but it draws a fancy box around the rectangle. The
    transformation of the rectangle box to the fancy box is delegated
    to the :class:`BoxTransmuterBase` and its derived classes.
    """

    def __str__(self):
        return self.__class__.__name__ \
            + "(%g,%g;%gx%g)" % (self._x, self._y,
                                 self._width, self._height)

    @docstring.dedent_interpd
    def __init__(self, xy, width, height,
                 boxstyle="round",
                 bbox_transmuter=None,
                 mutation_scale=1.,
                 mutation_aspect=None,
                 **kwargs):
        """
        *xy* = lower left corner

        *width*, *height*

        *boxstyle* determines what kind of fancy box will be drawn. It
        can be a string of the style name with a comma separated
        attribute, or an instance of :class:`BoxStyle`. Following box
        styles are available.

        %(AvailableBoxstyles)s

        *mutation_scale* : a value with which attributes of boxstyle
        (e.g., pad) will be scaled. default=1.

        *mutation_aspect* : The height of the rectangle will be
        squeezed by this value before the mutation and the mutated
        box will be stretched by the inverse of it. default=None.

        Valid kwargs are:
        %(Patch)s
        """
        Patch.__init__(self, **kwargs)

        self._x = xy[0]
        self._y = xy[1]
        self._width = width
        self._height = height

        if boxstyle == "custom":
            # "custom" requires a ready-made transmuter instance.
            if bbox_transmuter is None:
                raise ValueError("bbox_transmuter argument is needed with "
                                 "custom boxstyle")
            self._bbox_transmuter = bbox_transmuter
        else:
            self.set_boxstyle(boxstyle)

        self._mutation_scale = mutation_scale
        self._mutation_aspect = mutation_aspect

    @docstring.dedent_interpd
    def set_boxstyle(self, boxstyle=None, **kw):
        """
        Set the box style.

        *boxstyle* can be a string with boxstyle name with optional
        comma-separated attributes. Alternatively, the attrs can
        be provided as keywords::

            set_boxstyle("round,pad=0.2")
            set_boxstyle("round", pad=0.2)

        Old attrs simply are forgotten.

        Without argument (or with *boxstyle* = None), it returns
        available box styles.

        ACCEPTS: %(AvailableBoxstyles)s
        """
        if boxstyle is None:
            return BoxStyle.pprint_styles()

        # Accept a ready-made BoxStyle instance, any callable, or a
        # style string to be parsed by BoxStyle.
        if isinstance(boxstyle, BoxStyle._Base):
            self._bbox_transmuter = boxstyle
        elif six.callable(boxstyle):
            self._bbox_transmuter = boxstyle
        else:
            self._bbox_transmuter = BoxStyle(boxstyle, **kw)

    def set_mutation_scale(self, scale):
        """
        Set the mutation scale.

        ACCEPTS: float
        """
        self._mutation_scale = scale

    def get_mutation_scale(self):
        """
        Return the mutation scale.
        """
        return self._mutation_scale

    def set_mutation_aspect(self, aspect):
        """
        Set the aspect ratio of the bbox mutation.

        ACCEPTS: float
        """
        self._mutation_aspect = aspect

    def get_mutation_aspect(self):
        """
        Return the aspect ratio of the bbox mutation.
        """
        return self._mutation_aspect

    def get_boxstyle(self):
        "Return the boxstyle object"
        return self._bbox_transmuter

    def get_path(self):
        """
        Return the mutated path of the rectangle
        """
        # The stored transmuter turns the plain rectangle into the
        # fancy outline.
        _path = self.get_boxstyle()(self._x, self._y,
                                    self._width, self._height,
                                    self.get_mutation_scale(),
                                    self.get_mutation_aspect())
        return _path

    # Following methods are borrowed from the Rectangle class.

    def get_x(self):
        "Return the left coord of the rectangle"
        return self._x

    def get_y(self):
        "Return the bottom coord of the rectangle"
        return self._y

    def get_width(self):
        "Return the width of the rectangle"
        return self._width

    def get_height(self):
        "Return the height of the rectangle"
        return self._height

    def set_x(self, x):
        """
        Set the left coord of the rectangle

        ACCEPTS: float
        """
        self._x = x

    def set_y(self, y):
        """
        Set the bottom coord of the rectangle

        ACCEPTS: float
        """
        self._y = y

    def set_width(self, w):
        """
        Set the width of the rectangle

        ACCEPTS: float
        """
        self._width = w

    def set_height(self, h):
        """
        Set the height of the rectangle

        ACCEPTS: float
        """
        self._height = h

    def set_bounds(self, *args):
        """
        Set the bounds of the rectangle: l,b,w,h

        ACCEPTS: (left, bottom, width, height)
        """
        # A single argument is the (l, b, w, h) sequence; otherwise the
        # four values are passed separately.  (Was ``len(args) == 0``,
        # which broke the single-tuple form documented above.)
        if len(args) == 1:
            l, b, w, h = args[0]
        else:
            l, b, w, h = args
        self._x = l
        self._y = b
        self._width = w
        self._height = h

    def get_bbox(self):
        "Return the bounding box as a :class:`transforms.Bbox`."
        return transforms.Bbox.from_bounds(self._x, self._y,
                                           self._width, self._height)
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
class ConnectionStyle(_Style):
    """
    :class:`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which is used to create a path
    between two points. These are mainly used with
    :class:`FancyArrowPatch`.

    A connectionstyle object can be either created as::

        ConnectionStyle.Arc3(rad=0.2)

    or::

        ConnectionStyle("Arc3", rad=0.2)

    or::

        ConnectionStyle("Arc3, rad=0.2")

    The following classes are defined

    %(AvailableConnectorstyles)s

    An instance of any connection style class is a callable object,
    whose call signature is::

        __call__(self, posA, posB,
                 shrinkA=2., shrinkB=2., patchA=None, patchB=None)

    and it returns a :class:`Path` instance. *posA* and *posB* are
    tuples of x,y coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it start (or end) from the boundary of the
    patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
    which is given in points.
    """

    # Registry of style-name -> class, filled in as each nested style
    # class is defined below.
    _style_list = {}

    class _Base(object):
        """
        A base class for connectionstyle classes. The derived class needs
        to implement a *connect* method whose call signature is::

            connect(posA, posB)

        where posA and posB are tuples of x, y coordinates to be
        connected. The method needs to return a path connecting two
        points. This base class defines a __call__ method, and a few
        helper methods.
        """

        class SimpleEvent:
            # Minimal stand-in for a mouse event so a patch's
            # ``contains`` method can be reused for clipping tests.
            def __init__(self, xy):
                self.x, self.y = xy

        def _clip(self, path, patchA, patchB):
            """
            Clip the path to the boundary of the patchA and patchB.
            The starting point of the path needed to be inside of the
            patchA and the end point inside the patch B. The *contains*
            methods of each patch object is utilized to test if the point
            is inside the path.
            """
            if patchA:
                def insideA(xy_display):
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchA.contains(xy_event)[0]

                try:
                    left, right = split_path_inout(path, insideA)
                except ValueError:
                    # No crossing found; keep the whole path.
                    right = path

                path = right

            if patchB:
                def insideB(xy_display):
                    xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
                    return patchB.contains(xy_event)[0]

                try:
                    left, right = split_path_inout(path, insideB)
                except ValueError:
                    left = path

                path = left

            return path

        def _shrink(self, path, shrinkA, shrinkB):
            """
            Shrink the path by fixed size (in points) with shrinkA and shrinkB
            """
            if shrinkA:
                x, y = path.vertices[0]
                insideA = inside_circle(x, y, shrinkA)

                try:
                    left, right = split_path_inout(path, insideA)
                    path = right
                except ValueError:
                    # Path never leaves the shrink circle; leave as is.
                    pass

            if shrinkB:
                x, y = path.vertices[-1]
                insideB = inside_circle(x, y, shrinkB)

                try:
                    left, right = split_path_inout(path, insideB)
                    path = left
                except ValueError:
                    pass

            return path

        def __call__(self, posA, posB,
                     shrinkA=2., shrinkB=2., patchA=None, patchB=None):
            """
            Calls the *connect* method to create a path between *posA*
            and *posB*. The path is clipped and shrinked.
            """
            path = self.connect(posA, posB)

            clipped_path = self._clip(path, patchA, patchB)
            shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)

            return shrinked_path

        def __reduce__(self):
            # because we have decided to nest these classes, we need to
            # add some more information to allow instance pickling.
            import matplotlib.cbook as cbook
            return (cbook._NestedClassGetter(),
                    (ConnectionStyle, self.__class__.__name__),
                    self.__dict__
                    )

    class Arc3(_Base):
        """
        Creates a simple quadratic bezier curve between two
        points. The curve is created so that the middle contol points
        (C1) is located at the same distance from the start (C0) and
        end points(C2) and the distance of the C1 to the line
        connecting C0-C2 is *rad* times the distance of C0-C2.
        """

        def __init__(self, rad=0.):
            """
            *rad*
                curvature of the curve.
            """
            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB
            x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
            dx, dy = x2 - x1, y2 - y1

            # Control point offset perpendicular to the A-B segment.
            f = self.rad

            cx, cy = x12 + f * dy, y12 - f * dx

            vertices = [(x1, y1),
                        (cx, cy),
                        (x2, y2)]
            codes = [Path.MOVETO,
                     Path.CURVE3,
                     Path.CURVE3]

            return Path(vertices, codes)

    _style_list["arc3"] = Arc3

    class Angle3(_Base):
        """
        Creates a simple quadratic bezier curve between two
        points. The middle control points is placed at the
        intersecting point of two lines which crosses the start (or
        end) point and has a angle of angleA (or angleB).
        """

        def __init__(self, angleA=90, angleB=0):
            """
            *angleA*
                starting angle of the path

            *angleB*
                ending angle of the path
            """
            self.angleA = angleA
            self.angleB = angleB

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
                math.sin(self.angleA / 180. * math.pi),
            cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
                math.sin(self.angleB / 180. * math.pi),

            # Control point at the intersection of the two angle rays.
            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1), (cx, cy), (x2, y2)]
            codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]

            return Path(vertices, codes)

    _style_list["angle3"] = Angle3

    class Angle(_Base):
        """
        Creates a picewise continuous quadratic bezier path between
        two points. The path has a one passing-through point placed at
        the intersecting point of two lines which crosses the start
        (or end) point and has a angle of angleA (or angleB). The
        connecting edges are rounded with *rad*.
        """

        def __init__(self, angleA=90, angleB=0, rad=0.):
            """
            *angleA*
                starting angle of the path

            *angleB*
                ending angle of the path

            *rad*
                rounding radius of the edge
            """
            self.angleA = angleA
            self.angleB = angleB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
                math.sin(self.angleA / 180. * math.pi),
            cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
                math.sin(self.angleB / 180. * math.pi),

            cx, cy = get_intersection(x1, y1, cosA, sinA,
                                      x2, y2, cosB, sinB)

            vertices = [(x1, y1)]
            codes = [Path.MOVETO]

            if self.rad == 0.:
                # Sharp corner: just go through the intersection point.
                vertices.append((cx, cy))
                codes.append(Path.LINETO)
            else:
                # Replace the corner by a quadratic Bezier whose ends lie
                # *rad* away from the corner along each edge.
                dx1, dy1 = x1 - cx, y1 - cy
                d1 = (dx1 ** 2 + dy1 ** 2) ** .5
                f1 = self.rad / d1
                dx2, dy2 = x2 - cx, y2 - cy
                d2 = (dx2 ** 2 + dy2 ** 2) ** .5
                f2 = self.rad / d2
                vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
                                 (cx, cy),
                                 (cx + dx2 * f2, cy + dy2 * f2)])
                codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    _style_list["angle"] = Angle

    class Arc(_Base):
        """
        Creates a picewise continuous quadratic bezier path between
        two points. The path can have two passing-through points, a
        point placed at the distance of armA and angle of angleA from
        point A, another point with respect to point B. The edges are
        rounded with *rad*.
        """

        def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
            """
            *angleA* :
                starting angle of the path

            *angleB* :
                ending angle of the path

            *armA* :
                length of the starting arm

            *armB* :
                length of the ending arm

            *rad* :
                rounding radius of the edges
            """
            self.angleA = angleA
            self.angleB = angleB
            self.armA = armA
            self.armB = armB

            self.rad = rad

        def connect(self, posA, posB):
            x1, y1 = posA
            x2, y2 = posB

            vertices = [(x1, y1)]
            rounded = []
            codes = [Path.MOVETO]

            if self.armA:
                cosA = math.cos(self.angleA / 180. * math.pi)
                sinA = math.sin(self.angleA / 180. * math.pi)
                #x_armA, y_armB
                # Two points along arm A: one *rad* short of the arm end
                # (start of the rounded corner) and the arm end itself.
                d = self.armA - self.rad
                rounded.append((x1 + d * cosA, y1 + d * sinA))
                d = self.armA
                rounded.append((x1 + d * cosA, y1 + d * sinA))

            if self.armB:
                cosB = math.cos(self.angleB / 180. * math.pi)
                sinB = math.sin(self.angleB / 180. * math.pi)
                x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB

                if rounded:
                    xp, yp = rounded[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx * dx + dy * dy) ** .5

                    rounded.append((xp + self.rad * dx / dd,
                                    yp + self.rad * dy / dd))
                    vertices.extend(rounded)
                    codes.extend([Path.LINETO,
                                  Path.CURVE3,
                                  Path.CURVE3])
                else:
                    xp, yp = vertices[-1]
                    dx, dy = x_armB - xp, y_armB - yp
                    dd = (dx * dx + dy * dy) ** .5
                    d = dd - self.rad
                    rounded = [(xp + d * dx / dd, yp + d * dy / dd),
                               (x_armB, y_armB)]

            if rounded:
                xp, yp = rounded[-1]
                dx, dy = x2 - xp, y2 - yp
                dd = (dx * dx + dy * dy) ** .5

                rounded.append((xp + self.rad * dx / dd,
                                yp + self.rad * dy / dd))
                vertices.extend(rounded)
                codes.extend([Path.LINETO,
                              Path.CURVE3,
                              Path.CURVE3])

            vertices.append((x2, y2))
            codes.append(Path.LINETO)

            return Path(vertices, codes)

    _style_list["arc"] = Arc

    class Bar(_Base):
        """
        A line with *angle* between A and B with *armA* and
        *armB*. One of the arm is extend so that they are connected in
        a right angle. The length of armA is determined by (*armA*
        + *fraction* x AB distance). Same for armB.
        """

        def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
            """
            *armA* : minimum length of armA

            *armB* : minimum length of armB

            *fraction* : a fraction of the distance between two points that
                will be added to armA and armB.

            *angle* : angle of the connecting line (if None, parallel to A
                and B)
            """
            self.armA = armA
            self.armB = armB
            self.fraction = fraction
            self.angle = angle

        def connect(self, posA, posB):
            x1, y1 = posA
            x20, y20 = x2, y2 = posB

            x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.

            theta1 = math.atan2(y2 - y1, x2 - x1)
            dx, dy = x2 - x1, y2 - y1
            dd = (dx * dx + dy * dy) ** .5
            ddx, ddy = dx / dd, dy / dd

            armA, armB = self.armA, self.armB

            if self.angle is not None:
                #angle = self.angle % 180.
                #if angle < 0. or angle > 180.:
                #    angle
                #theta0 = (self.angle%180.)/180.*math.pi
                theta0 = self.angle / 180. * math.pi
                #theta0 = (((self.angle+90)%180.)  - 90.)/180.*math.pi
                # Project B onto the line through A at the requested
                # angle and absorb the offset into armB.
                dtheta = theta1 - theta0
                dl = dd * math.sin(dtheta)

                dL = dd * math.cos(dtheta)

                #x2, y2 = x2 + dl*ddy, y2 - dl*ddx
                x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)

                armB = armB - dl

                # update
                dx, dy = x2 - x1, y2 - y1
                dd2 = (dx * dx + dy * dy) ** .5
                ddx, ddy = dx / dd2, dy / dd2

            else:
                dl = 0.

            #if armA > armB:
            #    armB = armA + dl
            #else:
            #    armA = armB - dl

            arm = max(armA, armB)
            f = self.fraction * dd + arm
            #fB = self.fraction*dd + armB

            cx1, cy1 = x1 + f * ddy, y1 - f * ddx
            cx2, cy2 = x2 + f * ddy, y2 - f * ddx

            vertices = [(x1, y1),
                        (cx1, cy1),
                        (cx2, cy2),
                        (x20, y20)]
            codes = [Path.MOVETO,
                     Path.LINETO,
                     Path.LINETO,
                     Path.LINETO]

            return Path(vertices, codes)

    _style_list["bar"] = Bar

    if __doc__:
        # Substitute the table of registered connection styles into the
        # class docstring.
        __doc__ = cbook.dedent(__doc__) % \
            {"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which is used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
A arrowstyle object can be either created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is an callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along witch the arrow
will be drawn. *mutation_size* and *aspect_ratio* has a same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
    """
    Arrow Transmuter Base class

    ArrowTransmuterBase and its derivatives are used to make a fancy
    arrow around a given path. The __call__ method returns a path
    (which will be used to create a PathPatch instance) and a boolean
    value indicating the path is open therefore is not fillable. This
    class is not an artist and actual drawing of the fancy arrow is
    done by the FancyArrowPatch class.
    """

    # The derived classes are required to be able to be initialized
    # w/o arguments, i.e., all its argument (except self) must have
    # the default values.

    def __init__(self):
        super(ArrowStyle._Base, self).__init__()

    @staticmethod
    def ensure_quadratic_bezier(path):
        """ Some ArrowStyle class only works with a simple
        quadratic bezier curve (created with Arc3Connection or
        Angle3Connector). This static method is to check if the
        provided path is a simple quadratic bezier curve and returns
        its control points if true.
        """
        segments = list(path.iter_segments())
        assert len(segments) == 2
        assert segments[0][1] == Path.MOVETO
        assert segments[1][1] == Path.CURVE3
        # Flatten the three control points into [x0, y0, x1, y1, x2, y2].
        return list(segments[0][0]) + list(segments[1][0])

    def transmute(self, path, mutation_size, linewidth):
        """
        The transmute method is the very core of the ArrowStyle
        class and must be overridden in the subclasses. It receives
        the path object along which the arrow will be drawn, and
        the mutation_size, with which the arrow head etc.
        will be scaled. The linewidth may be used to adjust
        the path so that it does not pass beyond the given
        points. It returns a tuple of a Path instance and a
        boolean. The boolean value indicate whether the path can
        be filled or not. The return value can also be a list of paths
        and list of booleans of a same length.
        """
        raise NotImplementedError('Derived must override')

    def __call__(self, path, mutation_size, linewidth,
                 aspect_ratio=1.):
        """
        The __call__ method is a thin wrapper around the transmute method
        and takes care of the aspect ratio.
        """
        path = make_path_regular(path)

        if aspect_ratio is not None:
            # Squeeze the given height by the aspect_ratio
            vertices, codes = path.vertices[:], path.codes[:]
            # Squeeze the height
            vertices[:, 1] = vertices[:, 1] / aspect_ratio
            path_shrinked = Path(vertices, codes)
            # Call transmute with the squeezed path.  Note the argument
            # order declared above: (path, mutation_size, linewidth) —
            # the previous call passed linewidth and mutation_size
            # swapped.
            path_mutated, fillable = self.transmute(path_shrinked,
                                                    mutation_size,
                                                    linewidth)

            if cbook.iterable(fillable):
                path_list = []
                # Iterate the paths directly (zip() over a single list
                # would yield 1-tuples and break the attribute access).
                for p in path_mutated:
                    v, c = p.vertices, p.codes
                    # Restore the height
                    v[:, 1] = v[:, 1] * aspect_ratio
                    path_list.append(Path(v, c))
                return path_list, fillable
            else:
                return path_mutated, fillable
        else:
            return self.transmute(path, mutation_size, linewidth)

    def __reduce__(self):
        # because we have decided to nest these classes, we need to
        # add some more information to allow instance pickling.
        import matplotlib.cbook as cbook
        return (cbook._NestedClassGetter(),
                (ArrowStyle, self.__class__.__name__),
                self.__dict__
                )
class _Curve(_Base):
    """
    A simple arrow which will work with any path instance. The
    returned path is simply concatenation of the original path + at
    most two paths representing the arrow head at the begin point and the
    at the end point. The arrow heads can be either open or closed.
    """

    def __init__(self, beginarrow=None, endarrow=None,
                 fillbegin=False, fillend=False,
                 head_length=.2, head_width=.1):
        """
        The arrows are drawn if *beginarrow* and/or *endarrow* are
        true. *head_length* and *head_width* determines the size
        of the arrow relative to the *mutation scale*. The
        arrowhead at the begin (or end) is closed if fillbegin (or
        fillend) is True.
        """
        self.beginarrow, self.endarrow = beginarrow, endarrow
        self.head_length, self.head_width = \
            head_length, head_width
        self.fillbegin, self.fillend = fillbegin, fillend
        super(ArrowStyle._Curve, self).__init__()

    def _get_arrow_wedge(self, x0, y0, x1, y1,
                         head_dist, cos_t, sin_t, linewidth
                         ):
        """
        Return the paths for arrow heads. Since arrow lines are
        drawn with capstyle=projected, The arrow goes beyond the
        desired point. This method also returns the amount of the path
        to be shrinked so that it does not overshoot.
        """

        # arrow from x0, y0 to x1, y1
        dx, dy = x0 - x1, y0 - y1
        cp_distance = math.sqrt(dx ** 2 + dy ** 2)

        # pad_projected : amount of pad to account the
        # overshooting of the projection of the wedge
        pad_projected = (.5 * linewidth / sin_t)

        # apply pad for projected edge
        ddx = pad_projected * dx / cp_distance
        ddy = pad_projected * dy / cp_distance

        # offset for arrow wedge
        dx = dx / cp_distance * head_dist
        dy = dy / cp_distance * head_dist

        # Rotate the offset by +/- the wedge half-angle to get the two
        # barb directions.
        dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
        dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy

        vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
                          (x1 + ddx, y1 + ddy),
                          (x1 + ddx + dx2, y1 + ddy + dy2)]
        codes_arrow = [Path.MOVETO,
                       Path.LINETO,
                       Path.LINETO]

        return vertices_arrow, codes_arrow, ddx, ddy

    def transmute(self, path, mutation_size, linewidth):
        # Head geometry scaled by the mutation size.
        head_length, head_width = self.head_length * mutation_size, \
            self.head_width * mutation_size
        head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
        cos_t, sin_t = head_length / head_dist, head_width / head_dist

        # begin arrow
        x0, y0 = path.vertices[0]
        x1, y1 = path.vertices[1]
        if self.beginarrow:
            verticesA, codesA, ddxA, ddyA = \
                self._get_arrow_wedge(x1, y1, x0, y0,
                                      head_dist, cos_t, sin_t,
                                      linewidth)
        else:
            verticesA, codesA = [], []
            ddxA, ddyA = 0., 0.

        # end arrow
        x2, y2 = path.vertices[-2]
        x3, y3 = path.vertices[-1]
        if self.endarrow:
            verticesB, codesB, ddxB, ddyB = \
                self._get_arrow_wedge(x2, y2, x3, y3,
                                      head_dist, cos_t, sin_t,
                                      linewidth)
        else:
            verticesB, codesB = [], []
            ddxB, ddyB = 0., 0.

        # this simple code will not work if ddx, ddy is greater than
        # separation between vertices.
        _path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
                                      path.vertices[1:-1],
                                      [(x3 + ddxB, y3 + ddyB)]]),
                      path.codes)]
        _fillable = [False]

        if self.beginarrow:
            if self.fillbegin:
                # Close the begin wedge into a fillable triangle.
                p = np.concatenate([verticesA, [verticesA[0],
                                                verticesA[0]], ])
                c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
                _path.append(Path(p, c))
                _fillable.append(True)
            else:
                _path.append(Path(verticesA, codesA))
                _fillable.append(False)

        if self.endarrow:
            if self.fillend:
                _fillable.append(True)
                p = np.concatenate([verticesB, [verticesB[0],
                                                verticesB[0]], ])
                c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
                _path.append(Path(p, c))
            else:
                _fillable.append(False)
                _path.append(Path(verticesB, codesB))

        return _path, _fillable
class Curve(_Curve):
    """A plain curve, without any arrow head."""

    def __init__(self):
        # No head is drawn at either end.
        super(ArrowStyle.Curve, self).__init__(endarrow=False,
                                               beginarrow=False)

_style_list["-"] = Curve
class CurveA(_Curve):
    """A curve with an open arrow head at its begin point."""

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
            length of the arrow head, relative to the mutation scale

        *head_width*
            width of the arrow head, relative to the mutation scale
        """
        super(ArrowStyle.CurveA, self).__init__(
            head_length=head_length, head_width=head_width,
            beginarrow=True, endarrow=False)

_style_list["<-"] = CurveA
class CurveB(_Curve):
    """A curve with an open arrow head at its end point."""

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
            length of the arrow head, relative to the mutation scale

        *head_width*
            width of the arrow head, relative to the mutation scale
        """
        super(ArrowStyle.CurveB, self).__init__(
            head_length=head_length, head_width=head_width,
            beginarrow=False, endarrow=True)

_style_list["->"] = CurveB
class CurveAB(_Curve):
    """A curve with open arrow heads at both the begin and end points."""

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
            length of the arrow head, relative to the mutation scale

        *head_width*
            width of the arrow head, relative to the mutation scale
        """
        super(ArrowStyle.CurveAB, self).__init__(
            head_length=head_length, head_width=head_width,
            beginarrow=True, endarrow=True)

_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
    """A curve with a filled triangle head at the begin point."""

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
            length of the arrow head, relative to the mutation scale

        *head_width*
            width of the arrow head, relative to the mutation scale
        """
        super(ArrowStyle.CurveFilledA, self).__init__(
            head_length=head_length, head_width=head_width,
            beginarrow=True, endarrow=False,
            fillbegin=True, fillend=False)

_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
    """A curve with a filled triangle head at the end point."""

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
            length of the arrow head, relative to the mutation scale

        *head_width*
            width of the arrow head, relative to the mutation scale
        """
        super(ArrowStyle.CurveFilledB, self).__init__(
            head_length=head_length, head_width=head_width,
            beginarrow=False, endarrow=True,
            fillbegin=False, fillend=True)

_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
    """A curve with filled triangle heads at both the begin and end
    points."""

    def __init__(self, head_length=.4, head_width=.2):
        """
        *head_length*
            length of the arrow head, relative to the mutation scale

        *head_width*
            width of the arrow head, relative to the mutation scale
        """
        super(ArrowStyle.CurveFilledAB, self).__init__(
            head_length=head_length, head_width=head_width,
            beginarrow=True, endarrow=True,
            fillbegin=True, fillend=True)

_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
    # Shared implementation for the bracket ("]-[", "]-", "-[") and
    # bar ("|-|") arrow styles: an optional bracket is attached at
    # either end of the path.

    def __init__(self, bracketA=None, bracketB=None,
                 widthA=1., widthB=1.,
                 lengthA=0.2, lengthB=0.2,
                 angleA=None, angleB=None,
                 scaleA=None, scaleB=None
                 ):
        # NOTE(review): angleA/angleB are stored but never read by
        # _get_bracket/transmute below — confirm whether bracket
        # rotation was ever implemented.
        self.bracketA, self.bracketB = bracketA, bracketB
        self.widthA, self.widthB = widthA, widthB
        self.lengthA, self.lengthB = lengthA, lengthB
        self.angleA, self.angleB = angleA, angleB
        self.scaleA, self.scaleB = scaleA, scaleB

    def _get_bracket(self, x0, y0,
                     cos_t, sin_t, width, length,
                     ):
        """Return vertices and codes for one bracket attached at
        (*x0*, *y0*), oriented by (*cos_t*, *sin_t*)."""
        # arrow from x0, y0 to x1, y1
        from matplotlib.bezier import get_normal_points
        x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)

        dx, dy = length * cos_t, length * sin_t

        vertices_arrow = [(x1 + dx, y1 + dy),
                          (x1, y1),
                          (x2, y2),
                          (x2 + dx, y2 + dy)]
        codes_arrow = [Path.MOVETO,
                       Path.LINETO,
                       Path.LINETO,
                       Path.LINETO]

        return vertices_arrow, codes_arrow

    def transmute(self, path, mutation_size, linewidth):
        # scaleA/scaleB default to the mutation size.
        if self.scaleA is None:
            scaleA = mutation_size
        else:
            scaleA = self.scaleA

        if self.scaleB is None:
            scaleB = mutation_size
        else:
            scaleB = self.scaleB

        vertices_list, codes_list = [], []

        if self.bracketA:
            # Orient the begin bracket along the first path segment.
            x0, y0 = path.vertices[0]
            x1, y1 = path.vertices[1]
            cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
            verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
                                                  self.widthA * scaleA,
                                                  self.lengthA * scaleA)
            vertices_list.append(verticesA)
            codes_list.append(codesA)

        vertices_list.append(path.vertices)
        codes_list.append(path.codes)

        if self.bracketB:
            # Orient the end bracket along the last path segment.
            x0, y0 = path.vertices[-1]
            x1, y1 = path.vertices[-2]
            cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
            verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
                                                  self.widthB * scaleB,
                                                  self.lengthB * scaleB)
            vertices_list.append(verticesB)
            codes_list.append(codesB)

        vertices = np.concatenate(vertices_list)
        codes = np.concatenate(codes_list)

        p = Path(vertices, codes)

        return p, False
class BracketAB(_Bracket):
    """An arrow with a square bracket (]) at both ends."""

    def __init__(self,
                 widthA=1., lengthA=0.2, angleA=None,
                 widthB=1., lengthB=0.2, angleB=None):
        """
        *widthA*, *widthB*
            width of the bracket

        *lengthA*, *lengthB*
            length of the bracket

        *angleA*, *angleB*
            angle between the bracket and the line
        """
        super(ArrowStyle.BracketAB, self).__init__(
            True, True,
            widthA=widthA, lengthA=lengthA, angleA=angleA,
            widthB=widthB, lengthB=lengthB, angleB=angleB)

_style_list["]-["] = BracketAB
class BracketA(_Bracket):
    """An arrow with a square bracket (]) at its begin point."""

    def __init__(self, widthA=1., lengthA=0.2, angleA=None):
        """
        *widthA*
            width of the bracket

        *lengthA*
            length of the bracket

        *angleA*
            angle between the bracket and the line
        """
        super(ArrowStyle.BracketA, self).__init__(
            True, None,
            widthA=widthA, lengthA=lengthA, angleA=angleA)

_style_list["]-"] = BracketA
class BracketB(_Bracket):
    """An arrow with a square bracket ([) at its end point."""

    def __init__(self, widthB=1., lengthB=0.2, angleB=None):
        """
        *widthB*
            width of the bracket

        *lengthB*
            length of the bracket

        *angleB*
            angle between the bracket and the line
        """
        super(ArrowStyle.BracketB, self).__init__(
            None, True,
            widthB=widthB, lengthB=lengthB, angleB=angleB)

_style_list["-["] = BracketB
class BarAB(_Bracket):
    """An arrow with a bar (|) at both ends — a bracket of zero
    length."""

    def __init__(self,
                 widthA=1., angleA=None,
                 widthB=1., angleB=None):
        """
        *widthA*, *widthB*
            width of the bar

        *angleA*, *angleB*
            angle between the bar and the line
        """
        # Bars are brackets whose length is pinned to zero.
        super(ArrowStyle.BarAB, self).__init__(
            True, True,
            widthA=widthA, lengthA=0, angleA=angleA,
            widthB=widthB, lengthB=0, angleB=angleB)

_style_list["|-|"] = BarAB
class Simple(_Base):
    """
    A simple arrow. Only works with a quadratic bezier curve.
    """

    def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
        """
        *head_length*
            length of the arrow head

        *head_width*
            width of the arrow head

        *tail_width*
            width of the arrow tail
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super(ArrowStyle.Simple, self).__init__()

    def transmute(self, path, mutation_size, linewidth):
        # The input path must be a single quadratic Bezier segment;
        # extract its three control points.
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        # divide the path into a head and a tail
        head_length = self.head_length * mutation_size
        in_f = inside_circle(x2, y2, head_length)
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

        from .bezier import NonIntersectingPathException

        try:
            arrow_out, arrow_in = \
                split_bezier_intersecting_with_closedpath(arrow_path,
                                                          in_f,
                                                          tolerence=0.01)
        except NonIntersectingPathException:
            # if this happens, make a straight line of the head_length
            # long.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
            arrow_out = None

        # head
        head_width = self.head_width * mutation_size
        head_left, head_right = \
            make_wedged_bezier2(arrow_in, head_width / 2.,
                                wm=.5)

        # tail
        if arrow_out is not None:
            tail_width = self.tail_width * mutation_size
            tail_left, tail_right = get_parallels(arrow_out,
                                                  tail_width / 2.)

            # Walk the outline: tail right side, right head edge, left
            # head edge, tail left side, then close.
            patch_path = [(Path.MOVETO, tail_right[0]),
                          (Path.CURVE3, tail_right[1]),
                          (Path.CURVE3, tail_right[2]),
                          (Path.LINETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.LINETO, tail_left[2]),
                          (Path.CURVE3, tail_left[1]),
                          (Path.CURVE3, tail_left[0]),
                          (Path.LINETO, tail_right[0]),
                          (Path.CLOSEPOLY, tail_right[0]),
                          ]
        else:
            # Head only — the whole path fell inside the head circle.
            patch_path = [(Path.MOVETO, head_right[0]),
                          (Path.CURVE3, head_right[1]),
                          (Path.CURVE3, head_right[2]),
                          (Path.CURVE3, head_left[1]),
                          (Path.CURVE3, head_left[0]),
                          (Path.CLOSEPOLY, head_left[0]),
                          ]

        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True

_style_list["simple"] = Simple
class Fancy(_Base):
    """
    A fancy arrow. Only works with a quadratic bezier curve.
    """

    def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
        """
        *head_length*
          length of the arrow head

        *head_width*
          width of the arrow head

        *tail_width*
          width of the arrow tail
        """
        self.head_length, self.head_width, self.tail_width = \
            head_length, head_width, tail_width
        super(ArrowStyle.Fancy, self).__init__()

    def transmute(self, path, mutation_size, linewidth):
        """Mutate the quadratic Bezier *path* into a "fancy" arrow
        outline.  Returns a ``(Path, fillable)`` pair."""
        x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)

        # divide the path into a head and a tail
        head_length = self.head_length * mutation_size
        arrow_path = [(x0, y0), (x1, y1), (x2, y2)]

        from .bezier import NonIntersectingPathException

        # path for head
        in_f = inside_circle(x2, y2, head_length)
        try:
            path_out, path_in = \
                split_bezier_intersecting_with_closedpath(
                    arrow_path,
                    in_f,
                    tolerence=0.01)
        except NonIntersectingPathException:
            # if this happens, make a straight line of the head_length
            # long.
            x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
            x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
            arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
            path_head = arrow_path
        else:
            path_head = path_in

        # path for tail: keep the part of the spine outside 80% of the
        # head circle
        in_f = inside_circle(x2, y2, head_length * .8)
        path_out, path_in = \
            split_bezier_intersecting_with_closedpath(
                arrow_path,
                in_f,
                tolerence=0.01)
        path_tail = path_out

        # head
        head_width = self.head_width * mutation_size
        head_l, head_r = make_wedged_bezier2(path_head,
                                             head_width / 2.,
                                             wm=.6)

        # tail
        tail_width = self.tail_width * mutation_size
        tail_left, tail_right = make_wedged_bezier2(path_tail,
                                                    tail_width * .5,
                                                    w1=1., wm=0.6, w2=0.3)

        # find the point where the tail outline starts, near the begin
        # point of the spine
        in_f = inside_circle(x0, y0, tail_width * .3)
        path_in, path_out = \
            split_bezier_intersecting_with_closedpath(
                arrow_path,
                in_f,
                tolerence=0.01)
        tail_start = path_in[-1]

        head_right, head_left = head_r, head_l
        # Trace the closed outline: right side tail->head, around the
        # head, back along the left side, and close at tail_start.
        patch_path = [(Path.MOVETO, tail_start),
                      (Path.LINETO, tail_right[0]),
                      (Path.CURVE3, tail_right[1]),
                      (Path.CURVE3, tail_right[2]),
                      (Path.LINETO, head_right[0]),
                      (Path.CURVE3, head_right[1]),
                      (Path.CURVE3, head_right[2]),
                      (Path.CURVE3, head_left[1]),
                      (Path.CURVE3, head_left[0]),
                      (Path.LINETO, tail_left[2]),
                      (Path.CURVE3, tail_left[1]),
                      (Path.CURVE3, tail_left[0]),
                      (Path.LINETO, tail_start),
                      (Path.CLOSEPOLY, tail_start),
                      ]

        path = Path([p for c, p in patch_path], [c for c, p in patch_path])

        return path, True

_style_list["fancy"] = Fancy
class Wedge(_Base):
    """
    Wedge(?) shape. Only works with a quadratic Bezier curve. The
    arrow has the full *tail_width* at the begin point and tapers to
    zero width at the end point; at the middle the width is
    shrink_factor*tail_width.
    """

    def __init__(self, tail_width=.3, shrink_factor=0.5):
        """
        *tail_width*
          width of the tail

        *shrink_factor*
          fraction of the arrow width at the middle point
        """
        self.tail_width = tail_width
        self.shrink_factor = shrink_factor
        super(ArrowStyle.Wedge, self).__init__()

    def transmute(self, path, mutation_size, linewidth):
        """Mutate the quadratic Bezier *path* into a closed wedge
        outline.  Returns a ``(Path, fillable)`` pair."""
        start_x, start_y, ctrl_x, ctrl_y, end_x, end_y = \
            self.ensure_quadratic_bezier(path)

        # One wedged Bezier on each side of the spine, joined into a
        # single closed polygon.
        spine = [(start_x, start_y), (ctrl_x, ctrl_y), (end_x, end_y)]
        upper, lower = make_wedged_bezier2(
            spine,
            self.tail_width * mutation_size / 2.,
            wm=self.shrink_factor)

        segments = [(Path.MOVETO, upper[0]),
                    (Path.CURVE3, upper[1]),
                    (Path.CURVE3, upper[2]),
                    (Path.LINETO, lower[2]),
                    (Path.CURVE3, lower[1]),
                    (Path.CURVE3, lower[0]),
                    (Path.CLOSEPOLY, lower[0]),
                    ]

        codes, verts = zip(*segments)
        return Path(list(verts), list(codes)), True

_style_list["wedge"] = Wedge
if __doc__:
    # Substitute the table of available arrow styles into the docstring.
    # Guarded because docstrings are stripped under `python -OO`.
    __doc__ = cbook.dedent(__doc__) % \
        {"AvailableArrowstyles": _pprint_styles(_style_list)}

# Make the style tables available to other docstrings via %(...)s
# interpolation (used by the docstring.dedent_interpd decorator).
docstring.interpd.update(
    AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
    AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
class FancyArrowPatch(Patch):
    """
    A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
    """

    def __str__(self):
        # Report either the connected end points or the explicit path,
        # depending on which constructor form was used.
        if self._posA_posB is not None:
            (x1, y1), (x2, y2) = self._posA_posB
            return self.__class__.__name__ \
                + "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
        else:
            return self.__class__.__name__ \
                + "(%s)" % (str(self._path_original),)

    @docstring.dedent_interpd
    def __init__(self, posA=None, posB=None,
                 path=None,
                 arrowstyle="simple",
                 arrow_transmuter=None,
                 connectionstyle="arc3",
                 connector=None,
                 patchA=None,
                 patchB=None,
                 shrinkA=2.,
                 shrinkB=2.,
                 mutation_scale=1.,
                 mutation_aspect=None,
                 dpi_cor=1.,
                 **kwargs):
        """
        If *posA* and *posB* are given, a path connecting the two points
        is created according to the connectionstyle. The path will be
        clipped with *patchA* and *patchB* and further shrunk by
        *shrinkA* and *shrinkB*. An arrow is drawn along this
        resulting path using the *arrowstyle* parameter. If *path* is
        provided, an arrow is drawn along this path and *patchA*,
        *patchB*, *shrinkA*, and *shrinkB* are ignored.

        The *connectionstyle* describes how *posA* and *posB* are
        connected. It can be an instance of the ConnectionStyle class
        (matplotlib.patches.ConnectionStyle) or a string of the
        connectionstyle name, with optional comma-separated
        attributes. The following connection styles are available.

        %(AvailableConnectorstyles)s

        The *arrowstyle* describes how the fancy arrow will be
        drawn. It can be a string of the available arrowstyle names,
        with optional comma-separated attributes, or an
        ArrowStyle instance. The optional attributes are meant to be
        scaled with the *mutation_scale*. The following arrow styles are
        available.

        %(AvailableArrowstyles)s

        *mutation_scale* : a value with which attributes of arrowstyle
        (e.g., head_length) will be scaled. default=1.

        *mutation_aspect* : The height of the rectangle will be
        squeezed by this value before the mutation and the mutated
        box will be stretched by the inverse of it. default=None.

        Valid kwargs are:
        %(Patch)s
        """
        if posA is not None and posB is not None and path is None:
            self._posA_posB = [posA, posB]

            if connectionstyle is None:
                connectionstyle = "arc3"
            self.set_connectionstyle(connectionstyle)

        elif posA is None and posB is None and path is not None:
            self._posA_posB = None
            # NOTE(review): attribute name looks misspelled (presumably
            # "_connectors"); it is only written here, never read.
            self._connetors = None
        else:
            # NOTE(review): message has a grammar slip ("need to
            # provided"); kept as-is since it is a runtime string.
            raise ValueError("either posA and posB, or path need to provided")

        self.patchA = patchA
        self.patchB = patchB
        self.shrinkA = shrinkA
        self.shrinkB = shrinkB

        Patch.__init__(self, **kwargs)

        self._path_original = path

        self.set_arrowstyle(arrowstyle)

        self._mutation_scale = mutation_scale
        self._mutation_aspect = mutation_aspect

        self.set_dpi_cor(dpi_cor)
        #self._draw_in_display_coordinate = True

    def set_dpi_cor(self, dpi_cor):
        """
        dpi_cor is currently used for linewidth-related things and
        shrink factor. Mutation scale is not affected by this.
        """
        self._dpi_cor = dpi_cor

    def get_dpi_cor(self):
        """
        dpi_cor is currently used for linewidth-related things and
        shrink factor. Mutation scale is not affected by this.
        """
        return self._dpi_cor

    def set_positions(self, posA, posB):
        """ set the begin and end positions of the connecting
        path. Use current value if None.
        """
        # NOTE(review): assumes the patch was constructed from
        # posA/posB; when built from an explicit *path*, _posA_posB is
        # None and this would fail.
        if posA is not None:
            self._posA_posB[0] = posA
        if posB is not None:
            self._posA_posB[1] = posB

    def set_patchA(self, patchA):
        """ set the begin patch.
        """
        self.patchA = patchA

    def set_patchB(self, patchB):
        """ set the end patch.
        """
        self.patchB = patchB

    def set_connectionstyle(self, connectionstyle, **kw):
        """
        Set the connection style.

        *connectionstyle* can be a string with connectionstyle name with
        optional comma-separated attributes. Alternatively, the attrs can be
        provided as keywords.

         set_connectionstyle("arc,angleA=0,armA=30,rad=10")
         set_connectionstyle("arc", angleA=0,armA=30,rad=10)

        Old attrs simply are forgotten.

        Without argument (or with connectionstyle=None), return
        available styles as a list of strings.
        """

        if connectionstyle is None:
            return ConnectionStyle.pprint_styles()

        if isinstance(connectionstyle, ConnectionStyle._Base):
            self._connector = connectionstyle
        elif six.callable(connectionstyle):
            # we may need check the calling convention of the given function
            self._connector = connectionstyle
        else:
            self._connector = ConnectionStyle(connectionstyle, **kw)

    def get_connectionstyle(self):
        """
        Return the ConnectionStyle instance
        """
        return self._connector

    def set_arrowstyle(self, arrowstyle=None, **kw):
        """
        Set the arrow style.

        *arrowstyle* can be a string with arrowstyle name with optional
        comma-separated attributes. Alternatively, the attrs can
        be provided as keywords.

         set_arrowstyle("Fancy,head_length=0.2")
         set_arrowstyle("fancy", head_length=0.2)

        Old attrs simply are forgotten.

        Without argument (or with arrowstyle=None), return
        available box styles as a list of strings.
        """

        if arrowstyle is None:
            return ArrowStyle.pprint_styles()

        if isinstance(arrowstyle, ArrowStyle._Base):
            self._arrow_transmuter = arrowstyle
        else:
            self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)

    def get_arrowstyle(self):
        """
        Return the arrowstyle object
        """
        return self._arrow_transmuter

    def set_mutation_scale(self, scale):
        """
        Set the mutation scale.

        ACCEPTS: float
        """
        self._mutation_scale = scale

    def get_mutation_scale(self):
        """
        Return the mutation scale.
        """
        return self._mutation_scale

    def set_mutation_aspect(self, aspect):
        """
        Set the aspect ratio of the bbox mutation.

        ACCEPTS: float
        """
        self._mutation_aspect = aspect

    def get_mutation_aspect(self):
        """
        Return the aspect ratio of the bbox mutation.
        """
        return self._mutation_aspect

    def get_path(self):
        """
        return the path of the arrow in the data coordinate. Use
        get_path_in_displaycoord() method to retrieve the arrow path
        in the display coord.
        """
        _path, fillable = self.get_path_in_displaycoord()

        if cbook.iterable(fillable):
            # Some arrow styles return several sub-paths; merge them so
            # a single Path can be returned.
            _path = concatenate_paths(_path)

        return self.get_transform().inverted().transform_path(_path)

    def get_path_in_displaycoord(self):
        """
        Return the mutated path of the arrow in the display coord
        """
        dpi_cor = self.get_dpi_cor()

        if self._posA_posB is not None:
            posA = self.get_transform().transform_point(self._posA_posB[0])
            posB = self.get_transform().transform_point(self._posA_posB[1])
            _path = self.get_connectionstyle()(posA, posB,
                                               patchA=self.patchA,
                                               patchB=self.patchB,
                                               shrinkA=self.shrinkA * dpi_cor,
                                               shrinkB=self.shrinkB * dpi_cor
                                               )
        else:
            _path = self.get_transform().transform_path(self._path_original)

        # Mutate the (possibly clipped/shrunk) path with the arrow style.
        _path, fillable = self.get_arrowstyle()(_path,
                                                self.get_mutation_scale(),
                                                self.get_linewidth() * dpi_cor,
                                                self.get_mutation_aspect()
                                                )

        #if not fillable:
        #    self._fill = False

        return _path, fillable

    def draw(self, renderer):
        if not self.get_visible():
            return

        renderer.open_group('patch', self.get_gid())
        gc = renderer.new_gc()

        gc.set_foreground(self._edgecolor, isRGBA=True)

        lw = self._linewidth
        if self._edgecolor[3] == 0:
            # fully transparent edge: skip stroking entirely
            lw = 0
        gc.set_linewidth(lw)
        gc.set_linestyle(self._linestyle)

        gc.set_antialiased(self._antialiased)
        self._set_gc_clip(gc)
        gc.set_capstyle('round')
        gc.set_snap(self.get_snap())

        rgbFace = self._facecolor
        if rgbFace[3] == 0:
            rgbFace = None  # (some?) renderers expect this as no-fill signal

        gc.set_alpha(self._alpha)

        if self._hatch:
            gc.set_hatch(self._hatch)

        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())

        # FIXME : dpi_cor is for the dpi-dependecy of the
        # linewidth. There could be room for improvement.
        #
        #dpi_cor = renderer.points_to_pixels(1.)
        self.set_dpi_cor(renderer.points_to_pixels(1.))
        path, fillable = self.get_path_in_displaycoord()

        # Normalize to parallel lists so multi-part arrow styles and
        # single-path styles share the drawing loop below.
        if not cbook.iterable(fillable):
            path = [path]
            fillable = [fillable]

        affine = transforms.IdentityTransform()

        if self.get_path_effects():
            from matplotlib.patheffects import PathEffectRenderer
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)

        for p, f in zip(path, fillable):
            if f:
                renderer.draw_path(gc, p, affine, rgbFace)
            else:
                # stroke-only (unfillable) sub-path
                renderer.draw_path(gc, p, affine, None)

        gc.restore()
        renderer.close_group('patch')
class ConnectionPatch(FancyArrowPatch):
    """
    A :class:`~matplotlib.patches.ConnectionPatch` class is to make
    connecting lines between two points (possibly in different axes).
    """
    def __str__(self):
        return "ConnectionPatch((%g,%g),(%g,%g))" % \
            (self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])

    @docstring.dedent_interpd
    def __init__(self, xyA, xyB, coordsA, coordsB=None,
                 axesA=None, axesB=None,
                 arrowstyle="-",
                 arrow_transmuter=None,
                 connectionstyle="arc3",
                 connector=None,
                 patchA=None,
                 patchB=None,
                 shrinkA=0.,
                 shrinkB=0.,
                 mutation_scale=10.,
                 mutation_aspect=None,
                 clip_on=False,
                 dpi_cor=1.,
                 **kwargs):
        """
        Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*

        Valid keys are

        ===============  ======================================================
        Key              Description
        ===============  ======================================================
        arrowstyle       the arrow style
        connectionstyle  the connection style
        relpos           default is (0.5, 0.5)
        patchA           default is bounding box of the text
        patchB           default is None
        shrinkA          default is 2 points
        shrinkB          default is 2 points
        mutation_scale   default is text size (in points)
        mutation_aspect  default is 1.
        ?                any key for :class:`matplotlib.patches.PathPatch`
        ===============  ======================================================

        *coordsA* and *coordsB* are strings that indicate the
        coordinates of *xyA* and *xyB*.

        =================   ===================================================
        Property            Description
        =================   ===================================================
        'figure points'     points from the lower left corner of the figure
        'figure pixels'     pixels from the lower left corner of the figure
        'figure fraction'   0,0 is lower left of figure and 1,1 is upper right
        'axes points'       points from lower left corner of axes
        'axes pixels'       pixels from lower left corner of axes
        'axes fraction'     0,0 is lower left of axes and 1,1 is upper right
        'data'              use the coordinate system of the object being
                            annotated (default)
        'offset points'     Specify an offset (in points) from the *xy* value
        'polar'             you can specify *theta*, *r* for the annotation,
                            even in cartesian plots.  Note that if you
                            are using a polar axes, you do not need
                            to specify polar for the coordinate
                            system since that is the native "data" coordinate
                            system.
        =================   ===================================================
        """
        if coordsB is None:
            coordsB = coordsA
        # we'll draw ourself after the artist we annotate by default
        self.xy1 = xyA
        self.xy2 = xyB
        self.coords1 = coordsA
        self.coords2 = coordsB

        self.axesA = axesA
        self.axesB = axesB

        # posA/posB are placeholders; get_path_in_displaycoord() below
        # recomputes the real end points from xy1/xy2 at draw time.
        FancyArrowPatch.__init__(self,
                                 posA=(0, 0), posB=(1, 1),
                                 arrowstyle=arrowstyle,
                                 arrow_transmuter=arrow_transmuter,
                                 connectionstyle=connectionstyle,
                                 connector=connector,
                                 patchA=patchA,
                                 patchB=patchB,
                                 shrinkA=shrinkA,
                                 shrinkB=shrinkB,
                                 mutation_scale=mutation_scale,
                                 mutation_aspect=mutation_aspect,
                                 clip_on=clip_on,
                                 dpi_cor=dpi_cor,
                                 **kwargs)

        # if True, draw annotation only if self.xy is inside the axes
        self._annotation_clip = None

    def _get_xy(self, x, y, s, axes=None):
        """
        calculate the pixel (display) position of the point (*x*, *y*)
        given in the coordinate system named *s*.
        """
        if axes is None:
            axes = self.axes

        if s == 'data':
            trans = axes.transData
            x = float(self.convert_xunits(x))
            y = float(self.convert_yunits(y))
            return trans.transform_point((x, y))
        elif s == 'offset points':
            # convert the data point
            # NOTE(review): this branch reads self.xy and self.xycoords,
            # which ConnectionPatch does not define (they exist on
            # Annotation); it would raise AttributeError if reached.
            dx, dy = self.xy

            # prevent recursion
            if self.xycoords == 'offset points':
                return self._get_xy(dx, dy, 'data')

            dx, dy = self._get_xy(dx, dy, self.xycoords)

            # convert the offset
            dpi = self.figure.get_dpi()
            x *= dpi / 72.
            y *= dpi / 72.

            # add the offset to the data point
            x += dx
            y += dy

            return x, y
        elif s == 'polar':
            theta, r = x, y
            x = r * np.cos(theta)
            y = r * np.sin(theta)
            trans = axes.transData
            return trans.transform_point((x, y))
        elif s == 'figure points':
            # points from the lower left corner of the figure
            dpi = self.figure.dpi
            l, b, w, h = self.figure.bbox.bounds
            r = l + w
            t = b + h

            x *= dpi / 72.
            y *= dpi / 72.
            # negative values are measured from the upper-right corner
            if x < 0:
                x = r + x
            if y < 0:
                y = t + y
            return x, y
        elif s == 'figure pixels':
            # pixels from the lower left corner of the figure
            l, b, w, h = self.figure.bbox.bounds
            r = l + w
            t = b + h
            if x < 0:
                x = r + x
            if y < 0:
                y = t + y
            return x, y
        elif s == 'figure fraction':
            # (0,0) is lower left, (1,1) is upper right of figure
            trans = self.figure.transFigure
            return trans.transform_point((x, y))
        elif s == 'axes points':
            # points from the lower left corner of the axes
            dpi = self.figure.dpi
            l, b, w, h = axes.bbox.bounds
            r = l + w
            t = b + h
            if x < 0:
                x = r + x * dpi / 72.
            else:
                x = l + x * dpi / 72.
            if y < 0:
                y = t + y * dpi / 72.
            else:
                y = b + y * dpi / 72.
            return x, y
        elif s == 'axes pixels':
            #pixels from the lower left corner of the axes
            l, b, w, h = axes.bbox.bounds
            r = l + w
            t = b + h
            if x < 0:
                x = r + x
            else:
                x = l + x
            if y < 0:
                y = t + y
            else:
                y = b + y
            return x, y
        elif s == 'axes fraction':
            #(0,0) is lower left, (1,1) is upper right of axes
            trans = axes.transAxes
            return trans.transform_point((x, y))
        # NOTE(review): an unrecognized *s* falls through and implicitly
        # returns None; callers would fail later with a TypeError.
        # Consider raising ValueError here.

    def set_annotation_clip(self, b):
        """
        set *annotation_clip* attribute.

          * True: the annotation will only be drawn when self.xy is inside the
                  axes.
          * False: the annotation will always be drawn regardless of its
                   position.
          * None: the self.xy will be checked only if *xycoords* is "data"
        """
        self._annotation_clip = b

    def get_annotation_clip(self):
        """
        Return *annotation_clip* attribute.
        See :meth:`set_annotation_clip` for the meaning of return values.
        """
        return self._annotation_clip

    def get_path_in_displaycoord(self):
        """
        Return the mutated path of the arrow in the display coord
        """

        dpi_cor = self.get_dpi_cor()

        # Resolve both end points from their own coordinate systems.
        x, y = self.xy1
        posA = self._get_xy(x, y, self.coords1, self.axesA)

        x, y = self.xy2
        posB = self._get_xy(x, y, self.coords2, self.axesB)

        _path = self.get_connectionstyle()(posA, posB,
                                           patchA=self.patchA,
                                           patchB=self.patchB,
                                           shrinkA=self.shrinkA * dpi_cor,
                                           shrinkB=self.shrinkB * dpi_cor
                                           )

        _path, fillable = self.get_arrowstyle()(_path,
                                                self.get_mutation_scale(),
                                                self.get_linewidth() * dpi_cor,
                                                self.get_mutation_aspect()
                                                )

        return _path, fillable

    def _check_xy(self, renderer):
        """
        check if the annotation need to
        be drawn.
        """
        # NOTE(review): *renderer* is unused here; kept for interface
        # symmetry with Annotation._check_xy presumably.
        b = self.get_annotation_clip()

        if b or (b is None and self.coords1 == "data"):
            x, y = self.xy1
            xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
            if not self.axes.contains_point(xy_pixel):
                return False

        if b or (b is None and self.coords2 == "data"):
            x, y = self.xy2
            xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
            if self.axesB is None:
                axes = self.axes
            else:
                axes = self.axesB
            if not axes.contains_point(xy_pixel):
                return False

        return True

    def draw(self, renderer):
        """
        Draw.
        """
        if renderer is not None:
            self._renderer = renderer
        if not self.get_visible():
            return
        # skip drawing when annotation clipping rules say so
        if not self._check_xy(renderer):
            return
        FancyArrowPatch.draw(self, renderer)
| mit |
glouppe/scikit-learn | examples/text/document_classification_20newsgroups.py | 27 | 10521 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
    """Return the total size of *docs* (an iterable of strings) in
    megabytes, measured as UTF-8 encoded bytes."""
    total_bytes = 0
    for doc in docs:
        total_bytes += len(doc.encode('utf-8'))
    return total_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display),
    marking a cut with a trailing ellipsis."""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit *clf* on the module-level train split, score it on the test
    split, and print timing/accuracy diagnostics.

    Relies on module globals: X_train, y_train, X_test, y_test, opts,
    feature_names and categories.

    Returns a ``(name, accuracy, train_time, test_time)`` tuple.
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)

    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)

    if hasattr(clf, 'coef_'):
        # Linear models expose their weights: report dimensionality,
        # sparsity, and (optionally) the most discriminative terms.
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))

        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, category in enumerate(categories):
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s"
                      % (category, " ".join(feature_names[top10]))))
        print()

    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=categories))

    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))

    print()
    # Classifier name = repr up to the first parenthesis.
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
awni/tensorflow | tensorflow/examples/skflow/text_classification_builtin_rnn_model.py | 1 | 2881 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 10
vocab_processor = skflow.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 50
# Customized function to transform batched X into embeddings
def input_op_fn(X):
    """Transform a batch of word-index rows *X* into a list of
    per-position embedding tensors for the RNN.

    Relies on module globals n_words, EMBEDDING_SIZE and
    MAX_DOCUMENT_LENGTH.
    """
    # Convert indexes of words into embeddings.
    # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
        embedding_size=EMBEDDING_SIZE, name='words')
    # Split into list of embedding per word, while removing doc length dim.
    # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
    word_list = skflow.ops.split_squeeze(1, MAX_DOCUMENT_LENGTH, word_vectors)
    return word_list
# Single direction GRU with a single layer
classifier = skflow.TensorFlowRNNClassifier(rnn_size=EMBEDDING_SIZE,
    n_classes=15, cell_type='gru', input_op_fn=input_op_fn,
    num_layers=1, bidirectional=False, sequence_length=None,
    steps=1000, optimizer='Adam', learning_rate=0.01, continue_training=True)

# Continuously train for 1000 steps & predict on test set.
# NOTE(review): this loop never terminates on its own; it is meant to
# be interrupted manually (e.g. Ctrl-C).
while True:
    classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_rnn')
    score = metrics.accuracy_score(y_test, classifier.predict(X_test))
    print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
toobaz/pandas | pandas/tests/window/test_api.py | 2 | 12926 | from collections import OrderedDict
import warnings
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, concat
from pandas.core.base import SpecificationError
from pandas.tests.window.common import Base
import pandas.util.testing as tm
class TestApi(Base):
def setup_method(self, method):
    # pytest per-test hook: build the shared fixture data
    # (self.frame, self.series, ...) provided by the common Base class.
    self._create_data()
def test_getitem(self):
    # Selecting columns on a Rolling object via __getitem__ should
    # narrow _selected_obj accordingly.
    r = self.frame.rolling(window=5)
    tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)

    # single-column selection yields a Series-backed rolling object
    r = self.frame.rolling(window=5)[1]
    assert r._selected_obj.name == self.frame.columns[1]

    # technically this is allowed
    r = self.frame.rolling(window=5)[1, 3]
    tm.assert_index_equal(r._selected_obj.columns, self.frame.columns[[1, 3]])

    r = self.frame.rolling(window=5)[[1, 3]]
    tm.assert_index_equal(r._selected_obj.columns, self.frame.columns[[1, 3]])
def test_select_bad_cols(self):
    # Selecting unknown column labels must raise KeyError mentioning
    # only the missing labels.
    df = DataFrame([[1, 2]], columns=["A", "B"])
    g = df.rolling(window=5)
    with pytest.raises(KeyError, match="Columns not found: 'C'"):
        g[["C"]]
    with pytest.raises(KeyError, match="^[^A]+$"):
        # A should not be referenced as a bad column...
        # will have to rethink regex if you change message!
        g[["A", "C"]]
def test_attribute_access(self):
    # r.A must behave like r["A"]; unknown attributes must raise.
    df = DataFrame([[1, 2]], columns=["A", "B"])
    r = df.rolling(window=5)
    tm.assert_series_equal(r.A.sum(), r["A"].sum())
    msg = "'Rolling' object has no attribute 'F'"
    with pytest.raises(AttributeError, match=msg):
        r.F
def tests_skip_nuisance(self):
df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
r = df.rolling(window=3)
result = r[["A", "B"]].sum()
expected = DataFrame(
{"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]},
columns=list("AB"),
)
tm.assert_frame_equal(result, expected)
def test_skip_sum_object_raises(self):
df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
r = df.rolling(window=3)
result = r.sum()
expected = DataFrame(
{"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]},
columns=list("AB"),
)
tm.assert_frame_equal(result, expected)
def test_agg(self):
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
a_mean = r["A"].mean()
a_std = r["A"].std()
a_sum = r["A"].sum()
b_mean = r["B"].mean()
b_std = r["B"].std()
b_sum = r["B"].sum()
result = r.aggregate([np.mean, np.std])
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([["A", "B"], ["mean", "std"]])
tm.assert_frame_equal(result, expected)
result = r.aggregate({"A": np.mean, "B": np.std})
expected = concat([a_mean, b_std], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({"A": ["mean", "std"]})
expected = concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "std")])
tm.assert_frame_equal(result, expected)
result = r["A"].aggregate(["mean", "sum"])
expected = concat([a_mean, a_sum], axis=1)
expected.columns = ["mean", "sum"]
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
# using a dict with renaming
warnings.simplefilter("ignore", FutureWarning)
result = r.aggregate({"A": {"mean": "mean", "sum": "sum"}})
expected = concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([("A", "mean"), ("A", "sum")])
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r.aggregate(
{
"A": {"mean": "mean", "sum": "sum"},
"B": {"mean2": "mean", "sum2": "sum"},
}
)
expected = concat([a_mean, a_sum, b_mean, b_sum], axis=1)
exp_cols = [("A", "mean"), ("A", "sum"), ("B", "mean2"), ("B", "sum2")]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]})
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_apply(self, raw):
# passed lambda
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
a_sum = r["A"].sum()
result = r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
rcustom = r["B"].apply(lambda x: np.std(x, ddof=1), raw=raw)
expected = concat([a_sum, rcustom], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_consistency(self):
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
result = r.agg([np.sum, np.mean]).columns
expected = pd.MultiIndex.from_product([list("AB"), ["sum", "mean"]])
tm.assert_index_equal(result, expected)
result = r["A"].agg([np.sum, np.mean]).columns
expected = Index(["sum", "mean"])
tm.assert_index_equal(result, expected)
result = r.agg({"A": [np.sum, np.mean]}).columns
expected = pd.MultiIndex.from_tuples([("A", "sum"), ("A", "mean")])
tm.assert_index_equal(result, expected)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
msg = r"cannot perform renaming for (r1|r2) with a nested dictionary"
with pytest.raises(SpecificationError, match=msg):
r.aggregate({"r1": {"A": ["mean", "sum"]}, "r2": {"B": ["mean", "sum"]}})
expected = concat(
[r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1
)
expected.columns = pd.MultiIndex.from_tuples(
[("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")]
)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r[["A", "B"]].agg(
{"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}}
)
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r.agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}})
expected.columns = pd.MultiIndex.from_tuples(
[
("A", "ra", "mean"),
("A", "ra", "std"),
("B", "rb", "mean"),
("B", "rb", "std"),
]
)
tm.assert_frame_equal(result, expected, check_like=True)
def test_count_nonnumeric_types(self):
# GH12541
cols = [
"int",
"float",
"string",
"datetime",
"timedelta",
"periods",
"fl_inf",
"fl_nan",
"str_nan",
"dt_nat",
"periods_nat",
]
df = DataFrame(
{
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"datetime": pd.date_range("20170101", periods=3),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
"periods": [
pd.Period("2012-01"),
pd.Period("2012-02"),
pd.Period("2012-03"),
],
"fl_inf": [1.0, 2.0, np.Inf],
"fl_nan": [1.0, 2.0, np.NaN],
"str_nan": ["aa", "bb", np.NaN],
"dt_nat": [
Timestamp("20170101"),
Timestamp("20170203"),
Timestamp(None),
],
"periods_nat": [
pd.Period("2012-01"),
pd.Period("2012-02"),
pd.Period(None),
],
},
columns=cols,
)
expected = DataFrame(
{
"int": [1.0, 2.0, 2.0],
"float": [1.0, 2.0, 2.0],
"string": [1.0, 2.0, 2.0],
"datetime": [1.0, 2.0, 2.0],
"timedelta": [1.0, 2.0, 2.0],
"periods": [1.0, 2.0, 2.0],
"fl_inf": [1.0, 2.0, 2.0],
"fl_nan": [1.0, 2.0, 1.0],
"str_nan": [1.0, 2.0, 1.0],
"dt_nat": [1.0, 2.0, 1.0],
"periods_nat": [1.0, 2.0, 1.0],
},
columns=cols,
)
result = df.rolling(window=2).count()
tm.assert_frame_equal(result, expected)
result = df.rolling(1).count()
expected = df.notna().astype(float)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_window_with_args(self):
# make sure that we are aggregating window functions correctly with arg
r = Series(np.random.randn(100)).rolling(
window=10, min_periods=1, win_type="gaussian"
)
expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1)
expected.columns = ["<lambda>", "<lambda>"]
result = r.aggregate([lambda x: x.mean(std=10), lambda x: x.mean(std=0.01)])
tm.assert_frame_equal(result, expected)
def a(x):
return x.mean(std=10)
def b(x):
return x.mean(std=0.01)
expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1)
expected.columns = ["a", "b"]
result = r.aggregate([a, b])
tm.assert_frame_equal(result, expected)
def test_preserve_metadata(self):
# GH 10565
s = Series(np.arange(100), name="foo")
s2 = s.rolling(30).sum()
s3 = s.rolling(20).sum()
assert s2.name == "foo"
assert s3.name == "foo"
@pytest.mark.parametrize(
"func,window_size,expected_vals",
[
(
"rolling",
2,
[
[np.nan, np.nan, np.nan, np.nan],
[15.0, 20.0, 25.0, 20.0],
[25.0, 30.0, 35.0, 30.0],
[np.nan, np.nan, np.nan, np.nan],
[20.0, 30.0, 35.0, 30.0],
[35.0, 40.0, 60.0, 40.0],
[60.0, 80.0, 85.0, 80],
],
),
(
"expanding",
None,
[
[10.0, 10.0, 20.0, 20.0],
[15.0, 20.0, 25.0, 20.0],
[20.0, 30.0, 30.0, 20.0],
[10.0, 10.0, 30.0, 30.0],
[20.0, 30.0, 35.0, 30.0],
[26.666667, 40.0, 50.0, 30.0],
[40.0, 80.0, 60.0, 30.0],
],
),
],
)
def test_multiple_agg_funcs(self, func, window_size, expected_vals):
# GH 15072
df = pd.DataFrame(
[
["A", 10, 20],
["A", 20, 30],
["A", 30, 40],
["B", 10, 30],
["B", 30, 40],
["B", 40, 80],
["B", 80, 90],
],
columns=["stock", "low", "high"],
)
f = getattr(df.groupby("stock"), func)
if window_size:
window = f(window_size)
else:
window = f()
index = pd.MultiIndex.from_tuples(
[("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)],
names=["stock", None],
)
columns = pd.MultiIndex.from_tuples(
[("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")]
)
expected = pd.DataFrame(expected_vals, index=index, columns=columns)
result = window.agg(
OrderedDict((("low", ["mean", "max"]), ("high", ["mean", "min"])))
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
SylvainTakerkart/vobi_one | examples/scripts_vodev_0.4/script0_import_and_parameters_estimation.py | 1 | 10389 | # Author: Flavien Garcia <flavien.garcia@free.fr>
# Sylvain Takerkart <Sylvain.Takerkart@univ-amu.fr>
# License: BSD Style.
"""
Description
-----------
This script processes the oidata functions on some selected blk files or
on some raw file already imported.
The process is decomposed in 4 or 6 steps :
1. Data import (if not imported yet)
2. Conditions file creation (if not created yet)
3. Calculates the mean of spectrums of each file and save a data graph
4. Averages each file over a same ROI
5. Estimates the time constant 'tau' for the dye bleaching and the heartbeat
frequency by executing a non-linear fit thanks to Nelder-Mead simplex direct
search method and save results as an histogram data graph
6. Visualization of a data graphs
Notes
-----
1. Copy the script in a temporary directory of your choice and cd into this directory
2. Change parameters directly in the script itself
3. Write on a shell : brainvisa --noMainWindow --shell.
4. Write : %run script0_import_and_parameters_estimation.py
"""
############################## SECTION TO CHANGE ###############################
# User-editable settings: paths, database coordinates and estimation options.
DATABASE = '/riou/work/crise/takerkart/vodev_0.4/' # Database
#DATA_BLK = '/riou/work/crise/takerkart/tethys_data_for_jarron/raw_unimported_data/vobi_one_demo_data/raw_blk_data'
DATA_BLK = '/riou/work/invibe/DATA/OPTICAL_IMAGING/Monkey_IO/wallace/080313'
protocol='protocol_sc' # Protocol name
subject='wallace_tbin1_sbin2_invibe' # Subject name
session_date='080313' # Session date (must be in format YYMMDD)
temporal_binning=1
spatial_binning=2
analysis_name='_1' # Analysis name
period = 1./110 # Period between two samples (before the temporal binning)
#conditions_list=['00','01','02','03','04','05','06','15'] # Conditions list of BLK files to import
conditions_list=['00','01','02','03','04','05','06','15'] # Conditions list of BLK files to import
nb_files_by_cdt=0 # The number of files, by conditions, to import.
                  # Leave at 0 to apply on the whole session.
                  # If not, type 2 or more (1 is not accepted, and will be replaced by 2)
blank_conditions_list=[0,15] # Raw files to spectral analysis and parameters estimations processes
tau_max=6 # Maximum tau value [in seconds]
corner0=(50,100) # Top left-hand corner for parameters estimation
corner1=(100,150) # Bottom right-hand corner for parameters estimation
format='.nii' # NIfTI (by default) or gz compressed NIfTI
               # NOTE(review): this shadows the builtin 'format'; kept for
               # compatibility with the process calls below.
################################################################################
# orig alex values (with spatial binning = 1)
#corner0=(125,250) # Top left-hand corner for parameters estimation
#corner1=(200,300) # Bottom right_hand corner for parameters estimation
########################## DO NOT TOUCH THIS SECTION ###########################
# Imports
from neuroProcesses import * # Provides a hierarchy to get object's path
import os
import numpy as np
import oidata.oisession_preprocesses as oisession_preprocesses # Session-level processing functions
import oidata.oitrial_processes as oitrial_processes # Trial-level processing functions
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import oidata.oisession as oisession
# ---- Parameter validation ----------------------------------------------------
# NOTE(review): these checks run eval() on the user-edited settings above.
# eval() is unsafe on untrusted text; it is tolerated here only because the
# values are typed directly into this script. Do not reuse on external input.
cdt = ''

# Blank-conditions list: must evaluate to a list of ints.
try:
    eval_cdt_list = sorted(eval(str(blank_conditions_list)))  # str -> sorted list
except Exception:
    # BUG FIX: previously a bare 'except SyntaxError' path re-raised with two
    # slightly different messages; any evaluation failure now yields one
    # consistent error.
    raise SyntaxError('Conditions list is not properly completed')
if len(eval_cdt_list) > 1 and type(eval_cdt_list[0]) != int:
    raise SyntaxError('Conditions list is not properly completed')

# Maximum value of tau: must evaluate to a number [in seconds].
try:
    tau_max = eval(str(tau_max))
except Exception:
    # BUG FIX: was a bare 'except:', which also caught KeyboardInterrupt.
    raise SyntaxError('Please select a number value [in seconds] for maximum tau value')

# Top left-hand corner: must be a pair (x, y).
try:
    c0 = eval(str(corner0))  # values recovery
except SyntaxError:
    raise SyntaxError('Top left-hand corner is not properly completed')
try:
    if len(c0) == 2:
        corner0 = c0
    else:
        raise SyntaxError('Top left-hand corner is not properly completed')
except TypeError:
    # len() failed: the value is not a sequence at all.
    raise TypeError('Top left-hand corner is not properly completed')

# Bottom right-hand corner: must be a pair (x, y).
try:
    c1 = eval(str(corner1))  # values recovery
except SyntaxError:
    raise SyntaxError('Bottom right-hand corner is not properly completed')
try:
    if len(c1) == 2:
        corner1 = c1
    else:
        # BUG FIX: the message wrongly said 'Top left-hand corner' here.
        raise SyntaxError('Bottom right-hand corner is not properly completed')
except TypeError:
    raise TypeError('Bottom right-hand corner is not properly completed')
# Creation of lists of BLK files
# Scan DATA_BLK and keep at most nb_files_by_cdt files per requested condition.
blk_list=[] # BLK files list initialization
counter={}  # condition code -> number of files kept so far
if nb_files_by_cdt==0: # If user want to apply script on all session
    # Use the directory size as an upper bound, i.e. "no limit".
    nb_files_by_cdt=len(os.listdir(DATA_BLK))
if nb_files_by_cdt==1:
    # 1 is not accepted (see parameter comment above); bump to 2.
    nb_files_by_cdt=2
for cdt in conditions_list:
    counter[cdt]=0
for name in os.listdir(DATA_BLK): # For each file
    # name[2:4] encodes the condition number in BLK file names — presumably
    # fixed by the acquisition software naming scheme; verify against data.
    if name[-4:]=='.BLK' and name[2:4] in conditions_list and counter[name[2:4]]<nb_files_by_cdt: # If current file extension is '.BLK'
        blk_list.append(name) # Add BLK file name
        counter[name[2:4]]+=1
print('IMPORTING RAW DATA INTO THE DATABASE')
try: # Verify if a conditions file already exists
# Conditions file path creation
path_cond=os.path.join(DATABASE\
,protocol\
,subject\
,'session_'+session_date\
,'oitrials_analysis/conditions.txt')
# Conditions file path recovery
raw_name,experiences,trials,conditions,selected=np.loadtxt(path_cond, delimiter='\t', unpack=True,dtype=str)
# Recovery of files informations
for name in raw_name: # For each trial
session=name[1:7] # Session recovery
exp=name[9:11] # Experiment recovery
trial=name[13:17] # Trial recovery
condition=name[19:22] # Conditions recovery
path=os.path.join(os.path.split(path_cond)[0]\
,'exp'+exp,'trial'+trial,'raw',name) # Path creation
info_file={'session':session\
,'exp':exp,'trial':trial\
,'condition':condition,'path':path} # Put them on info_file
info_file_list.append(info_file) # Add info file
print('Data already imported') # Inform user data are already imported
imported=True # Data already imported
except: # If not, data import is needed
imported=False # Data not imported yet
if imported==False: # If data not imported yet
print('Importing data...')
# Load datas
current_img=0 # Index of current image
for blk_name in blk_list: # For each blk file
info_file=oitrial_processes.import_external_data_process(
input=os.path.join(DATA_BLK,blk_name), # File path
period = period, # Period between two frames
database=DATABASE, # Database path
protocol=protocol, # Protocol name
subject=subject, # Subject name
format=format,
temporal_binning=temporal_binning,
spatial_binning=spatial_binning,
mode=True,
script=True)
current_img+=1
print('\tImported trial:'+str(current_img)+'/'+str(len(blk_list)))
info_file_list.append(info_file)
print('CREATION OF THE CONDITIONS FILE FOR EACH SESSION')
# Creation of the conditions file (one row per imported trial)
oisession_preprocesses.create_trials_conds_file_process(
    DATABASE, # Database path
    protocol, # Protocol name
    subject, # Subject name
    'session_'+session_date, # Session
    mode=True,
    script=True)
# Conditions file path creation
path_cond=os.path.join(DATABASE\
    ,protocol\
    ,subject\
    ,'session_'+session_date\
    ,'oitrials_analysis/conditions.txt')
# Conditions file recovery (tab-separated columns written by the step above)
raw_name,experiences,trials,conditions,selected=np.loadtxt(path_cond\
    ,delimiter='\t'\
    ,unpack=True\
    ,dtype=str)
print('ESTIMATION OF NOISE PARAMETERS ON BLANK TRIALS')
# Conditions list recovery: build a '_cN_cM' suffix used in output file names.
for c in range(len(eval_cdt_list)): # For each condition
    cdt+='_c'+str(eval_cdt_list[c])
# Region recovery: encode the estimation rectangle in the file-name suffix.
region='_'+str(corner0[0])+'_'+str(corner0[1])+'_'+str(corner1[0])+'_'+str(corner1[1])
print('\tSpectral analysis')
# Mean spectrum over the blank conditions, saved as a PNG data graph.
info_model_files=oisession_preprocesses.spectral_analysis_process(
    database=DATABASE, # Database path
    protocol=protocol, # Protocol name
    subject=subject, # Subject name
    session='session_'+session_date , # Session
    analysis='raw', # Analysis type or name
    conditions=blank_conditions_list,
    corner0=corner0,
    corner1=corner1,
    data_graph=os.path.join(DATABASE,protocol,subject,'session_'+session_date,'oisession_analysis/raw','spectral_analysis'+cdt+'.png'),
    )
print('\tTau and heartbeat frequency estimation')
# Non-linear fit (Nelder-Mead, per the module docstring); saves histograms.
info_model_files=oisession_preprocesses.tau_and_heartbeat_frequency_estimation_process(
    DATABASE, # Database path
    protocol, # Protocol name
    subject, # Subject name
    'session_'+session_date , # Session
    'raw', # Analysis type or name
    blank_conditions_list,
    tau_max,
    corner0=corner0,
    corner1=corner1,
    data_graph=os.path.join(DATABASE,protocol,subject,'session_'+session_date,'oisession_analysis/raw','tau_and_fh_histograms'+cdt+region+'.png'),
    )
print('PLOTTING ESTIMATION RESULTS')
from PyQt4 import Qt
import sys
import visu_png
app = Qt.QApplication(sys.argv)
view = mainThreadActions().call(visu_png.DataGraphModel,os.path.join(DATABASE,protocol,subject,'session_'+session_date,'oisession_analysis/raw','spectral_analysis'+cdt+'.png')) # New thread creation and call to png visualizer
mainThreadActions().push(view.show) # Displaying new window
view2 = mainThreadActions().call(visu_png.DataGraphModel,os.path.join(DATABASE,protocol,subject,'session_'+session_date,'oisession_analysis/raw','tau_and_fh_histograms'+cdt+region+'.png')) # New thread creation and call to png visualizer
mainThreadActions().push(view2.show) # Displaying new window
sys.exit(app.exec_())
| gpl-3.0 |
ShadowTemplate/ScriBa | test.py | 1 | 5074 | import re
import glob
import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
# http://stackoverflow.com/questions/12118720/python-tf-idf-cosine-to-find-document-similarity
# http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer.get_feature_names
# http://blog.christianperone.com/2013/09/machine-learning-cosine-similarity-for-vector-space-models-part-iii/
# os_client = OpenSubtitles()
# user_agent = 'ScriBa v1.0'
# imdb_ids = ['172495', '113497', '468569']
# imdb_ids = ['0119448', '0276981', '0120126', '0182668', '0399877', '0101591']
#
# imdb_url = 'http://www.imdb.com/title/tt0172495'
# token = 'hvmr5f1g99engqihncqo6lb665'
# token = os_client.login(user_agent=user_agent)
# os_client.set_token(token)
# print('Token: {0}\n'.format(token))
#
# for i in range(len(imdb_ids)):
# data = os_client.search_subtitles_for_movie(imdb_ids[i], 'eng')
# print('{0}: {1} result(s)'.format(i, len(data)))
#
# data = os_client.search_subtitles_for_movies(imdb_ids, 'eng')
# for i in range(len(data)):
# print('{0} - {1} - {2}'.format(i, data[i]['IDMovieImdb'], data[i]['IDSubtitleFile']))
#
# list of dictionaries. Each dictionary contains the IDSubtitleFile and the SubDownloadLink
# list_dict = get_subtitle_list(os_client, imdb_id)
# print(list_dict)
# id_list = [sub['IDSubtitleFile'] for sub in list_dict]
# print(id_list)
# print(pprint(os_client.download_subtitles(id_list)))
#
# get IMDb details
# data = os_client.get_imdb_movie_details(imdb_id)
# print('Movie details: {0}\n'.format(data))
#
# search for subtitles
# data = os_client.search_subtitles_for_movie(imdb_id, 'eng')
# note that each JSON contains also the direct download link
# print('Found {0} subtitle(s):\n{1}\n[...]\n'.format(len(data), pprint(data[0])))
#
# extract all links/ids from data
# subs_links = [item['SubDownloadLink'] for item in data]
# print('Links: {0}\n'.format(subs_links))
# subs_ids = [item['IDSubtitle'] for item in data]
# print('Ids: {0}\n'.format(subs_ids))
#
# retrieve encoded gzipped data from subtitle(s) id(s)
# data = os_client.download_subtitles(subs_ids[:5]) # get data only for the first 5 subs
# print('Sub data: {0}'.format(pprint(data)))
#
# download subtitle file from encoded gzipped data
# download_sub_from_encoded_data(data[0]['data'], 'decoded_sub.srt')
#
# download subtitle file from url
# download_file_from_url(subs_links[0], 'direct_downloaded.gz')
#
# os_client.logout()
#
# extract plots and synopsis from IMDb
# plot_summaries_text, synopsis_text = get_movie_info(imdb_id)
# print('Found {0} plot(s):\n'.format(len(plot_summaries_text)))
# for i in range(0, len(plot_summaries_text)):
# print('[{0}]: {1}'.format(i + 1, plot_summaries_text[i]))
# print('\nSynopsis: {0}'.format(synopsis_text))
#
# documents = [
# "The sky is blue",
# "The sun is bright",
# "The sun in the sky is bright",
# "We can see the shining sun, the bright sun"]
#
# tfidf_vectorizer = TfidfVectorizer(stop_words='english')
# tfidf = tfidf_vectorizer.fit_transform(documents)
#
# print('Features:\n{0}'.format(tfidf_vectorizer.get_feature_names()))
#
# cosine_similarities = linear_kernel(tfidf[0:1], tfidf).flatten()
#
# print('Unsorted similarities:\n{0}'.format(cosine_similarities))
# related_docs_indices = cosine_similarities.argsort()[:-5:-1]
# print('Ranking (indexes):\n{0}'.format(related_docs_indices))
# print('Sorted similarities:\n{0}'.format(cosine_similarities[related_docs_indices]))
# print('Most similar:\n{0}'.format(documents[related_docs_indices[1]]))
# Build the corpus: one document per plot/synopsis file; remember the mapping
# imdb_id -> document index and the parallel list of ids.
start_time = time.perf_counter()  # BUG FIX: time.clock() was removed in Python 3.8
documents = []
indexes = {}   # imdb_id -> doc_num
ids_list = []  # doc_num -> imdb_id
doc_num = 0
for ps_file in glob.glob('data/imdb/plots-synopses/*.txt'):
    with open(ps_file, 'r') as f:
        # The IMDb id is the file stem, e.g. '.../120746.txt' -> '120746'.
        imdb_id = re.split(r'/|\.', ps_file)[-2]
        indexes[imdb_id] = doc_num
        documents.append(f.read())
        ids_list.append(imdb_id)
        doc_num += 1
print('{0}\n\ndocs num: {1}'.format(indexes, len(documents)))
# Vectorize once. BUG FIX: the corpus was previously vectorized twice with
# identical settings and the first result ('tfidf_matrix') was discarded.
tfidf_vectorizer = TfidfVectorizer(stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(documents)
zorro_id = indexes.get('120746')
print(zorro_id)
print('Features:\n{0}'.format(tfidf_vectorizer.get_feature_names()))
# Cosine similarity of document 0 against the whole corpus (tf-idf rows are
# L2-normalized, so the linear kernel is the cosine similarity).
cosine_similarities = linear_kernel(tfidf[0:1], tfidf).flatten()
print('Unsorted similarities:\n{0}'.format(cosine_similarities))
# Top-4 most similar documents, best first; index 0 is the query itself.
related_docs_indices = cosine_similarities.argsort()[:-5:-1]
print('Ranking (indexes):\n{0}'.format(related_docs_indices))
print('Sorted similarities:\n{0}'.format(cosine_similarities[related_docs_indices]))
most_similar_id = related_docs_indices[1]
print('Most similar plot/syn:\n{0}\n\nId:{1}'.format(documents[most_similar_id], ids_list[most_similar_id]))
end_time = time.perf_counter() - start_time
print('Execution time: {0} seconds.'.format(end_time))
| gpl-2.0 |
kirangonella/BuildingMachineLearningSystemsWithPython | ch10/large_classification.py | 19 | 3271 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
import mahotas as mh
from glob import glob
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.grid_search import GridSearchCV
import numpy as np
basedir = 'AnimTransDistr'  # root directory: one sub-directory per class
print('This script will test classification of the AnimTransDistr dataset')
# Regularization grid for logistic regression: 1e-4 .. 1e2.
C_range = 10.0 ** np.arange(-4, 3)
grid = GridSearchCV(LogisticRegression(), param_grid={'C' : C_range})
# Standardize features, then grid-search the classifier.
clf = Pipeline([('preproc', StandardScaler()),
                ('classifier', grid)])
def features_for(im):
    '''Load the image at path *im* and return its combined descriptor:
    Haralick texture features of the grey image plus the colour histogram.'''
    from features import chist
    pixels = mh.imread(im)
    grey = mh.colors.rgb2grey(pixels).astype(np.uint8)
    texture = mh.features.haralick(grey).ravel()
    colour = chist(pixels)
    return np.concatenate([texture, colour])
def images():
    '''Yield (image-path, class-index) pairs for every jpg under basedir,
    walking the classes in order and the files in sorted order.'''
    for label, class_name in enumerate(classes):
        pattern = '{}/{}/*.jpg'.format(basedir, class_name)
        for path in sorted(glob(pattern)):
            yield path, label
# Class names must match the sub-directory names under basedir.
classes = [
    'Anims',
    'Cars',
    'Distras',
    'Trans',
]
print('Computing whole-image texture features...')
ifeatures = []
labels = []
for im, ell in images():
    ifeatures.append(features_for(im))
    labels.append(ell)
ifeatures = np.array(ifeatures)
labels = np.array(labels)
# Fixed random_state so the folds (and scores) are reproducible.
cv = cross_validation.KFold(len(ifeatures), 5, shuffle=True, random_state=123)
scores0 = cross_validation.cross_val_score(
    clf, ifeatures, labels, cv=cv)
print('Accuracy (5 fold x-val) with Logistic Regression [image features]: {:.1%}'.format(
    scores0.mean()))
from sklearn.cluster import KMeans
from mahotas.features import surf
print('Computing SURF descriptors...')
alldescriptors = []
for im,_ in images():
    im = mh.imread(im, as_grey=True)
    im = im.astype(np.uint8)
    # To use dense sampling, you can try the following line:
    # alldescriptors.append(surf.dense(im, spacing=16))
    alldescriptors.append(surf.surf(im, descriptor_only=True))
print('Descriptor computation complete.')
# Bag-of-visual-words: cluster descriptors into k visual words.
k = 256
km = KMeans(k)
concatenated = np.concatenate(alldescriptors)
print('Number of descriptors: {}'.format(
    len(concatenated)))
# Subsample descriptors (every 64th) to keep K-means tractable.
concatenated = concatenated[::64]
print('Clustering with K-means...')
km.fit(concatenated)
# Each image becomes a histogram of its descriptors' cluster assignments.
sfeatures = []
for d in alldescriptors:
    c = km.predict(d)
    sfeatures.append(np.bincount(c, minlength=k))
sfeatures = np.array(sfeatures, dtype=float)
print('predicting...')
score_SURF = cross_validation.cross_val_score(
    clf, sfeatures, labels, cv=cv).mean()
print('Accuracy (5 fold x-val) with Logistic Regression [SURF features]: {:.1%}'.format(
    score_SURF.mean()))
print('Performing classification with all features combined...')
allfeatures = np.hstack([sfeatures, ifeatures])
score_SURF_global = cross_validation.cross_val_score(
    clf, allfeatures, labels, cv=cv).mean()
print('Accuracy (5 fold x-val) with Logistic Regression [All features]: {:.1%}'.format(
    score_SURF_global.mean()))
| mit |
smartscheduling/scikit-learn-categorical-tree | sklearn/tests/test_isotonic.py | 16 | 11166 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
    # regression test of missing sorting of sample-weights
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    # Shuffle (x, y, w) jointly, fit on the shuffled data, then transform the
    # ORIGINAL x: results must match fitting on the unshuffled data.
    x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
    y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
    y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
    assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, 1.5, 2.77, 8.99, 8.99, 50]
    # A non-decreasing relationship must be detected without any warning.
    detected = assert_no_warnings(check_increasing, xs, ys)
    assert_true(detected)
def test_check_increasing_up_extreme():
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, 1, 2, 3, 4, 5]
    # A perfectly linear increasing relationship: increasing=True, no warning.
    detected = assert_no_warnings(check_increasing, xs, ys)
    assert_true(detected)
def test_check_increasing_down():
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1.5, -2.77, -8.99, -8.99, -50]
    # A decreasing relationship must yield increasing=False, with no warning.
    detected = assert_no_warnings(check_increasing, xs, ys)
    assert_false(detected)
def test_check_increasing_down_extreme():
    xs = [0, 1, 2, 3, 4, 5]
    ys = [0, -1, -2, -3, -4, -5]
    # A perfectly linear decreasing relationship: increasing=False, no warning.
    detected = assert_no_warnings(check_increasing, xs, ys)
    assert_false(detected)
def test_check_ci_warn():
    # Alternating signs: the trend is ambiguous, so check_increasing must
    # warn about the confidence interval while still returning a decision.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, 2, -3, 4, -5]
    # Check that we got increasing=False and CI interval warning
    is_increasing = assert_warns_message(UserWarning, "interval",
                                         check_increasing,
                                         x, y)
    assert_false(is_increasing)
def test_isotonic_regression():
    # Basic pool-adjacent-violators result on a small sequence.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    y_ = np.array([3, 6, 6, 8, 8, 8, 10])
    assert_array_equal(y_, isotonic_regression(y))
    x = np.arange(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    ir.fit(x, y)
    # fit().transform() and fit_transform() must agree, as must transform/predict.
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(ir.transform(x), ir.predict(x))
    # check that it is immune to permutation
    perm = np.random.permutation(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    assert_array_equal(ir.fit_transform(x[perm], y[perm]),
                       ir.fit_transform(x, y)[perm])
    assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
    # check we don't crash when all x are equal:
    ir = IsotonicRegression()
    assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
    # Setup examples with ties on minimum
    x = [0, 1, 1, 2, 3, 4, 5]
    y = [0, 1, 2, 3, 4, 5, 6]
    # Tied x values (the two x == 1 entries) are averaged: (1 + 2) / 2 = 1.5.
    y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
    # Check that we get identical results for fit/transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
    # Setup examples with ties on maximum
    x = [1, 2, 3, 4, 5, 5]
    y = [1, 2, 3, 4, 5, 6]
    # Tied x values (the two x == 5 entries) are averaged: (5 + 6) / 2 = 5.5.
    y_true = [1, 2, 3, 4, 5.5, 5.5]
    # Check that we get identical results for fit/transform and fit_transform
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
    """
    Test isotonic regression fit, transform and fit_transform
    against the "secondary" ties method and "pituitary" data from R
    "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
    Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
    (PAVA) and Active Set Methods

    Set values based on pituitary example and
    the following R command detailed in the paper above:
    > library("isotone")
    > data("pituitary")
    > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
    > res1$x

    `isotone` version: 1.0-2, 2014-09-07
    R version: R version 3.1.1 (2014-07-10)
    """
    x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
    y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
    # Reference values produced by the R 'isotone' package (see docstring).
    y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
              22.22222, 22.22222, 22.22222, 24.25, 24.25]
    # Check fit, transform and fit_transform (4-decimal tolerance vs. R output)
    ir = IsotonicRegression()
    ir.fit(x, y)
    assert_array_almost_equal(ir.transform(x), y_true, 4)
    assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
    # With increasing=False the fitted values must be non-increasing:
    # every consecutive difference y_[i] - y_[i+1] is >= 0.
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    y_ = IsotonicRegression(increasing=False).fit_transform(
        np.arange(len(y)), y)
    assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
    # Set y and x for decreasing
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    x = np.arange(len(y))
    # Create model and fit_transform; increasing='auto' must pick the
    # decreasing direction without emitting any warning.
    ir = IsotonicRegression(increasing='auto')
    y_ = assert_no_warnings(ir.fit_transform, x, y)
    # Check that relationship decreases
    is_increasing = y_[0] < y_[-1]
    assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing data (mirror of the decreasing test above).
    y = np.array([5, 6.1, 6, 7, 10, 9, 10])
    x = np.arange(len(y))
    # Create model and fit_transform; increasing='auto' must pick the
    # increasing direction without emitting any warning.
    ir = IsotonicRegression(increasing='auto')
    y_ = assert_no_warnings(ir.fit_transform, x, y)
    # Check that relationship increases
    is_increasing = y_[0] < y_[-1]
    assert_true(is_increasing)
def test_assert_raises_exceptions():
    # Invalid inputs must raise ValueError:
    # mismatched sample_weight length, mismatched x/y lengths, 2-D x,
    # and transform() on 2-D input.
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
    assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
    assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
    # check if default value of sample_weight parameter is one
    ir = IsotonicRegression()
    # random test data
    rng = np.random.RandomState(42)
    n = 100
    x = np.arange(n)
    y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
    # Passing an explicit all-ones weight vector must be equivalent to
    # passing no sample_weight at all.
    weights = np.ones(n)
    y_set_value = ir.fit_transform(x, y, sample_weight=weights)
    y_default_value = ir.fit_transform(x, y)
    assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # y_min/y_max must clamp the fitted values to [2, 4].
    ir = IsotonicRegression(y_min=2, y_max=4)
    n = 6
    x = np.arange(n)
    y = np.arange(n)
    # identity ramp 0..5 clipped into [2, 4]
    y_test = [2, 2, 2, 3, 4, 4]
    y_result = np.round(ir.fit_transform(x, y))
    assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
    # Non-uniform sample weights must produce the expected pooled solution.
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    # weighted PAVA pools the middle block to its weighted mean (13.95)
    expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
    received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
    assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
    # out_of_bounds="raise": predicting outside the training domain errors.
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)
    # Check that an exception is thrown for one point below min(x) and one
    # above max(x)
    assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
    # out_of_bounds="clip": out-of-domain predictions are clamped to the
    # boundary values learned during fit.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    # Predictions far outside the domain ...
    outside = ir.predict([min(x) - 10, max(x) + 10])
    inside = ir.predict(x)
    # ... must share the training predictions' extremes.
    assert_equal(max(outside), max(inside))
    assert_equal(min(outside), min(inside))
def test_isotonic_regression_oob_nan():
    # out_of_bounds="nan": out-of-domain predictions become NaN.
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
    ir.fit(x, y)
    # Predict from training and test x and check that we have two NaNs.
    y1 = ir.predict([min(x) - 10, max(x) + 10])
    assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
    # An unrecognized out_of_bounds value must be rejected at fit time.
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
    # Make sure that we throw an error for bad out_of_bounds value
    assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
    # Corrupting out_of_bounds after a successful fit must still be caught
    # when transform is called.
    # Set y and x
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    # Make sure that we throw an error for bad out_of_bounds value in transform
    ir.fit(x, y)
    ir.out_of_bounds = "xyz"
    assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
    # A fitted model must survive a pickle round-trip unchanged.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    # Create model and fit
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
    ir.fit(x, y)
    ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
    ir2 = pickle.loads(ir_ser)
    # predictions before and after serialization must match exactly
    np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
    # A duplicated x value at the domain minimum used to yield non-finite
    # predictions; all outputs must be finite.
    x = [0, 0, 1]
    y = [0, 0, 1]
    ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
    ir.fit(x, y)
    assert_true(np.all(np.isfinite(ir.predict(x))))
def test_isotonic_zero_weight_loop():
    # Zero sample weights must not send the solver into an infinite loop.
    # Test from @ogrisel's issue:
    # https://github.com/scikit-learn/scikit-learn/issues/4297
    # Get deterministic RNG with seed
    rng = np.random.RandomState(42)
    # Create regression and samples
    regression = IsotonicRegression()
    n_samples = 50
    x = np.linspace(-3, 3, n_samples)
    y = x + rng.uniform(size=n_samples)
    # Get some random weights and zero out
    w = rng.uniform(size=n_samples)
    w[5:8] = 0
    # NOTE(review): fit is invoked twice below; the second call is the one
    # documented to hang on regression — confirm the first is intentional.
    regression.fit(x, y, sample_weight=w)
    # This will hang in failure case.
    regression.fit(x, y, sample_weight=w)
# Allow running this test module directly via the nose runner.
if __name__ == "__main__":
    import nose
    nose.run(argv=['', __file__])
| bsd-3-clause |
pchmieli/h2o-3 | h2o-py/h2o/estimators/estimator_base.py | 1 | 11755 | from ..model.model_base import ModelBase
from ..model.autoencoder import H2OAutoEncoderModel
from ..model.binomial import H2OBinomialModel
from ..model.clustering import H2OClusteringModel
from ..model.dim_reduction import H2ODimReductionModel
from ..model.multinomial import H2OMultinomialModel
from ..model.regression import H2ORegressionModel
from ..model.metrics_base import *
from ..h2o import H2OConnection, H2OJob, H2OFrame
import h2o
import inspect
import warnings
import types
class EstimatorAttributeError(AttributeError):
    """Raised when a named method is not available on an estimator.

    Parameters
    ----------
    obj : object
        The estimator instance lacking the method.
    method : str
        The name of the missing method.
    """
    def __init__(self, obj, method):
        # BUG FIX: the original called super(AttributeError, self), which
        # starts MRO lookup *after* AttributeError and therefore skips it;
        # super() must always be given the class being defined.
        super(EstimatorAttributeError, self).__init__("No {} method for {}".format(method, obj.__class__.__name__))
class H2OEstimator(ModelBase):
"""H2O Estimators
H2O Estimators implement the following methods for model construction:
* start - Top-level user-facing API for asynchronous model build
* join - Top-level user-facing API for blocking on async model build
* train - Top-level user-facing API for model building.
* fit - Used by scikit-learn.
Because H2OEstimator instances are instances of ModelBase, these objects can use the
H2O model API.
"""
def start(self, x, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None, validation_frame=None, **params):
    """Asynchronous model build by specifying the predictor columns, response column, and any
    additional frame-specific values.

    To block for results, call join.

    Parameters
    ----------
      x : list
        A list of column names or indices indicating the predictor columns.
      y : str
        An index or a column name indicating the response column.
      training_frame : H2OFrame
        The H2OFrame having the columns indicated by x and y (as well as any
        additional columns specified by fold, offset, and weights).
      offset_column : str, optional
        The name or index of the column in training_frame that holds the offsets.
      fold_column : str, optional
        The name or index of the column in training_frame that holds the per-row fold
        assignments.
      weights_column : str, optional
        The name or index of the column in training_frame that holds the per-row weights.
      validation_frame : H2OFrame, optional
        H2OFrame with validation data to be scored on while training.
      params : optional
        Extra algorithm-specific parameters forwarded verbatim to train.
    """
    # Setting _future makes the underlying train/_model_build return without
    # polling the job; join() later blocks on it.
    self._future = True
    self.train(x=x,
               y=y,
               training_frame=training_frame,
               offset_column=offset_column,
               fold_column=fold_column,
               weights_column=weights_column,
               validation_frame=validation_frame,
               **params)
def join(self):
    """Block until the asynchronous model build started by `start` completes."""
    self._future = False
    self._job.poll()
    self._job = None
def train(self, x, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None, validation_frame=None, **params):
    """Train the H2O model by specifying the predictor columns, response column, and any
    additional frame-specific values.

    Parameters
    ----------
      x : list
        A list of column names or indices indicating the predictor columns.
      y : str
        An index or a column name indicating the response column.
      training_frame : H2OFrame
        The H2OFrame having the columns indicated by x and y (as well as any
        additional columns specified by fold, offset, and weights).
      offset_column : str, optional
        The name or index of the column in training_frame that holds the offsets.
      fold_column : str, optional
        The name or index of the column in training_frame that holds the per-row fold
        assignments.
      weights_column : str, optional
        The name or index of the column in training_frame that holds the per-row weights.
      validation_frame : H2OFrame, optional
        H2OFrame with validation data to be scored on while training.
    """
    # Snapshot the call arguments; keys filtered below are locals that are
    # not model parameters.
    algo_params = locals()
    parms = self._parms.copy()
    # COMPAT FIX: .items() instead of the Python-2-only .iteritems(), so
    # this works on both Python 2 and Python 3.
    parms.update({k: v for k, v in algo_params.items() if k not in ["self", "params", "algo_params", "parms"]})
    y = algo_params["y"]
    tframe = algo_params["training_frame"]
    if tframe is None: raise ValueError("Missing training_frame")
    if y is not None:
        if isinstance(y, (list, tuple)):
            if len(y) == 1: parms["y"] = y[0]
            else: raise ValueError('y must be a single column reference')
        # classification iff the response column is categorical
        self._estimator_type = "classifier" if tframe[y].isfactor() else "regressor"
    self.build_model(parms)
def build_model(self, algo_params):
    """Validate supervised/unsupervised constraints, then launch the build.

    Pops the frame/column arguments out of algo_params and forwards the
    remainder to _model_build.
    """
    if algo_params["training_frame"] is None: raise ValueError("Missing training_frame")
    x = algo_params.pop("x")
    y = algo_params.pop("y", None)
    training_frame = algo_params.pop("training_frame")
    validation_frame = algo_params.pop("validation_frame", None)
    is_auto_encoder = (algo_params is not None) and ("autoencoder" in algo_params and algo_params["autoencoder"])
    algo = self._compute_algo()
    # unsupervised algos take no response column
    is_unsupervised = is_auto_encoder or algo == "pca" or algo == "svd" or algo == "kmeans" or algo == "glrm"
    if is_auto_encoder and y is not None: raise ValueError("y should not be specified for autoencoder.")
    if not is_unsupervised and y is None: raise ValueError("Missing response")
    self._model_build(x, y, training_frame, validation_frame, algo_params)
def _model_build(self, x, y, tframe, vframe, kwargs):
    """Assemble the REST payload and POST the model-build job.

    Mutates `kwargs` in place; blocks on the job unless `self._future` is
    set (asynchronous build started via `start`).
    """
    kwargs['training_frame'] = tframe
    if vframe is not None: kwargs["validation_frame"] = vframe
    if isinstance(y, int): y = tframe.names[y]
    if y is not None: kwargs['response_column'] = y
    if not isinstance(x, (list, tuple)): x = [x]
    if isinstance(x[0], int):
        x = [tframe.names[i] for i in x]
    offset = kwargs["offset_column"]
    folds = kwargs["fold_column"]
    weights = kwargs["weights_column"]
    # Columns that are neither predictors nor response/special columns are
    # explicitly ignored by the backend.
    ignored_columns = list(set(tframe.names) - set(x + [y, offset, folds, weights]))
    kwargs["ignored_columns"] = None if ignored_columns == [] else [h2o.h2o._quoted(col) for col in ignored_columns]
    # Drop None-valued entries and replace H2OFrames with their frame ids.
    kwargs = dict([(k, (kwargs[k]._frame()).frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if kwargs[k] is not None])
    algo = self._compute_algo()
    model = H2OJob(H2OConnection.post_json("ModelBuilders/" + algo, **kwargs), job_type=(algo + " Model Build"))
    if self._future:
        self._job = model
        return
    model.poll()
    # IDIOM FIX: membership test directly on the dict (no .keys() call).
    if '_rest_version' in kwargs: model_json = H2OConnection.get_json("Models/" + model.dest_key, _rest_version=kwargs['_rest_version'])["models"][0]
    else: model_json = H2OConnection.get_json("Models/" + model.dest_key)["models"][0]
    self._resolve_model(model.dest_key, model_json)
def _resolve_model(self, model_id, model_json):
    """Turn a raw model JSON payload into a typed model and absorb it.

    Instantiates the model class matching the JSON's model category, wraps
    each metrics payload in its metrics class, then mixes the model class's
    methods into self and copies the resolved state over.
    """
    metrics_class, model_class = H2OEstimator._metrics_class(model_json)
    m = model_class()
    m._id = model_id
    m._model_json = model_json
    m._metrics_class = metrics_class
    m._parms = self._parms
    if model_id is not None and model_json is not None and metrics_class is not None:
        # build Metric objects out of each metrics
        for metric in ["training_metrics", "validation_metrics", "cross_validation_metrics"]:
            if metric in model_json["output"]:
                if model_json["output"][metric] is not None:
                    if metric == "cross_validation_metrics":
                        m._is_xvalidated = True
                    model_json["output"][metric] = metrics_class(model_json["output"][metric], metric, model_json["algo"])
        if m._is_xvalidated: m._xval_keys = [i["name"] for i in model_json["output"]["cross_validation_models"]]
        # build a useful dict of the params
        for p in m._model_json["parameters"]: m.parms[p["label"]] = p
    # graft the typed model's methods onto self, then adopt its state
    H2OEstimator.mixin(self, model_class)
    self.__dict__.update(m.__dict__.copy())
def _compute_algo(self):
    """Map the concrete estimator class name to its H2O REST algo name.

    Returns None for an unrecognized class name, matching the original
    if-chain's fall-through behavior.
    """
    algo_by_class = {
        "H2ODeepLearningEstimator": "deeplearning",
        "H2OAutoEncoderEstimator": "deeplearning",
        "H2OGradientBoostingEstimator": "gbm",
        "H2OGeneralizedLinearEstimator": "glm",
        "H2OGeneralizedLowRankEstimator": "glrm",
        "H2OKMeansEstimator": "kmeans",
        "H2ONaiveBayesEstimator": "naivebayes",
        "H2ORandomForestEstimator": "drf",
        "H2OPCA": "pca",
        "H2OSVD": "svd",
    }
    return algo_by_class.get(self.__class__.__name__)
@staticmethod
def mixin(obj, cls):
    """Bind every plain function defined on `cls` onto instance `obj`.

    Dunder names and non-function attributes are skipped; each function is
    bound via the descriptor protocol so it behaves like a method of obj.
    """
    for name in cls.__dict__:
        # skip dunders and anything that is not a plain function
        if name.startswith('__') and name.endswith('__') or not type(cls.__dict__[name]) == types.FunctionType:
            continue
        obj.__dict__[name] = cls.__dict__[name].__get__(obj)
##### Scikit-learn Interface Methods #####
def fit(self, X, y=None, **params):
    """Fit an H2O model as part of a scikit-learn pipeline or grid search.

    A warning will be issued if a caller other than sklearn attempts to use this method.

    Parameters
    ----------
      X : H2OFrame
        An H2OFrame consisting of the predictor variables.
      y : H2OFrame, optional
        An H2OFrame consisting of the response variable.
      params : optional
        Extra arguments.

    Returns
    -------
      The current instance of H2OEstimator for method chaining.
    """
    # Walk the call stack: warn unless some caller frame comes from sklearn.
    stk = inspect.stack()[1:]
    warn = True
    for s in stk:
        mod = inspect.getmodule(s[0])
        if mod:
            warn = "sklearn" not in mod.__name__
            if not warn: break
    if warn:
        warnings.warn("\n\n\t`fit` is not recommended outside of the sklearn framework. Use `train` instead.", UserWarning, stacklevel=2)
    # Combine predictors and response into one frame, then delegate to train
    # using column names only.
    training_frame = X.cbind(y) if y is not None else X
    X = X.names
    y = y.names[0] if y is not None else None
    self.train(X, y, training_frame, **params)
    return self
def get_params(self, deep=True):
    """Useful method for obtaining parameters for this estimator. Used primarily for
    sklearn Pipelines and sklearn grid search.

    Parameters
    ----------
      deep : bool, optional
        If True, return parameters of all sub-objects that are estimators.

    Returns
    -------
      A dict of parameters
    """
    out = dict()
    # COMPAT FIX: .items() instead of the Python-2-only .iteritems(), so
    # this works on both Python 2 and Python 3.
    for key, value in self.parms.items():
        if deep and isinstance(value, H2OEstimator):
            # flatten nested estimator params with the sklearn
            # "<param>__<subparam>" convention
            deep_items = value.get_params().items()
            out.update((key + '__' + k, val) for k, val in deep_items)
        out[key] = value
    return out
def set_params(self, **parms):
    """Used by sklearn for updating parameters during grid search.

    Parameters
    ----------
      parms : dict
        A dictionary of parameters that will be set on this model.

    Returns
    -------
      Returns self, the current estimator object with the parameters all set as desired.
    """
    self._parms.update(parms)
    return self
@staticmethod
def _metrics_class(model_json):
    """Resolve the (metrics class, model class) pair for a model payload.

    Parameters
    ----------
      model_json : dict
        Raw model JSON; its output.model_category selects the classes.

    Raises
    ------
      NotImplementedError for an unknown model category.
    """
    model_type = model_json["output"]["model_category"]
    if model_type == "Binomial": metrics_class = H2OBinomialModelMetrics; model_class = H2OBinomialModel
    elif model_type == "Clustering": metrics_class = H2OClusteringModelMetrics; model_class = H2OClusteringModel
    elif model_type == "Regression": metrics_class = H2ORegressionModelMetrics; model_class = H2ORegressionModel
    elif model_type == "Multinomial": metrics_class = H2OMultinomialModelMetrics; model_class = H2OMultinomialModel
    elif model_type == "AutoEncoder": metrics_class = H2OAutoEncoderModelMetrics; model_class = H2OAutoEncoderModel
    elif model_type == "DimReduction": metrics_class = H2ODimReductionModelMetrics; model_class = H2ODimReductionModel
    else: raise NotImplementedError(model_type)
    return [metrics_class, model_class]
ningchi/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
# Shared fixture: standardized diabetes data restricted to 5 columns.
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
# (F holds univariate regression scores used as ground truth below)
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    # Check lasso stability path
    # Load diabetes data and add noisy features
    scaling = 0.3
    coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
                                                  random_state=42,
                                                  n_resampling=30)
    # the 3 best features by stability score must match the 3 best by
    # univariate F-score
    assert_array_equal(np.argsort(F)[-3:],
                       np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
    # Check randomized lasso
    scaling = 0.3
    selection_threshold = 0.5

    # or with 1 alpha
    clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    # top-3 ranking must agree with univariate F-scores
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # or with many alphas
    clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # transform keeps only above-threshold features; inverse_transform
    # restores the original width
    X_r = clf.transform(X)
    X_full = clf.inverse_transform(X_r)
    assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
    assert_equal(X_full.shape, X.shape)

    # alpha='aic' keeps every feature (score 1.0 each)
    clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                          scaling=scaling)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(feature_scores, X.shape[1] * [1.])

    # scaling outside (0, 1) is invalid
    clf = RandomizedLasso(verbose=False, scaling=-0.1)
    assert_raises(ValueError, clf.fit, X, y)

    clf = RandomizedLasso(verbose=False, scaling=1.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
    # Check randomized sparse logistic regression
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # restrict to a binary problem by dropping class 2
    keep = y != 2
    X = X[keep]
    y = y[keep]
    F, _ = f_classif(X, y)

    scaling = 0.3
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    X_orig = X.copy()
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(X, X_orig)  # fit does not modify X
    # feature ranking must agree with univariate F-scores
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))

    # a grid of C values must produce the same ranking
    clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                       random_state=42, scaling=scaling,
                                       n_resampling=50, tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
    # Check randomized sparse logistic regression on sparse data
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # restrict to a binary problem by dropping class 2
    X = X[y != 2]
    y = y[y != 2]

    # center here because sparse matrices are usually not centered
    X, y, _, _, _ = center_data(X, y, True, True)

    X_sp = sparse.csr_matrix(X)

    F, _ = f_classif(X, y)

    scaling = 0.3
    # identical estimators fit on dense and sparse input ...
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores_sp = clf.fit(X_sp, y).scores_
    # ... must produce identical feature scores
    assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
jkthompson/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 69 | 31676 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
    'Return *True* if *c* can be converted to *RGB*'
    # Delegate to the shared converter; a ValueError means "not a color".
    try:
        colorConverter.to_rgb(c)
    except ValueError:
        return False
    return True
def rgb2hex(rgb):
    'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
    # int(round(...)): on Python 2, round() returns a float, which the %x
    # format rejects/deprecates; int() keeps this valid on 2 and 3 alike.
    return '#%02x%02x%02x' % tuple([int(round(val * 255)) for val in rgb])
# Matches an entire '#rrggbb' hex color string (raw string: \A and \Z are
# regex anchors, not string escapes).
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
    """
    Take a hex string *s* and return the corresponding rgb 3-tuple
    Example: #efefef -> (0.93725, 0.93725, 0.93725)
    """
    # basestring: this module targets Python 2
    if not isinstance(s, basestring):
        raise TypeError('hex2color requires a string argument')
    if hexColorPattern.match(s) is None:
        raise ValueError('invalid hex color string "%s"' % s)
    # split '#rrggbb' into three 2-digit hex fields, scale each to 0-1
    return tuple([int(n, 16) / 255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
    """
    Provides methods for converting color specifications to *RGB* or *RGBA*

    Caching is used for more efficient conversion upon repeated calls
    with the same argument.

    Ordinarily only the single instance instantiated in this module,
    *colorConverter*, is needed.
    """
    # single-letter builtin colors -> rgb tuples
    colors = {
        'b' : (0.0, 0.0, 1.0),
        'g' : (0.0, 0.5, 0.0),
        'r' : (1.0, 0.0, 0.0),
        'c' : (0.0, 0.75, 0.75),
        'm' : (0.75, 0, 0.75),
        'y' : (0.75, 0.75, 0),
        'k' : (0.0, 0.0, 0.0),
        'w' : (1.0, 1.0, 1.0),
        }
    # class-level memoization cache: arg -> rgb tuple (shared by instances)
    cache = {}

    def to_rgb(self, arg):
        """
        Returns an *RGB* tuple of three floats from 0-1.

        *arg* can be an *RGB* or *RGBA* sequence or a string in any of
        several forms:

            1) a letter from the set 'rgbcmykw'
            2) a hex color string, like '#00FFFF'
            3) a standard name, like 'aqua'
            4) a float, like '0.4', indicating gray on a 0-1 scale

        if *arg* is *RGBA*, the *A* will simply be discarded.
        """
        # fast path: cache hit; unhashable sequences are retried as tuples
        try: return self.cache[arg]
        except KeyError: pass
        except TypeError: # could be unhashable rgb seq
            arg = tuple(arg)
        try: return self.cache[arg]
        except KeyError: pass
        except TypeError:
            raise ValueError(
                'to_rgb: arg "%s" is unhashable even inside a tuple'
                % (str(arg),))

        try:
            if cbook.is_string_like(arg):
                # named color -> hex (or leave as-is for hex / gray strings)
                color = self.colors.get(arg, None)
                if color is None:
                    str1 = cnames.get(arg, arg)
                    if str1.startswith('#'):
                        color = hex2color(str1)
                    else:
                        # grayscale given as a string float in [0, 1]
                        fl = float(arg)
                        if fl < 0 or fl > 1:
                            raise ValueError(
                                'gray (string) must be in range 0-1')
                        color = tuple([fl]*3)
            elif cbook.iterable(arg):
                if len(arg) > 4 or len(arg) < 3:
                    raise ValueError(
                        'sequence length is %d; must be 3 or 4'%len(arg))
                # RGBA alpha is silently dropped here
                color = tuple(arg[:3])
                if [x for x in color if (float(x) < 0) or (x > 1)]:
                    # This will raise TypeError if x is not a number.
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                raise ValueError('cannot convert argument to rgb sequence')

            self.cache[arg] = color

        except (KeyError, ValueError, TypeError), exc:
            raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
            # Error messages could be improved by handling TypeError
            # separately; but this should be rare and not too hard
            # for the user to figure out as-is.

        return color

    def to_rgba(self, arg, alpha=None):
        """
        Returns an *RGBA* tuple of four floats from 0-1.

        For acceptable values of *arg*, see :meth:`to_rgb`.
        If *arg* is an *RGBA* sequence and *alpha* is not *None*,
        *alpha* will replace the original *A*.
        """
        try:
            if not cbook.is_string_like(arg) and cbook.iterable(arg):
                if len(arg) == 4:
                    if [x for x in arg if (float(x) < 0) or (x > 1)]:
                        # This will raise TypeError if x is not a number.
                        raise ValueError('number in rbga sequence outside 0-1 range')
                    if alpha is None:
                        return tuple(arg)
                    if alpha < 0.0 or alpha > 1.0:
                        raise ValueError("alpha must be in range 0-1")
                    # scale (not replace) the existing alpha channel
                    return arg[0], arg[1], arg[2], arg[3] * alpha
                r,g,b = arg[:3]
                if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                r,g,b = self.to_rgb(arg)
            if alpha is None:
                alpha = 1.0
            return r,g,b,alpha
        except (TypeError, ValueError), exc:
            raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))

    def to_rgba_array(self, c, alpha=None):
        """
        Returns a numpy array of *RGBA* tuples.

        Accepts a single mpl color spec or a sequence of specs.

        Special case to handle "no color": if *c* is "none" (case-insensitive),
        then an empty array will be returned.  Same for an empty list.
        """
        try:
            # strings only: 'none' means "no color" -> empty array
            if c.lower() == 'none':
                return np.zeros((0,4), dtype=np.float_)
        except AttributeError:
            pass
        if len(c) == 0:
            return np.zeros((0,4), dtype=np.float_)
        try:
            # single color spec (fast path)
            result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
        except ValueError:
            if isinstance(c, np.ndarray):
                if c.ndim != 2 and c.dtype.kind not in 'SU':
                    raise ValueError("Color array must be two-dimensional")
            # sequence of color specs: convert one by one
            result = np.zeros((len(c), 4))
            for i, cc in enumerate(c):
                result[i] = self.to_rgba(cc, alpha)  # change in place
        return np.asarray(result, np.float_)
# Module-level singleton converter shared by the rest of matplotlib.
colorConverter = ColorConverter()
def makeMappingArray(N, data):
    """Create an *N* -element 1-d lookup table

    *data* represented by a list of x,y0,y1 mapping correspondences.
    Each element in this list represents how a value between 0 and 1
    (inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y are to allow
    for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x
    <= to that given, and y1 is the value to be used for x > than
    that given). The list must start with x=0, end with x=1, and
    all values of x must be in increasing order. Values between
    the given mapping points are determined by simple linear interpolation.

    The function returns an array "result" where ``result[x*(N-1)]``
    gives the closest value for values of x between 0 and 1.
    """
    try:
        adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertable to an array")
    shape = adata.shape
    # BUG FIX: the original used `and` here, which let nx4 (or wider) data
    # slip through validation; either condition alone must reject the input.
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError("data must be nx3 format")

    x = adata[:, 0]
    y0 = adata[:, 1]
    y1 = adata[:, 2]

    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
            "data mapping points must start with x=0. and end with x=1")
    # COMPAT: np.any instead of the removed np.sometrue alias
    if np.any(np.sort(x) - x):
        raise ValueError(
            "data mapping points must have x in increasing order")
    # begin generation of lookup table
    x = x * (N - 1)
    # COMPAT: plain float instead of the removed np.float alias
    lut = np.zeros((N,), float)
    xind = np.arange(float(N))
    ind = np.searchsorted(x, xind)[1:-1]

    # linear interpolation between successive (y1[left], y0[right]) pairs
    lut[1:-1] = (((xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1]))
                 * (y0[ind] - y1[ind - 1]) + y1[ind - 1])
    lut[0] = y1[0]
    lut[-1] = y0[-1]
    # BUG FIX: np.clip returns a new array; the original discarded the
    # result, so the lut was never actually confined to [0, 1].  Clip in
    # place via the `out` argument instead.
    np.clip(lut, 0.0, 1.0, out=lut)
    return lut
class Colormap:
    """Base class for all scalar to rgb mappings

    Subclasses must implement :meth:`_init` to build the lookup table
    ``self._lut`` (N color rows plus three special rows for under/over/bad).

    Important methods:

        * :meth:`set_bad`
        * :meth:`set_under`
        * :meth:`set_over`
    """
    def __init__(self, name, N=256):
        """
        Public class attributes:

            :attr:`N` : number of rgb quantization levels
            :attr:`name` : name of colormap
        """
        self.name = name
        self.N = N
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0)  # If bad, don't paint anything.
        self._rgba_under = None
        self._rgba_over = None
        # indices of the three special LUT rows appended after the N colors
        self._i_under = N
        self._i_over = N + 1
        self._i_bad = N + 2
        # the LUT is built lazily on first use
        self._isinit = False

    def __call__(self, X, alpha=1.0, bytes=False):
        """
        *X* is either a scalar or an array (of any dimension).
        If scalar, a tuple of rgba values is returned, otherwise
        an array with the new shape = oldshape+(4,). If the X-values
        are integers, then they are used as indices into the array.
        If they are floating point, then they must be in the
        interval (0.0, 1.0).
        Alpha must be a scalar.
        If bytes is False, the rgba values will be floats on a
        0-1 scale; if True, they will be uint8, 0-255.
        """
        # (`bytes` shadows the builtin, but the name is public API)
        if not self._isinit: self._init()
        alpha = min(alpha, 1.0)  # alpha must be between 0 and 1
        alpha = max(alpha, 0.0)
        # apply alpha to the color rows only, not the under/over/bad rows
        self._lut[:-3, -1] = alpha
        mask_bad = None
        if not cbook.iterable(X):
            vtype = 'scalar'
            xa = np.array([X])
        else:
            vtype = 'array'
            # masked entries map to the "bad" color
            xma = ma.asarray(X)
            xa = xma.filled(0)
            mask_bad = ma.getmask(xma)
        if xa.dtype.char in np.typecodes['Float']:
            np.putmask(xa, xa == 1.0, 0.9999999)  # Treat 1.0 as slightly less than 1.
            # The following clip is fast, and prevents possible
            # conversion of large positive values to negative integers.
            if NP_CLIP_OUT:
                np.clip(xa * self.N, -1, self.N, out=xa)
            else:
                xa = np.clip(xa * self.N, -1, self.N)
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        np.putmask(xa, xa > self.N - 1, self._i_over)
        np.putmask(xa, xa < 0, self._i_under)
        if mask_bad is not None and mask_bad.shape == xa.shape:
            np.putmask(xa, mask_bad, self._i_bad)
        if bytes:
            lut = (self._lut * 255).astype(np.uint8)
        else:
            lut = self._lut
        rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
        lut.take(xa, axis=0, mode='clip', out=rgba)
        # twice as fast as lut[xa];
        # using the clip or wrap mode and providing an
        # output array speeds it up a little more.
        if vtype == 'scalar':
            rgba = tuple(rgba[0, :])
        return rgba

    def set_bad(self, color='k', alpha=1.0):
        '''Set color to be used for masked values.
        '''
        self._rgba_bad = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()

    def set_under(self, color='k', alpha=1.0):
        '''Set color to be used for low out-of-range values.
           Requires norm.clip = False
        '''
        self._rgba_under = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()

    def set_over(self, color='k', alpha=1.0):
        '''Set color to be used for high out-of-range values.
           Requires norm.clip = False
        '''
        self._rgba_over = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()

    def _set_extremes(self):
        # under/over default to the first/last color when not set explicitly
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N - 1]
        self._lut[self._i_bad] = self._rgba_bad

    def _init(self):
        '''Generate the lookup table, self._lut'''
        # BUG FIX: the original signature was `def _init():` (missing self),
        # so any call to self._init() on the base class raised TypeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError("Abstract class only")

    def is_gray(self):
        # a colormap is gray when all three channels are identical
        if not self._isinit: self._init()
        return (np.alltrue(self._lut[:, 0] == self._lut[:, 1])
                and np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
class LinearSegmentedColormap(Colormap):
    """Colormap objects based on lookup tables using linear segments.
    The lookup table is generated using linear interpolation for each
    primary color, with the 0-1 domain divided into any number of
    segments.
    """
    def __init__(self, name, segmentdata, N=256):
        """Create color map from linear mapping segments
        segmentdata argument is a dictionary with a red, green and blue
        entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
        forming rows in a table.
        Example: suppose you want red to increase from 0 to 1 over
        the bottom half, green to do the same over the middle half,
        and blue over the top half.  Then you would use::
            cdict = {'red':   [(0.0,  0.0, 0.0),
                               (0.5,  1.0, 1.0),
                               (1.0,  1.0, 1.0)],
                     'green': [(0.0,  0.0, 0.0),
                               (0.25, 0.0, 0.0),
                               (0.75, 1.0, 1.0),
                               (1.0,  1.0, 1.0)],
                     'blue':  [(0.0,  0.0, 0.0),
                               (0.5,  0.0, 0.0),
                               (1.0,  1.0, 1.0)]}
        Each row in the table for a given color is a sequence of
        *x*, *y0*, *y1* tuples.  In each sequence, *x* must increase
        monotonically from 0 to 1.  For any input value *z* falling
        between *x[i]* and *x[i+1]*, the output value of a given color
        will be linearly interpolated between *y1[i]* and *y0[i+1]*::
            row i:   x  y0  y1
                           /
                          /
            row i+1: x  y0  y1
        Hence y0 in the first row and y1 in the last row are never used.
        .. seealso::
            :func:`makeMappingArray`
        """
        self.monochrome = False  # True only if all colors in map are
                                 # identical; needed for contouring.
        Colormap.__init__(self, name, N)
        self._segmentdata = segmentdata
    def _init(self):
        # Build the lookup table.  Columns 0-2 are R, G, B and column 3
        # (alpha) stays at 1.0; the last three rows are reserved for the
        # under/over/bad colors filled in by _set_extremes().
        # BUG FIX: np.float was removed from NumPy (1.24); it was always
        # an alias of the builtin float, which is used directly now.
        self._lut = np.ones((self.N + 3, 4), float)
        for i, primary in enumerate(('red', 'green', 'blue')):
            self._lut[:-3, i] = makeMappingArray(self.N,
                                                 self._segmentdata[primary])
        self._isinit = True
        self._set_extremes()
class ListedColormap(Colormap):
    """Colormap object generated from a list of colors.
    This may be most useful when indexing directly into a colormap,
    but it can also be used to generate special colormaps for ordinary
    mapping.
    """
    def __init__(self, colors, name = 'from_list', N = None):
        """
        Make a colormap from a list of colors.
        *colors*
            a list of matplotlib color specifications,
            or an equivalent Nx3 floating point array (*N* rgb values)
        *name*
            a string to identify the colormap
        *N*
            the number of entries in the map.  The default is *None*,
            in which case there is one colormap entry for each
            element in the list of colors.  If::
                N < len(colors)
            the list will be truncated at *N*.  If::
                N > len(colors)
            the list will be extended by repetition.
        """
        self.colors = colors
        self.monochrome = False  # True only if all colors in map are
                                 # identical; needed for contouring.
        if N is None:
            N = len(self.colors)
        else:
            if cbook.is_string_like(self.colors):
                # A single color name: repeat it N times.
                self.colors = [self.colors] * N
                self.monochrome = True
            elif cbook.iterable(self.colors):
                self.colors = list(self.colors) # in case it was a tuple
                if len(self.colors) == 1:
                    self.monochrome = True
                if len(self.colors) < N:
                    # Extend by repetition, then truncate to exactly N.
                    self.colors = list(self.colors) * N
                del self.colors[N:]
            else:
                # Possibly a single gray-level number.
                try: gray = float(self.colors)
                except TypeError: pass
                else: self.colors = [gray] * N
                self.monochrome = True
        Colormap.__init__(self, name, N)
    def _init(self):
        # BUG FIX: np.float was removed from NumPy (1.24); it was always
        # an alias of the builtin float, which is used directly now.
        rgb = np.array([colorConverter.to_rgb(c)
                        for c in self.colors], float)
        self._lut = np.zeros((self.N + 3, 4), float)
        self._lut[:-3, :-1] = rgb
        self._lut[:-3, -1] = 1  # opaque alpha for all in-range entries
        self._isinit = True
        self._set_extremes()
class Normalize:
    """
    Normalize a given value to the 0-1 range
    """
    def __init__(self, vmin=None, vmax=None, clip=False):
        """
        If *vmin* or *vmax* is not given, they are taken from the input's
        minimum and maximum value respectively.  If *clip* is *True* and
        the given value falls outside the range, the returned value
        will be 0 or 1, whichever is closer.  Returns 0 if::
            vmin==vmax
        Works with scalars or arrays, including masked arrays.  If
        *clip* is *True*, masked values are set to 1; otherwise they
        remain masked.  Clipping silently defeats the purpose of setting
        the over, under, and masked colors in the colormap, so it is
        likely to lead to surprises; therefore the default is
        *clip* = *False*.
        """
        self.vmin = vmin
        self.vmax = vmax
        self.clip = clip
    def __call__(self, value, clip=None):
        """Map *value* (scalar or array-like) linearly into [0, 1]."""
        if clip is None:
            clip = self.clip
        if cbook.iterable(value):
            vtype = 'array'
            # BUG FIX: np.float was removed from NumPy (1.24); it was
            # always an alias of the builtin float.
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)
        # Fill in any limit that is still None from the data itself.
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (val-vmin) * (1.0/(vmax-vmin))
        if vtype == 'scalar':
            result = result[0]
        return result
    def inverse(self, value):
        """Map a normalized value in [0, 1] back to the data range."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin + val * (vmax - vmin)
        else:
            return vmin + value * (vmax - vmin)
    def autoscale(self, A):
        '''
        Set *vmin*, *vmax* to min, max of *A*.
        '''
        # BUG FIX: single-argument ma.minimum(A)/ma.maximum(A) were
        # removed from NumPy; the masked-array min()/max() methods are
        # the equivalent (masked entries are ignored either way).
        A = ma.asarray(A)
        self.vmin = A.min()
        self.vmax = A.max()
    def autoscale_None(self, A):
        '''Autoscale only the limits that are still None.'''
        A = ma.asarray(A)
        if self.vmin is None: self.vmin = A.min()
        if self.vmax is None: self.vmax = A.max()
    def scaled(self):
        '''Return True if both vmin and vmax are set.'''
        return (self.vmin is not None and self.vmax is not None)
class LogNorm(Normalize):
    """
    Normalize a given value to the 0-1 range on a log scale
    """
    def __call__(self, value, clip=None):
        """Map *value* to [0, 1] logarithmically between vmin and vmax.

        Raises ValueError if the range includes non-positive values.
        """
        if clip is None:
            clip = self.clip
        if cbook.iterable(value):
            vtype = 'array'
            # BUG FIX: np.float was removed from NumPy (1.24); it was
            # always an alias of the builtin float.
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin<=0:
            raise ValueError("values must all be positive")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
        if vtype == 'scalar':
            result = result[0]
        return result
    def inverse(self, value):
        """Invert the log-scaled normalization."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin * ma.power((vmax/vmin), val)
        else:
            return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
    '''
    Generate a colormap index based on discrete intervals.
    Unlike :class:`Normalize` or :class:`LogNorm`,
    :class:`BoundaryNorm` maps values to integers instead of to the
    interval 0-1.
    Mapping to the 0-1 interval could have been done via
    piece-wise linear interpolation, but using integers seems
    simpler, and reduces the number of conversions back and forth
    between integer and floating point.
    '''
    def __init__(self, boundaries, ncolors, clip=False):
        '''
        *boundaries*
            a monotonically increasing sequence
        *ncolors*
            number of colors in the colormap to be used
        If::
            b[i] <= v < b[i+1]
        then v is mapped to color j;
        as i varies from 0 to len(boundaries)-2,
        j goes from 0 to ncolors-1.
        Out-of-range values are mapped to -1 if low and ncolors
        if high; these are converted to valid indices by
        :meth:`Colormap.__call__` .
        '''
        self.clip = clip
        self.vmin = boundaries[0]
        self.vmax = boundaries[-1]
        self.boundaries = np.asarray(boundaries)
        self.N = len(self.boundaries)
        self.Ncmap = ncolors
        # With exactly one color per interval no index interpolation is
        # needed; otherwise indices are rescaled in __call__.
        if self.N-1 == self.Ncmap:
            self._interp = False
        else:
            self._interp = True
    def __call__(self, x, clip=None):
        """Map *x* to integer colormap indices (see class docstring)."""
        if clip is None:
            clip = self.clip
        x = ma.asarray(x)
        mask = ma.getmaskarray(x)
        # Masked entries are filled out-of-range; the mask is restored on
        # the returned masked array below.
        xx = x.filled(self.vmax+1)
        if clip:
            # BUG FIX: np.clip does not operate in place unless an output
            # array is supplied; the original call discarded its result,
            # so clip=True had no effect.
            np.clip(xx, self.vmin, self.vmax, out=xx)
        iret = np.zeros(x.shape, dtype=np.int16)
        for i, b in enumerate(self.boundaries):
            iret[xx>=b] = i
        if self._interp:
            iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
        iret[xx<self.vmin] = -1
        iret[xx>=self.vmax] = self.Ncmap
        ret = ma.array(iret, mask=mask)
        if ret.shape == () and not mask:
            ret = int(ret)  # assume python scalar
        return ret
    def inverse(self, value):
        """Always raises: a BoundaryNorm has no inverse mapping."""
        # BUG FIX: the exception was previously *returned* instead of
        # raised, silently handing callers a ValueError instance.
        raise ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
    '''
    Dummy replacement for Normalize, for the case where we
    want to use indices directly in a
    :class:`~matplotlib.cm.ScalarMappable` .
    '''
    def __call__(self, value, clip=None):
        # Identity mapping: values are already colormap indices, so no
        # normalization is performed (clip is ignored).
        return value
    def inverse(self, value):
        # Identity inverse of the identity mapping.
        return value
# compatibility with earlier class names that violated convention:
# lowercase aliases are kept so old user code referencing `normalize`
# and `no_norm` keeps working.
normalize = Normalize
no_norm = NoNorm
| gpl-3.0 |
bthirion/scikit-learn | sklearn/neighbors/tests/test_kde.py | 26 | 5518 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force KDE reference used to validate KernelDensity.

    Evaluates the density induced by training points X at the query
    points Y for the given kernel name and bandwidth h.
    """
    # Pairwise euclidean distances between every query and training point.
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
    # Unnormalized per-query kernel sums, one formula per kernel.
    kernel_sums = {
        'gaussian': lambda: np.exp(-0.5 * (d * d) / (h * h)).sum(-1),
        'tophat': lambda: (d < h).sum(-1),
        'epanechnikov': lambda: ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1),
        'exponential': lambda: (np.exp(-d / h)).sum(-1),
        'linear': lambda: ((1 - d / h) * (d < h)).sum(-1),
        'cosine': lambda: (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1),
    }
    if kernel not in kernel_sums:
        raise ValueError('kernel not recognized')
    return norm * kernel_sums[kernel]()
def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
    """Fit KernelDensity on X and compare its densities at Y against the
    brute-force reference values dens_true."""
    estimator = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                              atol=atol, rtol=rtol)
    estimator.fit(X)
    log_dens = estimator.score_samples(Y)
    effective_rtol = max(1E-7, rtol)
    assert_allclose(np.exp(log_dens), dens_true,
                    atol=atol, rtol=effective_rtol)
    # The total score must equal the log of the product of densities.
    assert_allclose(np.exp(estimator.score(Y)),
                    np.prod(dens_true),
                    atol=atol, rtol=effective_rtol)
def test_kernel_density(n_samples=100, n_features=3):
    # Generator-style (nose "yield") test: compares KernelDensity against
    # the brute-force reference across kernels, bandwidths and tolerances.
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        # NOTE(review): breadth_first is iterated but never
                        # forwarded, so both iterations run the identical
                        # check — confirm whether it should be passed on
                        # to KernelDensity/check_results.
                        yield (check_results, kernel, bandwidth, atol, rtol,
                               X, Y, dens_true)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    # Checks KernelDensity.sample(): shape, proximity of samples to the
    # training data, unsupported kernels, and output dimensionality.
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    bandwidth = 0.2
    for kernel in ['gaussian', 'tophat']:
        # draw a tophat sample
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        samp = kde.sample(100)
        assert_equal(X.shape, samp.shape)
        # check that samples are in the right range
        # NOTE(review): kneighbors is queried with X (the training data
        # itself), so every distance is 0 and the asserts below pass
        # trivially — presumably `samp` was intended; confirm.
        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nbrs.kneighbors(X, return_distance=True)
        if kernel == 'tophat':
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)
    # check unsupported kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, kde.sample, 100)
    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
    # Smoke test for various metrics and algorithms
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2) # 2 features required for haversine dist.
    Y = rng.randn(10, 2)
    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in ['euclidean', 'minkowski', 'manhattan',
                       'chebyshev', 'haversine']:
            if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
                # kd_tree cannot handle this metric: constructing the
                # estimator must fail.
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                # Supported combination: fitting and scoring must work
                # and produce one density per query row.
                kde = KernelDensity(algorithm=algorithm, metric=metric)
                kde.fit(X)
                y_dens = kde.score_samples(Y)
                assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
    # Placeholder: the score() comparison test is not implemented yet.
    pass
    #FIXME
    #np.random.seed(0)
    #X = np.random.random((n_samples, n_features))
    #Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
    """Each invalid constructor argument must raise ValueError."""
    bad_kwargs = (
        dict(algorithm='blah'),
        dict(bandwidth=0),
        dict(kernel='blah'),
        dict(metric='blah'),
        dict(algorithm='kd_tree', metric='blah'),
    )
    for kwargs in bad_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)
def test_kde_pipeline_gridsearch():
    # test that kde plays nice in pipelines and grid-searches
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    # Scaler is a no-op (no centering/scaling); it only exercises the
    # pipeline plumbing in front of the KDE step.
    pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
                          KernelDensity(kernel="gaussian"))
    params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
    search = GridSearchCV(pipe1, param_grid=params, cv=5)
    search.fit(X)
    # With cluster_std=.1, a bandwidth of .1 should win the search.
    assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
yuyu2172/image-labelling-tool | examples/ssd/demo.py | 1 | 1243 | import argparse
import matplotlib.pyplot as plot
import yaml
import chainer
from chainercv.datasets import voc_detection_label_names
from chainercv.links import SSD300
from chainercv import utils
from chainercv.visualizations import vis_bbox
from original_detection_dataset import OriginalDetectionDataset
def main():
    """Run SSD300 detection on a single image and display the result.

    Command line arguments:
        image: path of the input image.
        --gpu: GPU device id (-1, the default, runs on CPU).
        --pretrained_model: path to trained SSD300 weights.
        --label_names: yaml file containing the list of class label names.
    """
    chainer.config.train = False
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained_model')
    parser.add_argument('image')
    parser.add_argument(
        '--label_names', help='The path to the yaml file with label names')
    args = parser.parse_args()
    with open(args.label_names, 'r') as f:
        # SECURITY/DEPRECATION FIX: yaml.load() without an explicit
        # Loader can construct arbitrary Python objects and is
        # deprecated since PyYAML 5.1; safe_load parses plain data only.
        label_names = tuple(yaml.safe_load(f))
    model = SSD300(
        n_fg_class=len(label_names),
        pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    img = utils.read_image(args.image, color=True)
    # predict() takes/returns batches; unwrap the single-image results.
    bboxes, labels, scores = model.predict([img])
    bbox, label, score = bboxes[0], labels[0], scores[0]
    vis_bbox(
        img, bbox, label, score, label_names=label_names)
    plot.show()
if __name__ == '__main__':
    main()
| mit |
Quantipy/quantipy | tests/test_merging.py | 1 | 20330 | import unittest
import os.path
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import test_helper
import copy
import json
from operator import lt, le, eq, ne, ge, gt
from pandas.core.index import Index
# Maps pandas Index set-operation methods to the single-character symbols
# used when composing combined filter/view keys.
__index_symbol__ = {
    Index.union: ',',
    Index.intersection: '&',
    Index.difference: '~',
    Index.sym_diff: '^'
}
from collections import defaultdict, OrderedDict
from quantipy.core.stack import Stack
from quantipy.core.chain import Chain
from quantipy.core.link import Link
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.view_generators.view_maps import QuantipyViews
from quantipy.core.view import View
from quantipy.core.helpers import functions
from quantipy.core.helpers.functions import load_json
from quantipy.core.tools.dp.prep import (
start_meta,
frange,
frequency,
crosstab,
subset_dataset,
hmerge,
vmerge
)
from quantipy.core.tools.view.query import get_dataframe
class TestMerging(unittest.TestCase):
    """Tests for subset_dataset(), hmerge() and vmerge().

    Every test operates on two overlapping subsets of the
    'Example Data (A)' project: a 'left' dataset (rows 0-9) and a
    'right' dataset (rows 5-14), sharing the columns 'unique_id',
    'gender' and 'q2'.  The previously copy-pasted dataset-creation
    boilerplate is factored into _make_datasets().
    """

    # Column subsets used to build the left/right datasets in every test.
    SUBSET_COLUMNS_L = [
        'unique_id', 'gender', 'locality', 'ethnicity', 'q2', 'q3']
    SUBSET_COLUMNS_R = [
        'unique_id', 'gender', 'religion', 'q1', 'q2', 'q8', 'q9']

    def setUp(self):
        self.path = './tests/'
        # self.path = ''
        project_name = 'Example Data (A)'

        # Load Example Data (A) data and meta into self
        name_data = '%s.csv' % (project_name)
        path_data = '%s%s' % (self.path, name_data)
        self.example_data_A_data = pd.DataFrame.from_csv(path_data)
        name_meta = '%s.json' % (project_name)
        path_meta = '%s%s' % (self.path, name_meta)
        self.example_data_A_meta = load_json(path_meta)

        # Variables by type for Example Data A
        self.dk = 'Example Data (A)'
        self.fk = 'no_filter'
        self.single = ['gender', 'locality', 'ethnicity', 'religion', 'q1']
        self.delimited_set = ['q2', 'q3', 'q8', 'q9']
        self.q5 = ['q5_1', 'q5_2', 'q5_3']

    def _make_datasets(self):
        """Return ((meta_l, data_l), (meta_r, data_r)): the standard
        overlapping left/right datasets used by all merge tests."""
        meta = self.example_data_A_meta
        data = self.example_data_A_data
        meta_l, data_l = subset_dataset(
            meta, data[:10], columns=self.SUBSET_COLUMNS_L)
        meta_r, data_r = subset_dataset(
            meta, data[5:15], columns=self.SUBSET_COLUMNS_R)
        return (meta_l, data_l), (meta_r, data_r)

    def _check_subset(self, meta_s, data_s, columns, n_rows):
        """Verify the general characteristics of a subset dataset: meta
        columns, 'data file' set items and data columns all agree, and
        the data has the expected shape."""
        self.assertItemsEqual(meta_s['columns'].keys(), columns)
        datafile_items = meta_s['sets']['data file']['items']
        datafile_columns = [item.split('@')[-1] for item in datafile_items]
        self.assertItemsEqual(meta_s['columns'].keys(), datafile_columns)
        self.assertItemsEqual(data_s.columns.tolist(), datafile_columns)
        self.assertItemsEqual(data_s.columns.tolist(), columns)
        self.assertEqual(data_s.shape, (n_rows, len(columns)))

    def test_subset_dataset(self):
        (meta_l, data_l), (meta_r, data_r) = self._make_datasets()
        self._check_subset(meta_l, data_l, self.SUBSET_COLUMNS_L, 10)
        self._check_subset(meta_r, data_r, self.SUBSET_COLUMNS_R, 10)

    def test_hmerge_basic(self):
        dataset_left, dataset_right = self._make_datasets()
        data_l = dataset_left[1]
        data_r = dataset_right[1]

        # hmerge datasets using left_on/right_on
        meta_hm, data_hm = hmerge(
            dataset_left, dataset_right,
            left_on='unique_id', right_on='unique_id',
            verbose=False)
        verify_hmerge_data(self, data_l, data_r, data_hm, meta_hm)

        # hmerge datasets using on
        meta_hm, data_hm = hmerge(
            dataset_left, dataset_right,
            on='unique_id',
            verbose=False)
        verify_hmerge_data(self, data_l, data_r, data_hm, meta_hm)

    def test_vmerge_basic(self):
        dataset_left, dataset_right = self._make_datasets()
        data_l = dataset_left[1]
        data_r = dataset_right[1]

        # vmerge datasets using left_on/right_on
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            left_on='unique_id', right_on='unique_id',
            verbose=False)
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm)

        # vmerge datasets using on
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            on='unique_id',
            verbose=False)
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm)

    def test_vmerge_row_id(self):
        dataset_left, dataset_right = self._make_datasets()
        data_l = dataset_left[1]
        data_r = dataset_right[1]

        # int/int ids produce an int row_id column
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            on='unique_id',
            row_id_name='DataSource',
            left_id=1, right_id=2,
            verbose=False)
        expected = {'text': {'en-GB': 'vmerge row id'}, 'type': 'int',
                    'name': 'DataSource'}
        self.assertEqual(meta_vm['columns']['DataSource'], expected)
        self.assertTrue(data_vm['DataSource'].dtype=='int64')
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm,
                           row_id_name='DataSource', left_id=1, right_id=2)

        # mixed int/float ids upcast the row_id column to float
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            on='unique_id',
            row_id_name='DataSource',
            left_id=1, right_id=2.0,
            verbose=False)
        expected = {'text': {'en-GB': 'vmerge row id'}, 'type': 'float',
                    'name': 'DataSource'}
        self.assertEqual(meta_vm['columns']['DataSource'], expected)
        self.assertTrue(data_vm['DataSource'].dtype=='float64')
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm,
                           row_id_name='DataSource', left_id=1, right_id=2.0)

        # mixed str/float ids upcast the row_id column to str
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            on='unique_id',
            row_id_name='DataSource',
            left_id='W1', right_id=2.0,
            verbose=False)
        expected = {'text': {'en-GB': 'vmerge row id'}, 'type': 'str',
                    'name': 'DataSource'}
        self.assertEqual(meta_vm['columns']['DataSource'], expected)
        self.assertTrue(data_vm['DataSource'].dtype=='object')
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm,
                           row_id_name='DataSource', left_id='W1',
                           right_id='2.0')

    def test_vmerge_blind_append(self):
        dataset_left, dataset_right = self._make_datasets()
        data_l = dataset_left[1]
        data_r = dataset_right[1]

        # vmerge with no key columns appends rows blindly
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            verbose=False)
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm,
                           blind_append=True)

    def test_vmerge_blind_append_row_id(self):
        dataset_left, dataset_right = self._make_datasets()
        data_l = dataset_left[1]
        data_r = dataset_right[1]

        # blind append, tagging each row with its source dataset
        meta_vm, data_vm = vmerge(
            dataset_left, dataset_right,
            row_id_name='DataSource',
            left_id=1, right_id=2,
            verbose=False)
        verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm,
                           row_id_name='DataSource', left_id=1, right_id=2,
                           blind_append=True)

    def test_hmerge_vmerge_basic(self):
        dataset_left, dataset_right = self._make_datasets()
        data_l = dataset_left[1]
        data_r = dataset_right[1]

        # hmerge first...
        meta_hm, data_hm = hmerge(
            dataset_left, dataset_right,
            left_on='unique_id', right_on='unique_id',
            verbose=False)
        verify_hmerge_data(self, data_l, data_r, data_hm, meta_hm)

        # ...then vmerge the hmerged result with the right dataset
        meta_vm, data_vm = vmerge(
            (meta_hm, data_hm), dataset_right,
            left_on='unique_id', right_on='unique_id',
            verbose=False)
        verify_vmerge_data(self, data_hm, data_r, data_vm, meta_vm)
# ##################### Helper functions #####################
def verify_hmerge_data(self, data_l, data_r, data_hm, meta_hm):
    # Assert the result of an hmerge: (data_hm, meta_hm) must contain all
    # left rows, the union of both column sets, and right-side values
    # only where unique_ids overlap.  `self` is the calling TestCase.
    # check general characteristics of merged dataset
    combined_columns = data_l.columns.union(data_r.columns)
    self.assertItemsEqual(meta_hm['columns'].keys(), combined_columns)
    # NOTE(review): the line below duplicates the assertion above —
    # presumably one of them was meant to check something else; confirm.
    self.assertItemsEqual(meta_hm['columns'].keys(), combined_columns)
    datafile_items = meta_hm['sets']['data file']['items']
    datafile_columns = [item.split('@')[-1]for item in datafile_items]
    self.assertItemsEqual(meta_hm['columns'].keys(), datafile_columns)
    # hmerge keeps exactly the left rows and the combined columns.
    self.assertEqual(data_hm.shape, (data_l.shape[0], len(combined_columns)))
    # Slicers to assist in checking
    l_in_r_rows = data_r['unique_id'].isin(data_l['unique_id'])
    r_in_l_rows = data_l['unique_id'].isin(data_r['unique_id'])
    l_rows = data_hm['unique_id'].isin(data_l['unique_id'])
    r_rows = data_hm['unique_id'].isin(data_r['unique_id'])
    overlap_rows = l_rows & r_rows
    l_only_rows = l_rows & (l_rows ^ r_rows)
    r_only_rows = r_rows & (l_rows ^ r_rows)
    new_columns = ['unique_id'] + data_r.columns.difference(data_l.columns).tolist()
    # check data from left dataset
    # fillna(999) makes NaNs comparable (NaN != NaN would fail the check).
    actual = data_hm[data_l.columns].fillna(999)
    expected = data_l.fillna(999)
    assert_frame_equal(actual, expected)
    # check data from right dataset
    actual = data_hm.ix[r_rows, new_columns].fillna(999)
    expected = data_r.ix[l_in_r_rows, new_columns].fillna(999)
    self.assertTrue(all([all(values) for values in (actual==expected).values]))
def verify_vmerge_data(self, data_l, data_r, data_vm, meta_vm,
                       row_id_name=None, left_id=None, right_id=None,
                       blind_append=False):
    # Assert the result of a vmerge: (data_vm, meta_vm) must stack left
    # rows over new right rows, carry the combined column set, and (when
    # row_id_name is given) tag each row with its source id.  With
    # blind_append=True rows are simply concatenated and duplicates are
    # allowed; otherwise unique_ids are deduplicated.  `self` is the
    # calling TestCase.
    # Vars to help basic checks
    combined_columns = list(data_l.columns.union(data_r.columns))
    if not row_id_name is None:
        combined_columns.append(row_id_name)
    datafile_items = meta_vm['sets']['data file']['items']
    datafile_columns = [item.split('@')[-1]for item in datafile_items]
    # Check merged meta
    self.assertItemsEqual(meta_vm['columns'].keys(), combined_columns)
    self.assertItemsEqual(meta_vm['columns'].keys(), datafile_columns)
    # Check contents of unique_ids and dataframe shape
    ids_left = data_l['unique_id']
    ids_right = data_r['unique_id']
    if blind_append:
        unique_ids = list(ids_left.append(ids_right))
    else:
        unique_ids = list(set(ids_left).union(set(ids_right)))
    self.assertItemsEqual(data_vm['unique_id'], unique_ids)
    self.assertEqual(data_vm.shape, (len(unique_ids), len(combined_columns)))
    # Slicers to assist in checking
    # NOTE(review): r_not_in_l_rows, no_r_rows and the (misspelled)
    # all_columnws below are computed but never used.
    l_in_r_rows = data_r['unique_id'].isin(data_l['unique_id'])
    l_not_in_r_rows = l_in_r_rows == False
    r_in_l_rows = data_l['unique_id'].isin(data_r['unique_id'])
    r_not_in_l_rows = r_in_l_rows == False
    l_in_vm_rows = data_vm['unique_id'].isin(data_l['unique_id'])
    r_in_vm_rows = data_vm['unique_id'].isin(data_r['unique_id'])
    overlap_rows = l_in_vm_rows & r_in_vm_rows
    l_only_vm_rows = l_in_vm_rows & (l_in_vm_rows ^ r_in_vm_rows)
    r_only_vm_rows = r_in_vm_rows & (l_in_vm_rows ^ r_in_vm_rows)
    no_l_rows = data_l.shape[0]
    no_r_rows = data_r.shape[0]
    l_cols = data_l.columns
    r_cols = data_r.columns
    l_only_cols = l_cols.difference(r_cols)
    r_only_cols = r_cols.difference(l_cols)
    all_columnws = l_cols | r_cols
    overlap_columns = l_cols & r_cols
    new_columns = ['unique_id'] + data_r.columns.difference(data_l.columns).tolist()
    ### -- LEFT ROWS
    # check left rows, left columns
    # fillna(999) makes NaNs comparable (NaN != NaN would fail checks).
    if blind_append:
        actual = data_vm.iloc[0:no_l_rows][l_cols].fillna(999)
    else:
        actual = data_vm.ix[l_in_vm_rows, l_cols].fillna(999)
    expected = data_l.fillna(999)
    assert_frame_equal(actual, expected)
    # check left rows, right only columns (must all be NaN -> 999)
    if blind_append:
        actual = data_vm.iloc[0:no_l_rows][r_only_cols].fillna(999)
    else:
        actual = data_vm.ix[l_in_vm_rows, r_only_cols].fillna(999)
    self.assertTrue(all([all(values) for values in (actual==999).values]))
    # check left rows, row_id column
    if not row_id_name is None:
        if blind_append:
            actual = data_vm.iloc[0:no_l_rows][row_id_name].fillna(999)
        else:
            actual = data_vm.ix[l_in_vm_rows, row_id_name].fillna(999)
        self.assertTrue(all(actual==left_id))
    ### -- RIGHT ONLY ROWS
    # check right only rows, right only columns
    if blind_append:
        actual = data_vm.iloc[no_l_rows:][r_only_cols].fillna(999)
        expected = data_r[r_only_cols].fillna(999)
    else:
        actual = data_vm.ix[r_only_vm_rows, r_only_cols].fillna(999)
        expected = data_r.ix[l_not_in_r_rows, r_only_cols].fillna(999)
    comparison_values = actual.values==expected.values
    self.assertTrue(all([all(values) for values in (comparison_values)]))
    # check right only rows, overlap columns
    if blind_append:
        actual = data_vm.iloc[no_l_rows:][overlap_columns].fillna(999)
        expected = data_r[overlap_columns].fillna(999)
    else:
        actual = data_vm.ix[r_only_vm_rows, overlap_columns].fillna(999)
        expected = data_r.ix[l_not_in_r_rows, overlap_columns].fillna(999)
    comparison_values = actual.values==expected.values
    self.assertTrue(all([all(values) for values in (comparison_values)]))
    # check right only rows, left only columns (must all be NaN -> 999)
    if blind_append:
        actual = data_vm.iloc[no_l_rows:][l_only_cols].fillna(999)
    else:
        actual = data_vm.ix[r_only_vm_rows, l_only_cols].fillna(999)
    self.assertTrue(all([all(values) for values in (actual==999).values]))
    ### -- OVERLAP ROWS
    if not blind_append:
        # check overlap rows, right only columns
        actual = data_vm.ix[overlap_rows, r_only_cols].fillna(999)
        self.assertTrue(all([all(values) for values in (actual==999).values]))
        # check overlap rows, overlap columns
        actual = data_vm.ix[overlap_rows, overlap_columns].fillna(999)
        expected = data_l.ix[r_in_l_rows, overlap_columns].fillna(999)
        self.assertTrue(all([all(values) for values in (actual==expected).values]))
        # check overlap rows, left only columns
        # NOTE(review): this block repeats the overlap-columns check
        # verbatim; `l_only_cols` was presumably intended — confirm.
        actual = data_vm.ix[overlap_rows, overlap_columns].fillna(999)
        expected = data_l.ix[r_in_l_rows, overlap_columns].fillna(999)
        self.assertTrue(all([all(values) for values in (actual==expected).values]))
    # check left rows, row_id column
    if not row_id_name is None:
        actual = data_vm.ix[r_only_vm_rows, row_id_name].fillna(999)
        self.assertTrue(all(actual==right_id))
| mit |
zorroblue/scikit-learn | sklearn/linear_model/omp.py | 13 | 31718 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
# Extra keyword arguments forwarded to scipy.linalg.solve_triangular.
# The code below also uses the truthiness of this dict as a new-vs-old
# scipy flag (see the `if solve_triangular_args:` branches).
solve_triangular_args = {'check_finite': False}
# Warning message emitted when OMP has to stop early because the selected
# atoms became linearly dependent (the requested precision may not be met).
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.
    y : array, shape (n_samples,)
        Input targets
    n_nonzero_coefs : int
        Targeted number of non-zero elements
    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.
    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution
    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector
    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    n_active : int
        Number of active features at convergence.
    """
    # Work on a Fortran-ordered copy: the column swaps below mutate X in
    # place, and BLAS routines expect column-major layout.
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)
    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))
    alpha = np.dot(X.T, y)  # correlations of each atom with the target
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping
    # With a tolerance-based stop, any number of atoms may be needed.
    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=X.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=X.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)
    # Greedy loop: at each step pick the atom most correlated with the
    # current residual, grow the Cholesky factor of X'X restricted to the
    # active set, and re-solve the least-squares coefficients.
    while True:
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Swap the selected atom into the leading (active) block so the
        # active set always occupies columns [0, n_active).
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break
    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data matrix
    Xy : array, shape (n_features,)
        Input targets
    n_nonzero_coefs : int
        Targeted number of non-zero elements
    tol_0 : float
        Squared norm of y, required if tol is not None.
    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.
    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.
    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution
    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector
    coefs : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.
    n_active : int
        Number of active features at convergence.
    """
    # The row/column swaps below mutate Gram (and possibly Xy) in place,
    # hence the optional copies.
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
    if copy_Xy:
        Xy = Xy.copy()
    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))
    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy  # current correlations; updated from the residual each step
    tol_curr = tol_0  # running estimate of the residual's squared norm
    delta = 0
    gamma = np.empty(0)
    n_active = 0
    # With a tolerance-based stop, any number of atoms may be needed.
    max_features = len(Gram) if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=Gram.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=Gram.dtype)
    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)
    # Greedy loop, mirroring _cholesky_omp but expressed entirely in terms
    # of the Gram matrix and Xy (no access to X or y themselves).
    while True:
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            L[n_active, :n_active] = Gram[lam, :n_active]
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Swap the selected atom into the leading (active) block; Gram is
        # symmetric so both its rows and columns must be swapped.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incrementally track the residual's squared norm via the
            # successive inner products <gamma, beta>.
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break
    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
                  copy_X=True, return_path=False,
                  return_n_iter=False):
    """Orthogonal Matching Pursuit (OMP)
    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:
    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.
    y : array, shape (n_samples,) or (n_samples, n_targets)
        Input targets
    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.
    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
    precompute : {True, False, 'auto'},
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.
    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.
    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.
    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.
    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp_gram
    lars_path
    decomposition.sparse_encode
    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    X = check_array(X, order='F', copy=copy_X)
    copy_X = False
    # Validate y *before* touching ``y.ndim``: this converts array-likes
    # (lists, tuples) to an ndarray so the attribute access cannot fail.
    # ``ensure_2d=False`` keeps 1-d targets legal; we reshape them ourselves.
    y = check_array(y, ensure_2d=False)
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    if y.shape[1] > 1:  # subsequent targets will be affected
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if precompute == 'auto':
        # Precomputing the Gram matrix pays off when the problem is tall.
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        # Delegate to the Gram variant, which only needs X'X and X'y.
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y ** 2), axis=0)
        else:
            norms_squared = None
        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
                                  copy_Gram=copy_X, copy_Xy=False,
                                  return_path=return_path)
    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    n_iters = []
    # Solve one OMP problem per target column.
    for k in range(y.shape[1]):
        out = _cholesky_omp(
            X, y[:, k], n_nonzero_coefs, tol,
            copy_X=copy_X, return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, :len(idx)]
            # Scatter each step's active coefficients back into the full
            # (n_features, n_targets, n_steps) path array.
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)
    if y.shape[1] == 1:
        n_iters = n_iters[0]
    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
                       norms_squared=None, copy_Gram=True,
                       copy_Xy=True, return_path=False,
                       return_n_iter=False):
    """Gram Orthogonal Matching Pursuit (OMP)
    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data: X.T * X
    Xy : array, shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by X: X.T * y
    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.
    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
    norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the lines of y. Required if tol is not None.
    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.
    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.
    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.
    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.
    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.
    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.
    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp
    lars_path
    decomposition.sparse_encode
    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    """
    Gram = check_array(Gram, order='F', copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        # Promote single-target input to the (n_features, 1) layout, and
        # wrap the scalar norm so the per-target indexing below works.
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]
    if n_nonzero_coefs is None and tol is None:
        n_nonzero_coefs = int(0.1 * len(Gram))
    if tol is not None and norms_squared is None:
        raise ValueError('Gram OMP needs the precomputed norms in order '
                         'to evaluate the error sum of squares.')
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > len(Gram):
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if return_path:
        coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
    else:
        coef = np.zeros((len(Gram), Xy.shape[1]))
    n_iters = []
    # Solve one Gram-based OMP problem per target column.
    for k in range(Xy.shape[1]):
        out = _gram_omp(
            Gram, Xy[:, k], n_nonzero_coefs,
            norms_squared[k] if tol is not None else None, tol,
            copy_Gram=copy_Gram, copy_Xy=copy_Xy,
            return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, :len(idx)]
            # Scatter each step's active coefficients back into the full
            # (n_features, n_targets, n_steps) path array.
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)
    if Xy.shape[1] == 1:
        n_iters = n_iters[0]
    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
    """Orthogonal Matching Pursuit model (OMP)
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.
    tol : float, optional
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    precompute : {True, False, 'auto'}, default 'auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when `n_targets` or `n_samples` is
        very large. Note that if you already have such matrices, you can pass
        them directly to the fit method.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        parameter vector (w in the formula)
    intercept_ : float or array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : int or array-like
        Number of active features across every target.
    Notes
    -----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    decomposition.sparse_encode
    """
    def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
                 normalize=True, precompute='auto'):
        # Per sklearn convention, __init__ only stores hyper-parameters;
        # all validation happens in fit.
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values. Will be cast to X's dtype if necessary
        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        n_features = X.shape[1]
        # _pre_fit centers/normalizes the data and, depending on
        # ``self.precompute``, may also return the Gram matrix and X.T @ y.
        X, y, X_offset, y_offset, X_scale, Gram, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if self.n_nonzero_coefs is None and self.tol is None:
            # default for n_nonzero_coefs is 0.1 * n_features
            # but at least one.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs
        if Gram is False:
            # No precomputed Gram available: run OMP on the raw data.
            coef_, self.n_iter_ = orthogonal_mp(
                X, y, self.n_nonzero_coefs_, self.tol,
                precompute=False, copy_X=True,
                return_n_iter=True)
        else:
            norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
            coef_, self.n_iter_ = orthogonal_mp_gram(
                Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol, norms_squared=norms_sq,
                copy_Gram=True, copy_Xy=True,
                return_n_iter=True)
        self.coef_ = coef_.T
        # Recover the intercept on the original (uncentered) scale.
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
                       fit_intercept=True, normalize=True, max_iter=100):
    """Compute the residues on left-out data for a full LARS path
    Parameters
    ----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on
    y_train : array, shape (n_samples)
        The target variable to fit LARS on
    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on
    y_test : array, shape (n_samples)
        The target variable to compute the residues on
    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.
    Returns
    -------
    residues : array, shape (n_steps, n_samples)
        Residues of the prediction on the test data, one row per step of
        the forward path (``np.dot(coefs.T, X_test.T) - y_test``).
    """
    # Centering/normalization below mutates the arrays in place.
    if copy:
        X_train = X_train.copy()
        y_train = y_train.copy()
        X_test = X_test.copy()
        y_test = y_test.copy()
    if fit_intercept:
        # Center train and test with the *training* statistics.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean
    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)  # skip constant (zero-norm) columns
        X_train[:, nonzeros] /= norms[nonzeros]
    coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
                          precompute=False, copy_X=False,
                          return_path=True)
    if coefs.ndim == 1:
        coefs = coefs[:, np.newaxis]
    if normalize:
        # Undo the column scaling so coefficients apply to the raw X_test.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
    """Cross-validated Orthogonal Matching Pursuit model (OMP)
    Read more in the :ref:`User Guide <omp>`.
    Parameters
    ----------
    copy : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs
    verbose : boolean or integer, optional
        Sets the verbosity amount
    Attributes
    ----------
    intercept_ : float or array, shape (n_targets,)
        Independent term in decision function.
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the problem formulation).
    n_nonzero_coefs_ : int
        Estimated number of non-zero coefficients giving the best mean squared
        error over the cross-validation folds.
    n_iter_ : int or array-like
        Number of active features across every target for the model refit with
        the best hyperparameters got by cross-validating across all folds.
    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    OrthogonalMatchingPursuit
    LarsCV
    LassoLarsCV
    decomposition.sparse_encode
    """
    def __init__(self, copy=True, fit_intercept=True, normalize=True,
                 max_iter=None, cv=None, n_jobs=1, verbose=False):
        # Per sklearn convention, __init__ only stores hyper-parameters;
        # all validation happens in fit.
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data.
        y : array-like, shape [n_samples]
            Target values. Will be cast to X's dtype if necessary
        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
                         estimator=self)
        X = as_float_array(X, copy=False, force_all_finite=False)
        cv = check_cv(self.cv, classifier=False)
        # Default path length: 10% of n_features, at least 5, capped at
        # n_features.
        max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                    if not self.max_iter
                    else self.max_iter)
        # One residual path per CV fold, computed in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train], y[train], X[test], y[test], self.copy,
                self.fit_intercept, self.normalize, max_iter)
            for train, test in cv.split(X))
        # Folds may stop at different path lengths; truncate to the shortest
        # before averaging the per-step MSE across folds.
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                              for fold in cv_paths])
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # Refit on the full data with the selected sparsity level.
        omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                        fit_intercept=self.fit_intercept,
                                        normalize=self.normalize)
        omp.fit(X, y)
        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        self.n_iter_ = omp.n_iter_
        return self
| bsd-3-clause |
ahoyosid/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2]  # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0  # inverse regularization strength, shared by all classifiers
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
               'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
               'Linear SVC': SVC(kernel='linear', C=C, probability=True,
                                 random_state=0),
               'L2 logistic (Multinomial)': LogisticRegression(
                   C=C, solver='lbfgs', multi_class='multinomial'
               )}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
# Build a dense 100x100 grid over the feature plane for the probability maps.
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
# One row of subplots per classifier, one column per class.
for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)
    y_pred = classifier.predict(X)
    classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
    print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities on the grid
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        # Overlay the training points predicted as class k.
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
# Shared horizontal colorbar for all probability maps.
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/series/test_misc_api.py | 1 | 11674 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import Index, Series, DataFrame, date_range
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas import compat
import pandas.formats.printing as printing
from pandas.util.testing import (assert_series_equal,
ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class SharedWithSparse(object):
    """Behavior shared by dense and sparse Series tests.

    Subclasses are expected to mix this into a ``TestCase`` and provide the
    ``self.ts`` / ``self.series`` fixtures (named Series objects).
    """

    def test_scalarop_preserve_name(self):
        # Scalar arithmetic must carry the name through.
        scaled = self.ts * 2
        self.assertEqual(scaled.name, self.ts.name)

    def test_copy_name(self):
        duplicate = self.ts.copy()
        self.assertEqual(duplicate.name, self.ts.name)

    def test_copy_index_name_checking(self):
        # don't want to be able to modify the index stored elsewhere after
        # making a copy
        self.ts.index.name = None
        self.assertIsNone(self.ts.index.name)
        self.assertIs(self.ts, self.ts)
        duplicate = self.ts.copy()
        duplicate.index.name = 'foo'
        printing.pprint_thing(self.ts.index.name)
        self.assertIsNone(self.ts.index.name)

    def test_append_preserve_name(self):
        rejoined = self.ts[:5].append(self.ts[5:])
        self.assertEqual(rejoined.name, self.ts.name)

    def test_binop_maybe_preserve_name(self):
        # names match, preserve
        product = self.ts * self.ts
        self.assertEqual(product.name, self.ts.name)
        product = self.ts.mul(self.ts)
        self.assertEqual(product.name, self.ts.name)
        product = self.ts * self.ts[:-2]
        self.assertEqual(product.name, self.ts.name)
        # names don't match, don't preserve
        renamed = self.ts.copy()
        renamed.name = 'something else'
        mixed = self.ts + renamed
        self.assertIsNone(mixed.name)
        mixed = self.ts.add(renamed)
        self.assertIsNone(mixed.name)
        # same checks across every flexible binary operator (and its
        # reflected variant)
        base_ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod',
                    'pow']
        for op in base_ops + ['r' + op for op in base_ops]:
            s = self.ts.copy()
            result = getattr(s, op)(s)
            self.assertEqual(result.name, self.ts.name)
            renamed = self.ts.copy()
            renamed.name = 'changed'
            result = getattr(s, op)(renamed)
            self.assertIsNone(result.name)

    def test_combine_first_name(self):
        combined = self.ts.combine_first(self.ts[:5])
        self.assertEqual(combined.name, self.ts.name)

    def test_getitem_preserve_name(self):
        # boolean mask, fancy indexing and slicing all keep the name
        for subset in (self.ts[self.ts > 0],
                       self.ts[[0, 2, 4]],
                       self.ts[5:10]):
            self.assertEqual(subset.name, self.ts.name)

    def test_pickle(self):
        assert_series_equal(self._pickle_roundtrip(self.series), self.series)
        assert_series_equal(self._pickle_roundtrip(self.ts), self.ts)

    def _pickle_roundtrip(self, obj):
        # Round-trip ``obj`` through a temporary pickle file.
        with ensure_clean() as path:
            obj.to_pickle(path)
            return pd.read_pickle(path)

    def test_argsort_preserve_name(self):
        self.assertEqual(self.ts.argsort().name, self.ts.name)

    def test_sort_index_name(self):
        self.assertEqual(self.ts.sort_index(ascending=False).name,
                         self.ts.name)

    def test_to_sparse_pass_name(self):
        self.assertEqual(self.ts.to_sparse().name, self.ts.name)
class TestSeriesMisc(TestData, SharedWithSparse, tm.TestCase):
_multiprocess_can_split_ = True
    def test_tab_completion(self):
        # GH 9910
        # ``dir(s)`` should expose only the accessors (.str/.dt/.cat) that
        # are valid for the Series' dtype, so tab completion stays relevant.
        s = Series(list('abcd'))
        # Series of str values should have .str but not .dt/.cat in __dir__
        self.assertTrue('str' in dir(s))
        self.assertTrue('dt' not in dir(s))
        self.assertTrue('cat' not in dir(s))
        # similarly for .dt
        s = Series(date_range('1/1/2015', periods=5))
        self.assertTrue('dt' in dir(s))
        self.assertTrue('str' not in dir(s))
        self.assertTrue('cat' not in dir(s))
        # similarly for .cat, but with the twist that str and dt should be
        # there if the categories are of that type first cat and str
        s = Series(list('abbcd'), dtype="category")
        self.assertTrue('cat' in dir(s))
        self.assertTrue('str' in dir(s))  # as it is a string categorical
        self.assertTrue('dt' not in dir(s))
        # similar to cat and str
        s = Series(date_range('1/1/2015', periods=5)).astype("category")
        self.assertTrue('cat' in dir(s))
        self.assertTrue('str' not in dir(s))
        self.assertTrue('dt' in dir(s))  # as it is a datetime categorical
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
self.assertRaises(TypeError, hash, s_empty)
self.assertRaises(TypeError, hash, s)
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_iter_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Timestamp)
self.assertEqual(res, exp)
self.assertIsNone(res.tz)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Timestamp)
self.assertEqual(res, exp)
self.assertEqual(res.tz, exp.tz)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Timedelta)
self.assertEqual(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
for res, exp in zip(s, vals):
self.assertIsInstance(res, pd.Period)
self.assertEqual(res, exp)
self.assertEqual(res.freq, 'M')
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assertIs(getkeys(), self.ts.index)
def test_values(self):
self.assert_numpy_array_equal(self.ts, self.ts.values)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
self.assertEqual(val, self.series[idx])
for idx, val in compat.iteritems(self.ts):
self.assertEqual(val, self.ts[idx])
# assert is lazy (genrators don't define reverse, lists do)
self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with tm.assertRaises(AttributeError):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype='float64')
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
self.assertTrue(np.isnan(s2[0]))
self.assertFalse(np.isnan(s[0]))
else:
# we DID modify the original Series
self.assertTrue(np.isnan(s2[0]))
self.assertTrue(np.isnan(s[0]))
# GH 11794
# copy of tz-aware
expected = Series([Timestamp('2012/01/01', tz='UTC')])
expected2 = Series([Timestamp('1999/01/01', tz='UTC')])
for deep in [None, False, True]:
s = Series([Timestamp('2012/01/01', tz='UTC')])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp('1999/01/01', tz='UTC')
# default deep is True
if deep is None or deep is True:
assert_series_equal(s, expected)
assert_series_equal(s2, expected2)
else:
assert_series_equal(s, expected2)
assert_series_equal(s2, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
self.assertEqual(s.dropna().sum('rows'), 3)
self.assertEqual(s._get_axis_number('rows'), 0)
self.assertEqual(s._get_axis_name('rows'), 'index')
def test_numpy_unique(self):
# it works!
np.unique(self.ts)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.argmax()]
result = tsdf.apply(f)
expected = tsdf.max()
assert_series_equal(result, expected)
# .item()
s = Series([1])
result = s.item()
self.assertEqual(result, 1)
self.assertEqual(s.item(), s.iloc[0])
# using an ndarray like function
s = Series(np.random.randn(10))
result = np.ones_like(s)
expected = Series(1, index=range(10), dtype='float64')
# assert_series_equal(result,expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'), s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0, 1., -1], index=list('abc'))
result = np.compress(s > 0, s)
assert_series_equal(result, Series([1.], index=['b']))
result = np.compress(s < -1, s)
# result empty Index(dtype=object) as the same as original
exp = Series([], dtype='float64', index=Index([], dtype='object'))
assert_series_equal(result, exp)
s = Series([0, 1., -1], index=[.1, .2, .3])
result = np.compress(s > 0, s)
assert_series_equal(result, Series([1.], index=[.2]))
result = np.compress(s < -1, s)
# result empty Float64Index as the same as original
exp = Series([], dtype='float64', index=Index([], dtype='float64'))
assert_series_equal(result, exp)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
s = Series([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):
s.str.repeat(2)
| mit |
florian-f/sklearn | sklearn/metrics/cluster/tests/test_supervised.py | 15 | 7635 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
# Metrics under test that all share the (labels_true, labels_pred) signature
# and score 1.0 for a perfect (permutation-equivalent) clustering.
score_funcs = [
    adjusted_rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
    """Every score function must reject mismatched or non-1D label arrays."""
    bad_inputs = [
        ('labels_true and labels_pred must have same size,'
         ' got 2 and 3', [0, 1], [1, 1, 1]),
        ("labels_true must be 1D: shape is (2", [[0, 1], [1, 0]], [1, 1, 1]),
        ("labels_pred must be 1D: shape is (2", [0, 1, 0], [[1, 1], [0, 0]]),
    ]
    for score_func in score_funcs:
        for message, labels_true, labels_pred in bad_inputs:
            assert_raise_message(ValueError, message, score_func,
                                 labels_true, labels_pred)
def test_perfect_matches():
    """Identical clusterings (up to label renaming) must score exactly 1.0."""
    perfect_pairs = [
        ([], []),
        ([0], [1]),
        ([0, 0, 0], [0, 0, 0]),
        ([0, 1, 0], [42, 7, 42]),
        ([0., 1., 0.], [42., 7., 42.]),
        ([0., 1., 2.], [42., 7., 2.]),
        ([0, 1, 2], [42, 7, 2]),
    ]
    for score_func in score_funcs:
        for labels_a, labels_b in perfect_pairs:
            assert_equal(score_func(labels_a, labels_b), 1.0)
def test_homogeneous_but_not_complete_labeling():
    """Homogeneous but not complete: h == 1, c < 1, v in between."""
    # The second true class is split across two predicted clusters.
    scores = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    for value, expected in zip(scores, (1.00, 0.69, 0.81)):
        assert_almost_equal(value, expected, 2)
def test_complete_but_not_homogeneous_labeling():
    """Complete but not homogeneous: c == 1, h < 1."""
    # Two true classes are merged into one predicted cluster.
    scores = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    for value, expected in zip(scores, (0.58, 1.00, 0.73)):
        assert_almost_equal(value, expected, 2)
def test_not_complete_and_not_homogeneous_labeling():
    """Neither complete nor homogeneous, but not so bad either."""
    scores = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    for value, expected in zip(scores, (0.67, 0.42, 0.52)):
        assert_almost_equal(value, expected, 2)
def test_non_consicutive_labels():
    """Regression test: gaps in the label values must not change the scores."""
    gapped_pairs = [
        ([0, 0, 0, 2, 2, 2], [0, 1, 0, 1, 2, 2]),  # gap in labels_true
        ([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2]),  # gap in labels_pred
    ]
    for labels_true, labels_pred in gapped_pairs:
        h, c, v = homogeneity_completeness_v_measure(labels_true, labels_pred)
        assert_almost_equal(h, 0.67, 2)
        assert_almost_equal(c, 0.42, 2)
        assert_almost_equal(v, 0.52, 2)

    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    """Compute score for random uniform cluster labelings.

    Parameters
    ----------
    score_func : callable
        ``score_func(labels_a, labels_b) -> float``.
    n_samples : int
        Size of each random labeling.
    k_range : sequence of int
        Numbers of clusters to draw labels from.
    n_runs : int, default 10
        Repetitions per cluster count.
    seed : int, default 42
        Seed for the reproducible RNG.

    Returns
    -------
    ndarray of shape (len(k_range), n_runs)
    """
    # ``RandomState.random_integers`` (inclusive bounds) was deprecated in
    # NumPy 1.11 and later removed; it was defined as randint(low, high + 1),
    # so randint(0, k) draws from the same inclusive range [0, k - 1] with an
    # identical random stream.
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(k_range), n_runs))
    for i, k in enumerate(k_range):
        for j in range(n_runs):
            labels_a = rng.randint(low=0, high=k, size=n_samples)
            labels_b = rng.randint(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
def test_adjustment_for_chance():
    """Check that adjusted scores are almost zero on random labels"""
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples=100, k_range=[2, 10, 50, 90],
        n_runs=10)
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    """Compute the Adjusted Mutual Information and test against known values"""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])

    # Mutual information
    assert_almost_equal(mutual_info_score(labels_a, labels_b), 0.41022, 5)

    # Expected mutual information
    contingency = contingency_matrix(labels_a, labels_b)
    emi = expected_mutual_information(contingency, np.sum(contingency))
    assert_almost_equal(emi, 0.15042, 5)

    # Adjusted mutual information
    assert_almost_equal(
        adjusted_mutual_info_score(labels_a, labels_b), 0.27502, 5)
    assert_equal(adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3]), 1.0)

    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    # This is not accurate to more than 2 places
    assert_almost_equal(adjusted_mutual_info_score(a110, b110), 0.37, 2)
def test_entropy():
    """Entropy of a known label set, plus the degenerate empty case."""
    assert_almost_equal(entropy([0, 0, 42.]), 0.6365141, 5)
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    """contingency_matrix must agree with a 2-D histogram of the labels."""
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    bin_edges = np.arange(1, 5)
    expected = np.histogram2d(labels_a, labels_b,
                              bins=(bin_edges, bin_edges))[0]
    assert_array_almost_equal(contingency_matrix(labels_a, labels_b), expected)
    # eps adds a constant offset to every cell.
    assert_array_almost_equal(
        contingency_matrix(labels_a, labels_b, eps=.1), expected + .1)
def test_exactly_zero_info_score():
    """Check numerical stability when information is exactly zero.

    One labeling puts everything in a single cluster and the other gives
    every sample its own cluster, so all the (adjusted/normalized) scores
    must be exactly 0.0 at several problem sizes.
    """
    for size in np.logspace(1, 4, 4):
        # np.logspace yields floats and the ``np.int`` alias was removed in
        # NumPy 1.24; use int() for the size and the builtin for the dtype.
        n = int(size)
        labels_a = np.ones(n, dtype=int)    # a single cluster
        labels_b = np.arange(n, dtype=int)  # one cluster per sample
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
    """Check relation between v_measure, entropy and mutual information.

    V-measure equals 2 * MI / (H(a) + H(b)) for random labelings of
    several sizes.
    """
    for size in np.logspace(1, 4, 4):
        n = int(size)  # np.logspace yields floats; sizes must be integral
        random_state = np.random.RandomState(seed)
        # randint(0, 11) draws from the inclusive range [0, 10], exactly as
        # the deprecated/removed random_integers(0, 10) did.
        labels_a = random_state.randint(0, 11, n)
        labels_b = random_state.randint(0, 11, n)
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
shishaochen/TensorFlow-0.8-Win | tensorflow/examples/skflow/digits.py | 4 | 2395 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, cross_validation, metrics
import tensorflow as tf
from tensorflow.contrib import skflow
from tensorflow.contrib.skflow import monitors
# Load dataset
digits = datasets.load_digits()
X = digits.images  # 8x8 grayscale images
y = digits.target  # digit labels 0-9

# Split it into train / test subsets
# NOTE(review): sklearn.cross_validation is the pre-0.18 module name; newer
# scikit-learn uses sklearn.model_selection -- confirm the pinned version.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
                                                                     test_size=0.2,
                                                                     random_state=42)

# Split X_train again to create validation data
X_train, X_val, y_train, y_val = cross_validation.train_test_split(X_train,
                                                                   y_train,
                                                                   test_size=0.2,
                                                                   random_state=42)
# TensorFlow model using Scikit Flow ops
def conv_model(X, y):
    """One conv layer + global max pooling feeding a logistic regression."""
    # Add a trailing channel axis so conv2d sees a 4-D tensor.
    images = tf.expand_dims(X, 3)
    # 12 feature maps with 3x3 kernels, reduced by max over the spatial axes.
    pooled = tf.reduce_max(skflow.ops.conv2d(images, 12, [3, 3]), [1, 2])
    flat = tf.reshape(pooled, [-1, 12])
    return skflow.models.logistic_regression(flat, y)
# Monitor validation loss/accuracy during training, reporting every 50 steps.
val_monitor = monitors.ValidationMonitor(X_val, y_val, n_classes=10, print_steps=50)
# Create a classifier, train and predict.
classifier = skflow.TensorFlowEstimator(model_fn=conv_model, n_classes=10,
                                        steps=1000, learning_rate=0.05,
                                        batch_size=128)
classifier.fit(X_train, y_train, val_monitor)
# Held-out accuracy on the test split.
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
| apache-2.0 |
jayshonzs/ESL | KernelSmoothing/LocalLinearRegression.py | 1 | 1142 | '''
Created on 2014-6-21
@author: xiajie
'''
import numpy as np
import SimulateData as sd
import matplotlib.pyplot as plt
def augment(X):
    """Prepend an intercept column: each sample x becomes the row [1, x]."""
    return np.array([[1, value] for value in X])
def Epanechnikov(x0, x, lmbda):
    """Epanechnikov kernel 0.75 * (1 - t^2) with t = |x0 - x| / lmbda.

    Returns 0 outside the kernel support (t > 1).
    """
    t = np.abs(x0 - x) / lmbda
    return 0 if t > 1 else 0.75 * (1 - t ** 2)
def kernel(x0, x, lmbda):
    # Indirection point for the smoothing kernel; currently Epanechnikov
    # is the only implementation.
    return Epanechnikov(x0, x, lmbda)
def Weight(x0, X, lmbda):
    """Diagonal kernel-weight matrix with W[i, i] = K(x0, X[i])."""
    weights = np.array([kernel(x0, xi, lmbda) for xi in X])
    return np.diag(weights)
def draw(X, Y):
    """Plot the local linear regression fit of Y on X over the grid [0, 1).

    For each query point x0 the fit solves the kernel-weighted least
    squares problem f(x0) = b(x0)^T (B^T W B)^{-1} B^T W Y, where B is the
    intercept-augmented design matrix and W the Epanechnikov weights
    centered at x0 (HTF, "Elements of Statistical Learning", eq. 6.8).
    """
    lmbda = 0.2  # kernel bandwidth
    predictors = np.arange(0., 1., 0.005)
    # Size the output from the grid instead of hard-coding 200, so the two
    # cannot fall out of sync if the grid spacing is changed.
    responses = np.zeros(len(predictors))
    B = augment(X)
    BT = np.transpose(B)
    for i, x0 in enumerate(predictors):
        b = np.array([1.0, x0])  # query row [1, x0]
        W = Weight(x0, X, lmbda)
        inv = np.linalg.inv(BT.dot(W).dot(B))
        responses[i] = b.dot(inv).dot(BT).dot(W).dot(Y)
    plt.plot(predictors, responses)
if __name__ == '__main__':
    X, Y = sd.simulate(100)
    # True (noise-free) target curve for visual reference.
    plt.plot(X, np.sin(4*X))
    # Noisy observations.
    plt.plot(X, Y, 'ro')
    # Overlay the local linear regression fit.
    draw(X, Y)
    plt.show()
| mit |
paulsheehan/deeperSpeech | audio_input.py | 1 | 1939 | import csv
import os
import librosa
import numpy as np
import tensorflow as tf
import pandas as pd
# CSV sink for the per-sample cluster assignments written by the main loop.
testOutput = open('data/cluster_test_results.csv', 'w')
outputWriter = csv.writer(testOutput, delimiter=',')

# Output schema: audio features, the cluster result and the ground-truth label.
headers = ["id", "freq_min", "freq_max", "freq_std", "mfcc_power", "mfcc_mean", "mfcc_std", "mfcc_delta_mean", "mfcc_delta_std", "min_zero_crossing_rate", "result", "label"]
def import_model():
    """Load the trained cluster model from disk as a 2-D numpy array.

    ``DataFrame.as_matrix`` was deprecated in pandas 0.23 and removed in
    1.0; ``.values`` is the drop-in replacement returning the same ndarray.
    """
    dataframe = pd.read_csv('data/cluster_output.csv')
    return dataframe.values
def create_test_input():
    """Load the extracted test features from disk as a 2-D numpy array.

    Uses ``.values`` instead of the removed ``DataFrame.as_matrix`` (gone
    since pandas 1.0); the returned ndarray is identical.
    """
    dataframe = pd.read_csv('data/test_features.csv')
    return dataframe.values
def extract_features(i):
    """Return the mean absolute 26-coefficient MFCC vector of audio frame i.

    NOTE(review): relies on module globals ``data``, ``frame`` and ``rate``;
    ``frame`` and ``rate`` are never assigned anywhere in this file --
    confirm they are expected to exist before this function is called.
    """
    # Slice out the i-th fixed-length window of samples.
    sample = data[i*frame:frame+i*frame]
    # stft = np.abs(librosa.stft(data))
    #data = librosa.util.normalize(data[0:2080])
    # Average each MFCC coefficient over time (axis=0 after transpose).
    mfcc = np.mean(librosa.feature.mfcc(y=sample, sr=rate, n_mfcc=26).T, axis=0)
    return np.abs(mfcc)
def distribute_samples(samples, data):
    """Return, per column, the row index in ``samples`` closest to ``data``.

    Builds the element-wise squared difference between the centroid matrix
    ``samples`` and the single feature vector ``data`` (broadcast across
    rows), then takes the argmin along axis 0.

    Adapted from
    http://esciencegroup.com/2016/01/05/an-encounter-with-googles-tensorflow/
    (the original ``expand_dims`` tensors were computed but never used and
    have been dropped).
    """
    distances = tf.square(tf.subtract(samples, data))
    # Row index of the smallest squared difference for each feature column.
    return tf.argmin(distances, 0)
model = import_model()       # trained centroids
data = create_test_input()   # feature rows to assign

outputWriter.writerow(headers)
testOutput.flush()

# NOTE(review): both placeholders look vestigial -- X is never used and Y is
# rebound to session.run's result inside the loop without ever being fed.
X = tf.placeholder(tf.float64, shape=model.shape, name="input")
Y = tf.placeholder(tf.float64, shape=data.shape, name="result")

with tf.Session() as session:
    for i in range(len(data[:])):
        # Nearest-centroid assignment for the i-th test sample.
        Y = session.run(distribute_samples(model, data[i][:]))
        if i % 100 == 0:
            # Python 2 print statement (this file is py2-only as written).
            print Y, data[i][10]
        # Persist the assignment vector alongside the ground-truth label.
        outputWriter.writerow([Y[0], Y[1], Y[2], Y[3], Y[4], Y[5], Y[6], Y[7], Y[8], Y[9], Y[10], data[i][10]])
        testOutput.flush()
rosswhitfield/mantid | qt/python/mantidqt/widgets/plotconfigdialog/axestabwidget/__init__.py | 3 | 2266 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib.ticker import NullLocator
from mpl_toolkits.mplot3d import Axes3D
class AxProperties(dict):
    """
    An object to store the properties that can be set in the Axes
    Tab. It can be constructed from a view or an Axes object.
    """

    def __init__(self, props):
        self.update(props)

    def __getattr__(self, item):
        # Allow attribute-style access to the stored dict entries.
        return self[item]

    @classmethod
    def from_ax_object(cls, ax):
        """Read the editable properties off a matplotlib Axes (2D or 3D)."""
        props = {
            'title': ax.get_title(),
            'xlim': ax.get_xlim(),
            'xlabel': ax.get_xlabel(),
            'xscale': ax.get_xscale().title(),
            'xautoscale': ax.get_autoscalex_on(),
            'ylim': ax.get_ylim(),
            'ylabel': ax.get_ylabel(),
            'yscale': ax.get_yscale().title(),
            'yautoscale': ax.get_autoscaley_on(),
            'canvas_color': ax.get_facecolor(),
        }
        if isinstance(ax, Axes3D):
            # 3D axes expose a z axis but no minor tick/grid controls here.
            props['zlim'] = ax.get_zlim()
            props['zlabel'] = ax.get_zlabel()
            props['zscale'] = ax.get_zscale().title()
            props['zautoscale'] = ax.get_autoscalez_on()
        else:
            # Minor ticks are "on" whenever the locator is not the null one.
            props['minor_ticks'] = not isinstance(ax.xaxis.minor.locator,
                                                  NullLocator)
            props['minor_gridlines'] = getattr(ax, 'show_minor_gridlines',
                                               False)
        return cls(props)

    @classmethod
    def from_view(cls, view):
        """Read the properties the user entered into the Axes tab view."""
        axis = view.get_axis()
        props = {
            'title': view.get_title(),
            'minor_ticks': view.get_show_minor_ticks(),
            'minor_gridlines': view.get_show_minor_gridlines(),
            'canvas_color': view.get_canvas_color(),
            f'{axis}lim': (view.get_lower_limit(), view.get_upper_limit()),
            f'{axis}label': view.get_label(),
            f'{axis}scale': view.get_scale(),
        }
        return cls(props)
| gpl-3.0 |
DhashS/Olin-Complexity-Final-Project | code/stats.py | 1 | 1959 | from parsers import TSP
from algs import simple_greed
import matplotlib.pyplot as plt
import seaborn as sns
from ggplot import ggplot, aes, geom_point, geom_hline, ggtitle, geom_line
import pandas as pd
import numpy as np
from decorator import decorator
import os
import time
def get_stats(name="", path="../img/", data=None, plots=[]):
    """Decorator factory: wrap a solver to aggregate run statistics and plot.

    The wrapped function is called once per entry of its first argument,
    costs and time series are concatenated, and each hook in ``plots`` is
    invoked for display before (costs, tss) is returned.

    NOTE(review): the mutable default ``plots=[]`` is shared across calls;
    harmless here since it is only iterated, but worth confirming.
    NOTE(review): hooks are invoked as plot(costs, tss, path, f), but
    dist_across_cost/cost_progress_trace below accept only three arguments
    -- confirm which signature is intended.
    """
    # Create the per-experiment image directory on first use.
    if name not in os.listdir(path):
        os.mkdir(path+name)
    path = path+name+'/'
    if not type(data) == TSP:
        raise NotImplementedError("Only type TSP allowed")

    def _get_stats(f, *args, **kwargs):
        # Data aggregation: one solver run per element of args[0].
        costs = []
        tss = []
        for input_args in args[0]:
            cost, ts = f(data.graph, input_args)
            costs.append(cost)
            tss.append(ts)
        tss = pd.concat(tss)
        costs = pd.concat(costs)
        # Display
        for plot in plots:
            p = plot(costs, tss, path, f)
        return(costs, tss)
    return decorator(_get_stats)
def scatter_vis(costs, tss, path, f):
    """Scatter of final cost per run with mean/min/max reference lines.

    Saved to <path>/scatter_vis.pdf, titled with the algorithm's name.
    """
    plt.figure()
    p = ggplot(costs,
               aes(x="$N$",
                   y="cost")) +\
        geom_point() +\
        geom_hline(y=costs.cost.mean(), color="grey") +\
        geom_hline(y=costs.cost.max(), color="red") +\
        geom_hline(y=costs.cost.min(), color="green") +\
        ggtitle(f.__name__)
    p.save(path+scatter_vis.__name__+".pdf")
def dist_across_cost(costs, tss, path):
    """Violin plot of the distribution of final costs, saved as a PDF.

    NOTE(review): get_stats invokes its plot hooks with four arguments
    (costs, tss, path, f); this three-argument signature would raise
    TypeError if passed in ``plots`` -- confirm intended usage.
    """
    plt.figure()
    p = sns.violinplot(data=costs,
                       y="cost",
                       saturation=0)
    fig = p.get_figure()
    fig.savefig(path+dist_across_cost.__name__+".pdf")
def cost_progress_trace(costs, tss, path):
    """Per-run traces of current cost against solver progress, saved as PDF.

    NOTE(review): like dist_across_cost, this three-argument signature does
    not match the four-argument hook call in get_stats -- confirm.
    """
    plt.figure()
    p = sns.tsplot(tss,
                   unit="$N$",
                   time="progress",
                   value="current_cost",
                   err_style="unit_traces")
    fig = p.get_figure()
    fig.savefig(path+cost_progress_trace.__name__+".pdf")
| gpl-3.0 |
bhargav/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)

from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors

# NOTE(review): StandardScaler is imported but never used in this script.
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs

# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
                       np.ravel(yy)[:, np.newaxis]))

# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)

# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())

fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)

# Compute clustering with Birch with and without the global clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
                Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']

for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
    t = time()
    birch_model.fit(X)
    # NOTE(review): ``time_`` is computed but unused; the print below
    # re-evaluates time() - t instead.
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
        info, (time() - t)))

    # Plot result
    labels = birch_model.labels_
    centroids = birch_model.subcluster_centers_
    n_clusters = np.unique(labels).size
    print("n_clusters : %d" % n_clusters)

    ax = fig.add_subplot(1, 3, ind + 1)
    for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
        mask = labels == k
        ax.plot(X[mask, 0], X[mask, 1], 'w',
                markerfacecolor=col, marker='.')
        # Subcluster centroids are only meaningful without global clustering.
        if birch_model.n_clusters is None:
            ax.plot(this_centroid[0], this_centroid[1], '+',
                    markerfacecolor=col,
                    markeredgecolor='k', markersize=5)
    ax.set_ylim([-25, 25])
    ax.set_xlim([-25, 25])
    ax.set_autoscaley_on(False)
    ax.set_title('Birch %s' % info)

# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
                      n_init=10, max_no_improvement=10, verbose=0,
                      random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)

ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
                                 range(n_clusters), colors_):
    mask = mbk.labels_ == k
    ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
    ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
            markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
    """Shared checks that predict_proba and predict_log_proba agree.

    Verifies output shapes, that each probability row sums to one, and
    that log-probabilities equal the elementwise log of the probabilities
    (for both single- and multi-output targets).
    """
    proba = clf.predict_proba(X)
    # We know that we can have division by zero
    log_proba = clf.predict_log_proba(X)

    # Normalize y to 2-D so single- and multi-output share the same loop.
    y = np.atleast_1d(y)
    if y.ndim == 1:
        y = np.reshape(y, (-1, 1))

    n_outputs = y.shape[1]
    n_samples = len(X)

    # Single-output estimators return a bare array; wrap for uniformity.
    if n_outputs == 1:
        proba = [proba]
        log_proba = [log_proba]

    for k in range(n_outputs):
        assert_equal(proba[k].shape[0], n_samples)
        assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
        assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
        # We know that we can have division by zero
        assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
    """Fitting on 1d and 2d targets must yield predictions of matching shape."""
    X = np.array([[0], [0], [0], [0]])  # ignored
    targets = [
        np.array([1, 2, 1, 1]),                      # 1d case
        np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]]),                          # 2d case
    ]
    for y in targets:
        est = clone(clf)
        est.fit(X, y)
        assert_equal(y.shape, est.predict(X).shape)
def _check_behavior_2d_for_constant(clf):
    """Multi-output constant strategy: prediction shape must match y."""
    X = np.array([[0], [0], [0], [0]])  # ignored
    y = np.array([[1, 0, 5, 4, 3],
                  [2, 0, 1, 2, 5],
                  [1, 0, 4, 5, 2],
                  [1, 3, 3, 2, 0]])
    est = clone(clf)
    est.fit(X, y)
    assert_equal(y.shape, est.predict(X).shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
                              y_test, y_pred_test):
    """Predictions must equal the fitted statistic tiled over the samples."""
    for y, y_pred in ((y_learn, y_pred_learn), (y_test, y_pred_test)):
        assert_array_equal(np.tile(statistic, (y.shape[0], 1)), y_pred)
def test_most_frequent_and_prior_strategy():
    """Both strategies predict the modal class; proba differs in hardness."""
    X = [[0]] * 4  # features are ignored by dummy estimators
    y = [1, 2, 1, 1]

    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), np.ones(len(X)))
        _check_predict_proba(clf, X, y)

        expected_proba = clf.class_prior_.reshape((1, -1))
        if strategy != "prior":
            # 'most_frequent' yields a hard 0/1 probability vector.
            expected_proba = expected_proba > 0.5
        assert_array_equal(clf.predict_proba([X[0]]), expected_proba)
def test_most_frequent_and_prior_strategy_multioutput():
    """Multi-output targets: each output column gets its own modal class."""
    X = [[0]] * 4  # ignored
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 0],
                  [1, 3]])

    n_samples = len(X)
    # Column 0's mode is 1, column 1's mode is 0.
    expected = np.hstack([np.ones((n_samples, 1)),
                          np.zeros((n_samples, 1))])

    for strategy in ("prior", "most_frequent"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), expected)
        _check_predict_proba(clf, X, y)
        _check_behavior_2d(clf)
def test_stratified_strategy():
    """'stratified' predictions follow the empirical class distribution."""
    X = [[0]] * 5  # ignored
    y = [1, 2, 1, 1, 2]
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    frequencies = np.bincount(clf.predict(X)) / float(len(X))
    assert_almost_equal(frequencies[1], 3. / 5, decimal=1)
    assert_almost_equal(frequencies[2], 2. / 5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
    """Stratified sampling holds per output column for 2d targets."""
    X = [[0]] * 5  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 1],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    y_pred = clf.predict(X)
    for k in range(y.shape[1]):
        frequencies = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(frequencies[1], 3. / 5, decimal=1)
        assert_almost_equal(frequencies[2], 2. / 5, decimal=1)

    _check_predict_proba(clf, X, y)
    _check_behavior_2d(clf)
def test_uniform_strategy():
    """'uniform' predicts every seen class with equal probability."""
    X = [[0]] * 4  # ignored
    y = [1, 2, 1, 1]
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    frequencies = np.bincount(clf.predict(X)) / float(len(X))
    assert_almost_equal(frequencies[1], 0.5, decimal=1)
    assert_almost_equal(frequencies[2], 0.5, decimal=1)
    _check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
    """Uniform sampling holds independently for each output column."""
    X = [[0]] * 4  # ignored
    y = np.array([[2, 1],
                  [2, 2],
                  [1, 2],
                  [1, 1]])
    clf = DummyClassifier(strategy="uniform", random_state=0)
    clf.fit(X, y)

    X = [[0]] * 500
    y_pred = clf.predict(X)
    for k in range(y.shape[1]):
        frequencies = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(frequencies[1], 0.5, decimal=1)
        assert_almost_equal(frequencies[2], 0.5, decimal=1)

    _check_predict_proba(clf, X, y)
    _check_behavior_2d(clf)
def test_string_labels():
    """String class labels are supported; the modal label is predicted."""
    X = [[0]] * 5
    y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
    dummy = DummyClassifier(strategy="most_frequent")
    dummy.fit(X, y)
    assert_array_equal(dummy.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
    """Unknown strategy fails at fit; predicting before fit also fails."""
    bad_clf = DummyClassifier(strategy="unknown")
    assert_raises(ValueError, bad_clf.fit, [], [])
    assert_raises(ValueError, bad_clf.predict, [])
    assert_raises(ValueError, bad_clf.predict_proba, [])
def test_mean_strategy_regressor():
    """Default DummyRegressor predicts the training mean everywhere."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * 4  # ignored
    y = rng.randn(4)

    reg = DummyRegressor()
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
    """Multi-output mean strategy: column-wise means, tiled per sample."""
    rng = np.random.RandomState(seed=1)
    X_learn = rng.randn(10, 10)
    y_learn = rng.randn(10, 5)
    mean = np.mean(y_learn, axis=0).reshape((1, -1))

    X_test = rng.randn(20, 10)
    y_test = rng.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor()
    est.fit(X_learn, y_learn)
    _check_equality_regressor(mean, y_learn, est.predict(X_learn),
                              y_test, est.predict(X_test))
    _check_behavior_2d(est)
def test_regressor_exceptions():
    """Predicting with an unfitted DummyRegressor raises ValueError."""
    unfitted = DummyRegressor()
    assert_raises(ValueError, unfitted.predict, [])
def test_median_strategy_regressor():
    """'median' strategy predicts the training median everywhere."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * 5  # ignored
    y = rng.randn(5)

    reg = DummyRegressor(strategy="median")
    reg.fit(X, y)
    assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
    """Multi-output median strategy: column-wise medians, tiled per sample."""
    rng = np.random.RandomState(seed=1)
    X_learn = rng.randn(10, 10)
    y_learn = rng.randn(10, 5)
    median = np.median(y_learn, axis=0).reshape((1, -1))

    X_test = rng.randn(20, 10)
    y_test = rng.randn(20, 5)

    # Correctness oracle
    est = DummyRegressor(strategy="median")
    est.fit(X_learn, y_learn)
    _check_equality_regressor(median, y_learn, est.predict(X_learn),
                              y_test, est.predict(X_test))
    _check_behavior_2d(est)
def test_quantile_strategy_regressor():
    """Quantile strategy: q=0 is the min, q=1 the max, q=0.5 the median."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * 5  # ignored
    y = rng.randn(5)

    cases = [(0.5, np.median(y)),
             (0, np.min(y)),
             (1, np.max(y)),
             (0.3, np.percentile(y, q=30))]
    for quantile, expected in cases:
        reg = DummyRegressor(strategy="quantile", quantile=quantile)
        reg.fit(X, y)
        assert_array_equal(reg.predict(X), [expected] * len(X))
def test_quantile_strategy_multioutput_regressor():
    """Check the 'quantile' strategy on a 2-d target (q=0.5 and q=0.8)."""
    # NOTE: seeded-RNG draw order (X_learn, y_learn, X_test, y_test) is part
    # of the fixture; do not reorder.
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    median = np.median(y_learn, axis=0).reshape((1, -1))
    quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.5)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        median, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
    # Correctness oracle
    est = DummyRegressor(strategy="quantile", quantile=0.8)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d(est)
def test_quantile_invalid():
    """Missing, out-of-range, or badly typed quantiles must fail at fit."""
    X = [[0]] * 5  # ignored
    y = [0] * 5  # ignored
    invalid_cases = [
        ({}, ValueError),                  # quantile not supplied at all
        ({'quantile': None}, ValueError),
        ({'quantile': [0]}, ValueError),
        ({'quantile': -0.1}, ValueError),
        ({'quantile': 1.1}, ValueError),
        ({'quantile': 'abc'}, TypeError),
    ]
    for kwargs, expected_error in invalid_cases:
        est = DummyRegressor(strategy="quantile", **kwargs)
        assert_raises(expected_error, est.fit, X, y)
def test_quantile_strategy_empty_train():
    """Fitting the 'quantile' strategy on empty data must raise ValueError."""
    estimator = DummyRegressor(strategy="quantile", quantile=0.4)
    assert_raises(ValueError, estimator.fit, [], [])
def test_constant_strategy_regressor():
    """'constant' strategy predicts the given constant (list or scalar)."""
    rng = np.random.RandomState(seed=1)
    X = [[0]] * 5  # ignored
    y = rng.randn(5)
    # the constant may be wrapped in a list or given as a bare scalar
    for constant in ([43], 43):
        regressor = DummyRegressor(strategy="constant", constant=constant)
        regressor.fit(X, y)
        assert_array_equal(regressor.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
    """'constant' strategy with one constant per output column."""
    # NOTE: seeded-RNG draw order is part of the fixture; do not reorder.
    random_state = np.random.RandomState(seed=1)
    X_learn = random_state.randn(10, 10)
    y_learn = random_state.randn(10, 5)
    # test with 2d array
    constants = random_state.randn(5)
    X_test = random_state.randn(20, 10)
    y_test = random_state.randn(20, 5)
    # Correctness oracle
    est = DummyRegressor(strategy="constant", constant=constants)
    est.fit(X_learn, y_learn)
    y_pred_learn = est.predict(X_learn)
    y_pred_test = est.predict(X_test)
    _check_equality_regressor(
        constants, y_learn, y_pred_learn, y_test, y_pred_test)
    _check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
    """After fitting with strategy='mean', constant_ holds the mean of y."""
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    estimator = DummyRegressor(strategy='mean')
    estimator.fit(X, y)
    assert_equal(estimator.constant_, np.mean(y))
def test_unknown_strategey_regressor():
    """An unrecognised strategy string must raise ValueError at fit time."""
    # NOTE: 'strategey' in the function name is a historical typo; renaming
    # would change which tests the runner discovers, so it is kept as-is.
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    estimator = DummyRegressor(strategy='gona')
    assert_raises(ValueError, estimator.fit, X, y)
def test_constants_not_specified_regressor():
    """strategy='constant' without a constant must raise TypeError at fit."""
    X = [[0]] * 5
    y = [1, 2, 4, 6, 8]
    estimator = DummyRegressor(strategy='constant')
    assert_raises(TypeError, estimator.fit, X, y)
def test_constant_size_multioutput_regressor():
    """A constant whose length mismatches the output count must fail."""
    rng = np.random.RandomState(seed=1)
    X = rng.randn(10, 10)
    y = rng.randn(10, 5)  # 5 outputs, but only 4 constants below
    estimator = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
    assert_raises(ValueError, estimator.fit, X, y)
def test_constant_strategy():
    """'constant' classifier always predicts the configured class label."""
    X = [[0], [0], [0], [0]]  # ignored
    # (training labels, constant to predict, expected predictions)
    cases = [
        ([2, 1, 2, 2], 1, np.ones(4)),
        (['two', 'one', 'two', 'two'], 'one', np.array(['one'] * 4)),
    ]
    for y, constant, expected in cases:
        clf = DummyClassifier(strategy="constant", random_state=0,
                              constant=constant)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), expected)
        _check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
    """Multi-output 'constant' classifier predicts one constant per column."""
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[2, 3],
                  [1, 3],
                  [2, 3],
                  [2, 0]])
    n_samples = len(X)
    clf = DummyClassifier(strategy="constant", random_state=0,
                          constant=[1, 0])
    clf.fit(X, y)
    # column 0 is constant 1, column 1 is constant 0, for every sample
    assert_array_equal(clf.predict(X),
                       np.hstack([np.ones((n_samples, 1)),
                                  np.zeros((n_samples, 1))]))
    _check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
    """'constant' classifier must reject a missing or ill-shaped constant."""
    X = [[0], [0], [0], [0]]  # ignored
    y = [2, 1, 2, 2]
    # no constant supplied at all
    clf = DummyClassifier(strategy="constant", random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    # two constants for a single-output target
    clf = DummyClassifier(strategy="constant", random_state=0,
                          constant=[2, 0])
    assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
    """class_prior_ must reflect sample weights, not raw label counts."""
    X = [[0], [0], [1]]
    y = [0, 1, 0]
    sample_weight = [0.1, 1., 0.1]
    clf = DummyClassifier().fit(X, y, sample_weight)
    # class 0 carries weight 0.1+0.1, class 1 carries 1.0; total 1.2
    assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
    """'constant' classifier must return sparse predictions for sparse y."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[0, 1],
                                [4, 0],
                                [1, 1],
                                [1, 4],
                                [1, 1]]))
    n_samples = len(X)
    clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # sparse targets in must yield sparse predictions out
    assert_true(sp.issparse(y_pred))
    assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
                                                    np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
    """'uniform' on a sparse target warns (dense output) but still works."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[2, 1],
                                [2, 2],
                                [1, 4],
                                [4, 2],
                                [1, 1]]))
    clf = DummyClassifier(strategy="uniform", random_state=0)
    assert_warns_message(UserWarning,
                         "the uniform strategy would not save memory",
                         clf.fit, X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    # each of the three observed classes should appear ~1/3 of the time
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 1 / 3, decimal=1)
        assert_almost_equal(p[2], 1 / 3, decimal=1)
        assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
    """'stratified' on a sparse target keeps the training class frequencies."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[4, 1],
                                [0, 0],
                                [1, 1],
                                [1, 4],
                                [1, 1]]))
    clf = DummyClassifier(strategy="stratified", random_state=0)
    clf.fit(X, y)
    X = [[0]] * 500
    y_pred = clf.predict(X)
    assert_true(sp.issparse(y_pred))
    y_pred = y_pred.toarray()
    # empirical frequencies must approximate the 3/5, 1/5, 1/5 training mix
    for k in range(y.shape[1]):
        p = np.bincount(y_pred[:, k]) / float(len(X))
        assert_almost_equal(p[1], 3. / 5, decimal=1)
        assert_almost_equal(p[0], 1. / 5, decimal=1)
        assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
    """'most_frequent' and 'prior' predict the modal class, kept sparse."""
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[1, 0],
                                [1, 3],
                                [4, 0],
                                [0, 1],
                                [1, 0]]))
    n_samples = len(X)
    # column 0 mode is 1, column 1 mode is 0
    y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
    for strategy in ("most_frequent", "prior"):
        clf = DummyClassifier(strategy=strategy, random_state=0)
        clf.fit(X, y)
        y_pred = clf.predict(X)
        assert_true(sp.issparse(y_pred))
        assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
    """Weighted fitting: constant_ must honour sample_weight per strategy."""
    random_state = np.random.RandomState(seed=1)
    X = [[0]] * n_samples
    y = random_state.rand(n_samples)
    sample_weight = random_state.rand(n_samples)
    # mean -> weighted average
    est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
    assert_equal(est.constant_, np.average(y, weights=sample_weight))
    # median -> weighted 50th percentile
    est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
    assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
    # quantile -> weighted percentile at the requested level
    est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
                                                                sample_weight)
    assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
SummaLabs/DLS | app/backend/core/datasets/dbhelpers.py | 1 | 10266 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import sys
import glob
import fnmatch
import numpy as np
import json
import pandas as pd
from dbutils import calcNumImagesByLabel, getListDirNamesInDir, getListImagesInDirectory, checkFilePath
#################################################
class DBImageImportReader:
    """Base class holding train/validation image lists and label metadata.

    Concrete readers (directory- or CSV-based) fill ``listTrainPath`` /
    ``listValPath`` (dicts mapping label -> list of image paths) in their
    ``precalculateInfo()`` override.
    """
    # class-level defaults; instances overwrite these in resetInfo()
    numTrainImages=0
    numValImages=0
    dbConfig=None
    listLabels=None
    listTrainPath=None
    listValPath = None
    # TODO: is a good place for this parameter?
    minSamplesPerClass = 5
    def __init__(self, dbImage2DConfig):
        self.setConfig(dbImage2DConfig)
    # interface
    def resetInfo(self):
        # drop all cached dataset statistics and path lists
        self.numLabels = 0
        self.numTrainImages = 0
        self.numValImages = 0
        self.listTrainPath = None
        self.listValPath = None
        self.listLabels = None
    def setConfig(self, dbImage2DConfig):
        # replacing the config invalidates any previously computed info
        self.resetInfo()
        self.dbConfig = dbImage2DConfig
    def getConfig(self):
        return self.dbConfig
    def getNumTotalImages(self):
        return (self.numTrainImages+self.numValImages)
    def getNumTrainImages(self):
        return self.numTrainImages
    def getNumValImages(self):
        return self.numValImages
    def getNumOfLabels(self):
        if self.listLabels is not None:
            return len(self.listLabels)
        else:
            return 0
    def getNumByLabelsTrain(self):
        # dict: label -> number of training images
        return calcNumImagesByLabel(self.listLabels, self.listTrainPath)
    def getNumByLabelsVal(self):
        # dict: label -> number of validation images
        return calcNumImagesByLabel(self.listLabels, self.listValPath)
    # Override:
    def getPathTrainImageByIdx(self, idx):
        pass
    def getPathValImageByIdx(self, idx):
        pass
    def precalculateInfo(self):
        pass
    # Helpers:
    def toString(self):
        retStr = "%s: #Labels=%d: [%s], #TrainImages=%d, #ValImages=%d" % (
            self.__class__,
            self.getNumOfLabels(),
            self.listLabels,
            self.numTrainImages,
            self.numValImages)
        return retStr
    def __repr__(self):
        return self.toString()
    def __str__(self):
        return self.toString()
    # private helpers-methods
    def _checkNumberOfSamples(self, mapPath, plstLabels, strPref=""):
        # raise if any label has fewer than minSamplesPerClass images
        arrNumSamples = np.array([len(mapPath[ll]) for ll in plstLabels])
        arrLabels = np.array(plstLabels)
        badLabels = arrLabels[arrNumSamples < self.minSamplesPerClass]
        if len(badLabels) > 0:
            strBadLabels = ", ".join(badLabels.tolist())
            strError = "Incorrect dataset size (%s): labels [%s] has less than %d samples per class" % (
                strPref, strBadLabels, self.minSamplesPerClass)
            raise Exception(strError)
    def _postprocPrecalcInfo(self):
        # validate per-class sample counts, then cache the aggregate totals
        self._checkNumberOfSamples(self.listTrainPath, self.listLabels, strPref='Train')
        self._checkNumberOfSamples(self.listValPath, self.listLabels, strPref='Validation')
        self.numTrainImages = sum(self.getNumByLabelsTrain().values())
        self.numValImages = sum(self.getNumByLabelsVal().values())
        self.numLabels = len(self.listLabels)
#################################################
class DBImageImportReaderFromDir(DBImageImportReader):
    """
    Load dataset info from a directory tree with one sub-directory per class.

    Directory structure:
    /path/to/dir-with-images/
        .
        |--class1/
        |--class2/
        |--class3/
        |--...
        \__classN/
    """
    def precalculateInfoFromOneDir(self):
        """Scan a single directory and split each class randomly into
        train/validation according to the configured percentage."""
        tpathImages = self.dbConfig.getTrainingDir()
        if not os.path.isdir(tpathImages):
            raise Exception("Cant find directory with images [%s]" % tpathImages)
        self.listLabels = sorted(getListDirNamesInDir(tpathImages))
        if len(self.listLabels) < 2:
            strErr = "Incorrect number of classes in directory [%s], more than one needed" % (tpathImages)
            raise Exception(strErr)
        # (1) prepare list of image-paths for all images
        tlistPath = {}
        for ll in self.listLabels:
            tlistPath[ll] = getListImagesInDirectory(os.path.join(tpathImages, ll))
        # (2) check number of images per-class
        self._checkNumberOfSamples(tlistPath, self.listLabels)
        # (3) split set by train/val: random permutation per class, first
        # ptrain fraction goes to training
        self.listTrainPath = {}
        self.listValPath = {}
        ptrain = 1. - float(self.dbConfig.getPercentValidation()) / 100.
        for ll in self.listLabels:
            tnum = len(tlistPath[ll])
            tnumTrain = int(tnum * ptrain)
            tlstPermute = np.random.permutation(np.array(tlistPath[ll]))
            self.listTrainPath[ll] = tlstPermute[:tnumTrain].tolist()
            self.listValPath[ll] = tlstPermute[tnumTrain:].tolist()
        # (4) check Train/Validation lists of classes
        self._postprocPrecalcInfo()
    def precalculateInfoFromSepDir(self):
        """Build train/val image lists from two separate directories that
        must expose exactly the same set of class sub-directories."""
        tpathTrain = self.dbConfig.getTrainingDir()
        tpathVal = self.dbConfig.getValidationDir()
        if not os.path.isdir(tpathTrain):
            raise Exception("Cant find Train directory [%s]" % tpathTrain)
        if not os.path.isdir(tpathVal):
            raise Exception("Cant find Validation directory [%s]" % tpathVal)
        lstLblTrain = getListDirNamesInDir(tpathTrain)
        lstLblVal = getListDirNamesInDir(tpathVal)
        # BUG FIX: the original one-sided set difference missed labels that
        # exist only in the Validation directory; compare both ways.
        if set(lstLblTrain) != set(lstLblVal):
            strErr = "Train set [%s] does not coincide with Validation set [%s]" % (lstLblTrain, lstLblVal)
            raise Exception(strErr)
        self.listLabels = sorted(list(set(lstLblTrain + lstLblVal)))
        self.listTrainPath = {}
        self.listValPath = {}
        for ll in self.listLabels:
            self.listTrainPath[ll] = getListImagesInDirectory(os.path.join(tpathTrain, ll))
            self.listValPath[ll] = getListImagesInDirectory(os.path.join(tpathVal, ll))
        # NOTE: unlike the one-dir path, this variant historically skips the
        # per-class minimum-sample check and only caches the totals.
        self.numTrainImages = sum(self.getNumByLabelsTrain().values())
        self.numValImages = sum(self.getNumByLabelsVal().values())
        self.numLabels = len(self.listLabels)
    def precalculateInfo(self):
        """Dispatch to the one-dir or separate-dir scan based on config."""
        if not self.dbConfig.isInitialized():
            self.dbConfig.raiseErrorNotInitialized()
        if self.dbConfig.isSeparateValDir():
            self.precalculateInfoFromSepDir()
        else:
            self.precalculateInfoFromOneDir()
#################################################
class DBImageImportReaderFromCSV(DBImageImportReader):
    """Load dataset info from CSV index files (rows: image-path,label)."""
    # optional root directory used to resolve relative image paths
    pathRootDir = None
    def precalculateInfoFromOneCSV(self):
        """Read a single CSV and split its rows randomly into train/val."""
        isRelPath = self.dbConfig.isUseRelativeDir()
        if isRelPath:
            tpathDirRel = self.dbConfig.getRelativeDirPath()
            checkFilePath(tpathDirRel, isDirectory=True)
        tpathTxt = self.dbConfig.getPathToImageTxt()
        checkFilePath(tpathTxt)
        tdataTxt = pd.read_csv(tpathTxt, header=None, sep=',')
        #TODO: append more validation rules
        if len(tdataTxt.shape) < 2:
            raise Exception('Incorrect CSV file [%s]' % tpathTxt)
        # Shuffle rows up front. ``.values`` replaces the long-deprecated
        # DataFrame.as_matrix(), which was removed in pandas 1.0.
        tdataTxt = tdataTxt.values[np.random.permutation(range(tdataTxt.shape[0])), :]
        self.listLabels = np.sort(np.unique(tdataTxt[:, 1])).tolist()
        tlistPath = {}
        for ll in self.listLabels:
            tmp = tdataTxt[tdataTxt[:, 1] == ll, 0].tolist()
            if not isRelPath:
                tlistPath[ll] = tmp
            else:
                tlistPath[ll] = [os.path.join(tpathDirRel, ii) for ii in tmp]
        self.listTrainPath = {}
        self.listValPath = {}
        ptrain = 1. - float(self.dbConfig.getPercentValidationTxt()) / 100.
        for ll in self.listLabels:
            tnum = len(tlistPath[ll])
            tnumTrain = int(tnum * ptrain)
            tlstPermute = np.random.permutation(np.array(tlistPath[ll]))
            self.listTrainPath[ll] = tlstPermute[:tnumTrain].tolist()
            self.listValPath[ll] = tlstPermute[tnumTrain:].tolist()
        self._postprocPrecalcInfo()
    def precalculateInfoFromSeparateCSV(self):
        """Read separate train/val CSVs; labels are the union of both."""
        isRelPath = self.dbConfig.isUseRelativeDir()
        if isRelPath:
            tpathDirRel = self.dbConfig.getRelativeDirPath()
            checkFilePath(tpathDirRel, isDirectory=True)
        tpathTxtTrain = self.dbConfig.getPathToImageTxt()
        tpathTxtVal = self.dbConfig.getPathToImageTxtVal()
        checkFilePath(tpathTxtTrain)
        checkFilePath(tpathTxtVal)
        tdataTxtTrain = pd.read_csv(tpathTxtTrain, header=None, sep=',')
        tdataTxtVal = pd.read_csv(tpathTxtVal, header=None, sep=',')
        #TODO: append more validation rules
        if len(tdataTxtTrain.shape) < 2:
            raise Exception('Incorrect CSV file [%s]' % tpathTxtTrain)
        if len(tdataTxtVal.shape) < 2:
            raise Exception('Incorrect CSV file [%s]' % tpathTxtVal)
        # ``.values`` replaces deprecated as_matrix() (removed in pandas 1.0)
        tdataTxtTrain = tdataTxtTrain.values
        tdataTxtVal = tdataTxtVal.values
        lstLabelsTrain = np.unique(tdataTxtTrain[:, 1]).tolist()
        lstLabelsVal = np.unique(tdataTxtVal[:, 1]).tolist()
        self.listLabels = sorted(list(set(lstLabelsTrain + lstLabelsVal)))
        self.listTrainPath = {}
        self.listValPath = {}
        for ll in self.listLabels:
            tmpTrain = tdataTxtTrain[tdataTxtTrain[:, 1] == ll, 0].tolist()
            tmpVal = tdataTxtVal[tdataTxtVal[:, 1] == ll, 0].tolist()
            if not isRelPath:
                self.listTrainPath[ll] = tmpTrain
                self.listValPath[ll] = tmpVal
            else:
                self.listTrainPath[ll] = [os.path.join(tpathDirRel, ii) for ii in tmpTrain]
                self.listValPath[ll] = [os.path.join(tpathDirRel, ii) for ii in tmpVal]
        self._postprocPrecalcInfo()
    def precalculateInfo(self):
        """Dispatch to single-CSV or separate-CSV parsing based on config."""
        if not self.dbConfig.isInitialized():
            self.dbConfig.raiseErrorNotInitialized()
        if self.dbConfig.isSeparateValTxt():
            self.precalculateInfoFromSeparateCSV()
        else:
            self.precalculateInfoFromOneCSV()
if __name__ == '__main__':
pass | mit |
xmnlab/minilab | data_type/array/plot.py | 1 | 1109 | # -*- coding: utf-8 -*-
"""
Visualiza datos en gráficos utilizando la librería google charts
"""
import matplotlib.pyplot as plt
import numpy as np
import random
from matplotlib.ticker import EngFormatter
sample = 2
# seven simulated sensors, each with `sample` random readings in [0, 100)
# NOTE(review): xrange makes this Python-2-only code
sensors = [random.sample(xrange(100), sample),
           random.sample(xrange(100), sample),
           random.sample(xrange(100), sample),
           random.sample(xrange(100), sample),
           random.sample(xrange(100), sample),
           random.sample(xrange(100), sample),
           random.sample(xrange(100), sample)]
dados = np.array(sensors)
# (the bare string below is a leftover Portuguese note — "chart setup" —
# kept verbatim because it is a statement, not a comment)
"Configuração do gráfico"
print(sensors)
# engineering-notation tick labels in seconds (e.g. 1.0 ms)
formatter = EngFormatter(unit='s', places=1)
plt.grid(True)
"""
"Analisa os sensores"
for sensor in dados:
    ax = plt.subplot(111)
    ax.xaxis.set_major_formatter(formatter)
    xs = []
    ys = []
    "Analisa os tempos do sensor"
    for tempo in sorted(dados[sensor].keys()):
        xs.append(tempo)
        ys.append(dados[sensor][tempo])
    ax.plot(xs, ys)
"""
# plot the 7 x `sample` sensor matrix against 7 fixed x positions
ax = plt.subplot(111)
ax.xaxis.set_major_formatter(formatter)
ax.plot([.1,.2,.3,.4,.5,.6,.7],dados)
plt.show()
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tseries/util.py | 7 | 3244 | import warnings
from pandas.compat import lrange
import numpy as np
from pandas.types.common import _ensure_platform_int
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
def pivot_annual(series, freq=None):
    """
    Deprecated. Use ``pivot_table`` instead.
    Group a series by years, taking leap years into account.
    The output has as many rows as distinct years in the original series,
    and as many columns as the length of a leap year in the units corresponding
    to the original frequency (366 for daily frequency, 366*24 for hourly...).
    The fist column of the output corresponds to Jan. 1st, 00:00:00,
    while the last column corresponds to Dec, 31st, 23:59:59.
    Entries corresponding to Feb. 29th are masked for non-leap years.
    For example, if the initial series has a daily frequency, the 59th column
    of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
    and the 60th column is masked for non-leap years.
    With a hourly initial frequency, the (59*24)th column of the output always
    correspond to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
    the 24 columns between (59*24) and (61*24) are masked.
    If the original frequency is less than daily, the output is equivalent to
    ``series.convert('A', func=None)``.
    Parameters
    ----------
    series : Series
    freq : string or None, default None
    Returns
    -------
    annual : DataFrame
    """
    msg = "pivot_annual is deprecated. Use pivot_table instead"
    warnings.warn(msg, FutureWarning)
    index = series.index
    year = index.year
    years = nanops.unique1d(year)
    if freq is not None:
        freq = freq.upper()
    else:
        freq = series.index.freq
    if freq == 'D':
        width = 366
        offset = index.dayofyear - 1
        # adjust for leap year: skip the Feb-29 column (index 59) in
        # non-leap years so later dates stay aligned across years
        offset[(~isleapyear(year)) & (offset >= 59)] += 1
        columns = lrange(1, 367)
        # todo: strings like 1/1, 1/25, etc.?
    elif freq in ('M', 'BM'):
        width = 12
        offset = index.month - 1
        columns = lrange(1, 13)
    elif freq == 'H':
        width = 8784
        grouped = series.groupby(series.index.year)
        defaulted = grouped.apply(lambda x: x.reset_index(drop=True))
        defaulted.index = defaulted.index.droplevel(0)
        offset = np.asarray(defaulted.index)
        # shift post-Feb-29 hours in non-leap years (1416 == 59 days * 24h)
        offset[~isleapyear(year) & (offset >= 1416)] += 24
        columns = lrange(1, 8785)
    else:
        raise NotImplementedError(freq)
    # map each observation to a flat cell index in the (years x width) grid
    flat_index = (year - years.min()) * width + offset
    flat_index = _ensure_platform_int(flat_index)
    values = np.empty((len(years), width))
    values.fill(np.nan)  # cells with no observation stay NaN
    values.put(flat_index, series.values)
    return DataFrame(values, index=years, columns=columns)
def isleapyear(year):
    """
    Returns true if year is a leap year.
    Parameters
    ----------
    year : integer / sequence
        A given (list of) year(s).
    """
    msg = "isleapyear is deprecated. Use .is_leap_year property instead"
    warnings.warn(msg, FutureWarning)
    yr = np.asarray(year)
    # Gregorian rule: divisible by 400, or by 4 but not a century year.
    return (yr % 400 == 0) | ((yr % 4 == 0) & (yr % 100 > 0))
| gpl-3.0 |
gwparikh/cvguipy | genetic_compare.py | 2 | 4329 | #!/usr/bin/env python
import os, sys, subprocess
import argparse
import subprocess
import timeit
from multiprocessing import Queue, Lock
from configobj import ConfigObj
from numpy import loadtxt
from numpy.linalg import inv
import matplotlib.pyplot as plt
import moving
from cvguipy import trajstorage, cvgenetic
"""compare all precreated sqlite (by cfg_combination.py) with annotated version using genetic algorithm"""
# class for genetic algorithm
class GeneticCompare(object):
    """Fitness evaluator shared by the genetic-algorithm worker threads.

    Results are pushed into the multiprocessing queues under ``lock`` so
    several workers can report concurrently.
    """
    def __init__(self, motalist, IDlist, lock):
        # motalist/IDlist: multiprocessing.Queue instances filled in computeMOT
        self.motalist = motalist
        self.IDlist = IDlist
        self.lock = lock
    # fitness function: returns the MOTA score of configuration ``i``
    def computeMOT(self, i):
        # NOTE(review): relies on module-level globals set in __main__
        # (sqlite_files, cdb, args, firstFrame, lastFrame) — only callable
        # after the script's setup code has run.
        obj = trajstorage.CVsqlite(sqlite_files+str(i)+".sqlite")
        obj.loadObjects()
        motp, mota, mt, mme, fpt, gt = moving.computeClearMOT(cdb.annotations, obj.objects, args.matchDistance, firstFrame, lastFrame)
        self.lock.acquire()
        self.IDlist.put(i)
        self.motalist.put(mota)
        obj.close()
        if args.PrintMOTA:
            print("ID", i, " : ", mota)
        self.lock.release()
        return mota
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="compare all sqlites that are created by cfg_combination.py to the Annotated version to find the ID of the best configuration")
    parser.add_argument('-d', '--database-file', dest='databaseFile', help="Name of the databaseFile.", required=True)
    parser.add_argument('-o', '--homography-file', dest='homography', help="Name of the homography file.", required=True)
    parser.add_argument('-md', '--matching-distance', dest='matchDistance', help="matchDistance", default=10, type=float)
    parser.add_argument('-a', '--accuracy', dest='accuracy', help="accuracy parameter for genetic algorithm", type=int)
    parser.add_argument('-p', '--population', dest='population', help="population parameter for genetic algorithm", required=True, type=int)
    parser.add_argument('-np', '--num-of-parents', dest='num_of_parents', help="Number of parents that are selected each generation", type=int)
    parser.add_argument('-mota', '--print-MOTA', dest='PrintMOTA', action='store_true', help="Print MOTA for each ID.")
    args = parser.parse_args()
    start = timeit.default_timer()
    # --- load annotations and project them with the homography ---
    dbfile = args.databaseFile
    homography = loadtxt(args.homography)
    sqlite_files = "sql_files/Sqlite_ID_"
    cdb = trajstorage.CVsqlite(dbfile)
    cdb.open()
    cdb.getLatestAnnotation()
    cdb.createBoundingBoxTable(cdb.latestannotations, inv(homography))
    cdb.loadAnnotaion()
    for a in cdb.annotations:
        a.computeCentroidTrajectory(homography)
    print("Latest Annotations in " + dbfile + ": ", cdb.latestannotations)
    cdb.frameNumbers = cdb.getFrameList()
    firstFrame = cdb.frameNumbers[0]
    lastFrame = cdb.frameNumbers[-1]
    # queues collect (ID, MOTA) pairs produced by the worker threads
    foundmota = Queue()
    IDs = Queue()
    lock = Lock()
    Comp = GeneticCompare(foundmota, IDs, lock)
    # BUG FIX: ``cfgcomb`` was used below but never imported, so this script
    # crashed with NameError. It is assumed to live in the cvguipy package
    # next to trajstorage and cvgenetic -- verify the module name.
    from cvguipy import cfgcomb
    config = ConfigObj('range.cfg')
    cfg_list = cfgcomb.CVConfigList()
    cfgcomb.config_to_list(cfg_list, config)
    if args.accuracy is not None:
        GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT, args.accuracy)
    else:
        GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT)
    if args.num_of_parents is not None:
        GeneticCal.run_thread(args.num_of_parents)
    else:
        GeneticCal.run_thread()
    # transform queues to lists
    foundmota = cvgenetic.Queue_to_list(foundmota)
    IDs = cvgenetic.Queue_to_list(IDs)
    Best_mota = max(foundmota)
    Best_ID = IDs[foundmota.index(Best_mota)]
    print("Best multiple object tracking accuracy (MOTA)", Best_mota)
    print("ID:", Best_ID)
    stop = timeit.default_timer()
    print(str(stop - start) + "s")
    # scatter of all evaluated configurations; the best one highlighted red
    plt.plot(foundmota, IDs, 'bo')
    plt.plot(Best_mota, Best_ID, 'ro')
    plt.axis([-1, 1, -1, cfg_list.get_total_combination()])
    plt.xlabel('mota')
    plt.ylabel('ID')
    # BUG FIX: the original concatenated bytes (b'...') with str, which
    # raises TypeError on Python 3; use a plain str literal.
    plt.title('Best MOTA: ' + str(Best_mota) + '\nwith ID: ' + str(Best_ID))
    plt.show()
    cdb.close()
moonbury/pythonanywhere | github/MasteringMLWithScikit-learn/8365OS_04_Codes/ch42.py | 3 | 1763 | import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_score, recall_score, roc_auc_score, auc, confusion_matrix
import numpy as np
from scipy.sparse import hstack
blacklist = [l.strip() for l in open('insults/blacklist.csv', 'rb')]
def get_counts(documents):
    """Per-document count of blacklist-word occurrences (case-insensitive)."""
    totals = []
    for document in documents:
        lowered = document.lower()
        totals.append(np.sum([lowered.count(word) for word in blacklist]))
    return np.array(totals)
# Note that I cleaned the trianing data by replacing """ with "
train_df = pd.read_csv('insults/train.csv')
# hold out a random test split of the labelled comments
X_train_raw, X_test_raw, y_train, y_test = train_test_split(train_df['Comment'], train_df['Insult'])
# unigram TF-IDF features, capped at 4000 terms, very frequent terms dropped
vectorizer = TfidfVectorizer(max_features=4000, norm='l2', max_df=0.1,
ngram_range=(1, 1), stop_words='english', use_idf=True)
X_train = vectorizer.fit_transform(X_train_raw)
#X_train_counts = get_counts(X_train_raw)
#X_train = hstack((X_train, X_train_counts.reshape(len(X_train_counts), 1)))
X_test = vectorizer.transform(X_test_raw)
#X_test_counts = get_counts(X_test_raw)
#X_test = hstack((X_test, X_test_counts.reshape(len(X_test_counts), 1)))
classifier = LogisticRegression(penalty='l2', C=1)
classifier.fit_transform(X_train, y_train)
predictions = classifier.predict(X_test)
# NOTE(review): Python-2 print statements; the whole script is Py2-only
print 'accuracy', classifier.score(X_test, y_test)
print 'precision', precision_score(y_test, predictions)
print 'recall', recall_score(y_test, predictions)
print 'auc', roc_auc_score(y_test, predictions)
print confusion_matrix(y_true=y_test, y_pred=predictions)
"""
clf__C: 1.0
clf__penalty: 'l2'
vect__max_df: 0.1
vect__max_features: 4000
vect__ngram_range: (1, 1)
vect__norm: 'l2'
vect__use_idf: True
"""
giorgiop/scikit-learn | sklearn/datasets/samples_generator.py | 7 | 56557 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions"""
    # Index sampling only covers up to 2**30 cells, so wider hypercubes are
    # assembled 30 dimensions at a time. The recursive calls must stay in
    # this order: they consume the shared rng sequentially.
    if dimensions > 30:
        head = _generate_hypercube(samples, dimensions - 30, rng)
        tail = _generate_hypercube(samples, 30, rng)
        return np.hstack([head, tail])
    indices = sample_without_replacement(2 ** dimensions, samples,
                                         random_state=rng)
    packed = astype(indices, dtype='>u4', copy=False)
    bits = np.unpackbits(packed.view('>u1')).reshape((-1, 32))
    return bits[:, -dimensions:]
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.
    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.
    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.
    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.
    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.
    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.
    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)
    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")
    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class
    # infer the last class weight when n_classes - 1 weights were given
    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))
    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])
    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # hand out any rounding remainder one sample at a time
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=np.int)
    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)
    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)
    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance
        X_k += centroid  # shift the cluster to a vertex
    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)
    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]
    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)
    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift
    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale
    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)
        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=20)
        The total number of features.
    n_classes : int, optional (default=5)
        The number of classes of the classification problem.
    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.
    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.
    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.
    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix

        .. versionadded:: 0.17
           parameter to allow *sparse* output.
    return_indicator : 'dense' (default) | 'sparse' | False
        If ``dense`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.
    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    Y : array or sparse CSR matrix of shape [n_samples, n_classes]
        The label sets.
    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.
    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.
    """
    generator = check_random_state(random_state)
    # Class prior p(c): random draws normalized to a probability
    # distribution; the cumulative sum is kept for inverse-CDF sampling.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    # Word-given-class probabilities p(w|c): one column per class, each
    # column normalized over the n_features "words".
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        # Draw a single (word indices, label list) pair following the
        # generative process documented above.  The closure deliberately
        # shadows ``n_classes`` with the column count of p_w_c.
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1  # sentinel: forces at least one Poisson draw
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n_classes classes with probability P(c); duplicates are
        # discarded by the set, so we keep drawing until y_size distinct
        # classes have been collected (rejection of repeats).
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from the mixture of the selected
        # classes' word distributions (columns of p_w_c summed, then
        # renormalized into a CDF for searchsorted-based sampling)
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y

    # Accumulate documents directly in CSR-style (indices, indptr) buffers;
    # array.array keeps the growing integer lists compact.
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    # Each word occurrence contributes 1; sum_duplicates folds repeated
    # (row, word) entries so X ends up holding word counts.
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        # fit on range(n_classes) so all classes appear in the indicator
        # matrix even if never drawn
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generates data for binary classification used in
    Hastie et al. 2009, Example 10.2.

    The ten features are standard independent Gaussian and
    the target ``y`` is defined by::

      y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    rs = check_random_state(random_state)
    shape = (n_samples, 10)
    # rs.normal(size=shape) already returns an array of the requested
    # shape, so the historical trailing ``.reshape(shape)`` was a no-op
    # and has been removed.
    X = rs.normal(size=shape)
    # 9.34 is (approximately) the median of a chi-squared(10) variable,
    # so the two classes are roughly balanced.
    y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
    y[y == 0.0] = -1.0
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.

    Draws an input matrix (well conditioned by default, or with a
    low-rank / fat-tail singular profile when ``effective_rank`` is set —
    see :func:`make_low_rank_matrix`), then produces targets from a random
    linear model with ``n_informative`` non-zero coefficients, plus an
    optional bias and centered Gaussian noise.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        The number of features actually used by the linear model that
        generates the output (clipped to ``n_features``).
    n_targets : int, optional (default=1)
        The dimension of the y output vector associated with a sample.
        By default, the output is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        If not None, the approximate number of singular vectors required
        to explain most of the input data; if None the input set is well
        conditioned, centered and gaussian with unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular
        values profile if `effective_rank` is not None.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, the coefficients of the underlying linear model are
        returned.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : array of shape [n_features] or [n_features, n_targets], optional
        The coefficient of the underlying linear model. It is returned only
        if coef is True.
    """
    n_informative = min(n_features, n_informative)
    rng = check_random_state(random_state)

    # Input matrix: i.i.d. standard normal, or low-rank when requested.
    if effective_rank is None:
        X = rng.randn(n_samples, n_features)
    else:
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=rng)

    # Sparse ground-truth coefficients: only the first n_informative rows
    # are non-zero, so the remaining features carry no signal and should
    # be dropped by sparsifying regularizers such as L1 / elastic net.
    true_coef = np.zeros((n_features, n_targets))
    true_coef[:n_informative, :] = 100 * rng.rand(n_informative,
                                                  n_targets)

    y = np.dot(X, true_coef) + bias
    if noise > 0.0:
        # Centered Gaussian noise on the targets.
        y += rng.normal(scale=noise, size=y.shape)

    if shuffle:
        # Permute the samples, then independently permute the feature
        # columns (keeping the coefficients aligned with the columns).
        X, y = util_shuffle(X, y, random_state=rng)
        perm = np.arange(n_features)
        rng.shuffle(perm)
        X = X[:, perm]
        true_coef = true_coef[perm]

    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(true_coef)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    generator = check_random_state(random_state)

    # Split the points between the two circles.  For odd n_samples the
    # inner circle receives the extra point, so exactly n_samples points
    # are returned (the previous implementation produced n_samples - 1
    # points whenever n_samples was odd).
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    # endpoint=False keeps the first point from being duplicated at 2*pi
    # (equivalent to the old "add one point and drop it" trick).
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    inner_circ_x = np.cos(linspace_in) * factor
    inner_circ_y = np.sin(linspace_in) * factor

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms. Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample.
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    # Outer moon: upper half circle; inner moon: lower half circle,
    # shifted to interleave with the outer one.
    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # X stacks the n_samples_out outer points first, then the n_samples_in
    # inner points, so the label vector must use the same counts in the
    # same order.  The previous code had the two counts swapped, which
    # mislabeled one boundary point whenever n_samples was odd.
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points equally divided among clusters.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    rng = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        # Draw the requested number of centers uniformly in center_box.
        centers = rng.uniform(center_box[0], center_box[1],
                              size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    if isinstance(cluster_std, numbers.Real):
        # A scalar std applies to every cluster.
        cluster_std = np.ones(len(centers)) * cluster_std

    n_centers = centers.shape[0]
    # Spread samples as evenly as possible: the first n_samples % n_centers
    # clusters each receive one extra point.
    base, extra = divmod(n_samples, n_centers)
    counts = [base + 1 if i < extra else base for i in range(n_centers)]

    samples = []
    labels = []
    for label, (count, std) in enumerate(zip(counts, cluster_std)):
        samples.append(centers[label] + rng.normal(scale=std,
                                                   size=(count, n_features)))
        labels += [label] * count

    X = np.concatenate(samples)
    y = np.array(labels)

    if shuffle:
        # Randomly permute the samples (labels stay aligned).
        order = np.arange(n_samples)
        rng.shuffle(order)
        X = X[order]
        y = y[order]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman #1" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].  Inputs `X`
    are independent features uniformly distributed on the interval [0, 1].
    The output `y` is created according to the formula::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Out of the `n_features` features, only 5 are actually used to compute
    `y`; the remaining features are independent of `y`.  The number of
    features has to be >= 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features. Should be at least 5.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    rng = check_random_state(random_state)
    X = rng.rand(n_samples, n_features)

    # Only the first five columns influence the target; the remaining
    # n_features - 5 columns are pure nuisance features.
    y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
        + 10 * X[:, 3] + 5 * X[:, 4] + noise * rng.randn(n_samples)
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].  Inputs `X`
    are 4 independent features uniformly distributed on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2]
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)

    # Draw uniform [0, 1) features, then map each column onto its
    # documented interval with an in-place affine transform.
    X = rng.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    squared_radius = X[:, 0] ** 2 \
        + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2
    y = squared_radius ** 0.5 + noise * rng.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    This dataset is described in Friedman [1] and Breiman [2].  Inputs `X`
    are 4 independent features uniformly distributed on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3]))
                      / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    rng = check_random_state(random_state)

    # Draw uniform [0, 1) features, then map each column onto its
    # documented interval with an in-place affine transform.
    X = rng.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    ratio = (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    y = np.arctan(ratio) + noise * rng.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors).

    This kind of singular profiles is often seen in practice, for instance:
     - gray level pictures of faces
     - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        The relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random orthonormal bases for the left and right singular vectors.
    u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    # Index of the singular values
    singular_ind = np.arange(n, dtype=np.float64)

    # Build the singular profile by assembling signal and noise components
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
    s = low_rank + tail

    # Scale the columns of u by the singular values via broadcasting
    # instead of materializing the n x n matrix ``np.identity(n) * s``
    # and multiplying through it: ``u * s`` equals ``u.dot(np.diag(s))``
    # exactly, while using O(n) rather than O(n**2) extra memory.
    return np.dot(u * s, v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such as D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        number of samples to generate
    n_components : int
        number of components in the dictionary
    n_features : int
        number of features of the dataset to generate
    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # generate dictionary with unit-norm columns
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # generate code: each column gets exactly n_nonzero_coefs non-zero
    # entries at randomly chosen component indices
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)

    # encode signal
    Y = np.dot(D, X)

    # Return a concrete tuple of arrays rather than a bare ``map`` object:
    # under Python 3 a map iterator can only be consumed once and supports
    # neither indexing nor ``len``, which silently broke callers that did
    # not immediately unpack the result.  Tuple unpacking keeps working
    # exactly as before.
    return tuple(np.squeeze(a) for a in (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative; the remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    rng = check_random_state(random_state)
    X = rng.normal(loc=0, scale=1, size=(n_samples, n_features))

    # Only the first four features carry signal; y is that linear
    # combination plus unit-variance Gaussian noise.
    signal = (X[:, 0] +
              2 * X[:, 1] -
              2 * X[:, 2] -
              1.5 * X[:, 3])
    y = rng.normal(loc=signal, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    rng = check_random_state(random_state)

    # SVD of the symmetric PSD seed A.T @ A gives orthogonal factors with
    # V == U.T, so re-assembling with modified middle factor stays symmetric.
    seed = rng.rand(n_dim, n_dim)
    U, _, Vt = linalg.svd(np.dot(seed.T, seed))
    # NOTE(review): ``1.0 +`` is added to EVERY entry of the diagonal
    # matrix (off-diagonals become 1.0), not just the diagonal — behavior
    # preserved as-is from the original implementation.
    middle = 1.0 + np.diag(rng.rand(n_dim))
    return np.dot(np.dot(U, middle), Vt)
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    dim : integer, optional (default=1)
        The size of the random matrix to generate.
    alpha : float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.
    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.
    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1

    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.

    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.

    See also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)

    # Start from -I and add a sparse strictly-lower-triangular part to form
    # a Cholesky-like factor; sparsity is imposed on this factor, not on
    # the final matrix (see Notes).
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # Threshold the uniform draws: entries below alpha are zeroed; entries
    # above alpha are redrawn uniformly in [smallest_coef, largest_coef].
    # NOTE(review): entries exactly equal to alpha are left untouched — a
    # probability-zero event for float draws, but worth knowing.  The order
    # matters: the mask ``aux > alpha`` is evaluated twice and must see the
    # same entries both times, which holds because the first assignment only
    # modifies entries strictly below alpha.
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower-triangular part (k=-1 drops the diagonal).
    aux = np.tril(aux, k=-1)

    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # chol.T @ chol is symmetric positive definite by construction (chol has
    # -1 on its diagonal, so it is non-singular).
    prec = np.dot(chol.T, chol)

    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)
        # Scale rows and columns by 1/sqrt(diag) so the diagonal becomes
        # all ones.
        prec *= d
        prec *= d.T

    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
    """
    rng = check_random_state(random_state)

    # Angle/position along the roll, uniform in [1.5*pi, 4.5*pi).
    t = 1.5 * np.pi * (1 + 2 * rng.rand(1, n_samples))
    # Depth along the roll axis, uniform in [0, 21).
    depth = 21 * rng.rand(1, n_samples)

    # Spiral in the x-z plane, extruded along y, plus isotropic noise.
    X = np.concatenate((t * np.cos(t), depth, t * np.sin(t)))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    rng = check_random_state(random_state)

    # Curve parameter, uniform in [-1.5*pi, 1.5*pi).
    t = 3 * np.pi * (rng.rand(1, n_samples) - 0.5)
    # Extent along the flat (y) axis, uniform in [0, 2).
    height = 2.0 * rng.rand(1, n_samples)

    # S-shape in the x-z plane, extruded along y, plus isotropic noise.
    X = np.concatenate((np.sin(t), height, np.sign(t) * (np.cos(t) - 1)))
    X += noise * rng.randn(3, n_samples)

    return X.T, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    r"""Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).
    cov : float, optional (default=1.)
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.
    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.
    n_features : int, optional (default=2)
        The number of features for each sample.
    n_classes : int, optional (default=3)
        The number of classes.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Raises
    ------
    ValueError
        If ``n_samples`` is smaller than ``n_classes``.

    Notes
    -----
    The dataset is from Zhu et al [1].

    The docstring is a raw string: the original non-raw version made
    ``\chi`` an invalid escape sequence (a DeprecationWarning in Python 3.6+).

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    if mean is None:
        mean = np.zeros(n_features)
    else:
        mean = np.array(mean)

    # Build multivariate normal distribution
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Sort by distance from origin
    idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[idx, :]

    # Label by quantile: each of the first n_classes-1 labels gets `step`
    # samples; the last label absorbs the remainder.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    """Randomly permute the rows and columns of ``data``.

    Returns the shuffled array together with the row and column
    permutations that were applied, so callers can relabel accordingly.
    """
    rng = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_idx = rng.permutation(n_rows)
    col_idx = rng.permutation(n_cols)
    return data[row_idx][:, col_idx], row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer
        The number of biclusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.

    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    consts = generator.uniform(minval, maxval, n_clusters)

    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    row_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_clusters), row_sizes)])
    col_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_clusters), col_sizes)])

    # Add each bicluster's constant value on the block-diagonal.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUGFIX: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated and raises in NumPy >= 1.16.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])

    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.

    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.

    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)

    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters

    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))
    row_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_row_clusters), row_sizes)])
    col_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_col_clusters), col_sizes)])

    # Each (row cluster, column cluster) cell gets its own constant value.
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)

    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)

    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]

    # BUGFIX: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated and raises in NumPy >= 1.16.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])

    return result, rows, cols
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tseries/holiday.py | 9 | 16176 | import warnings
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa
from pandas.tseries.offsets import Easter, Day
import numpy as np
def next_monday(dt):
    """
    Shift a weekend holiday forward to Monday.

    Saturday observances move ahead two days and Sunday observances one
    day, so both land on the following Monday; weekdays pass through.
    """
    wd = dt.weekday()
    if wd == 5:  # Saturday
        return dt + timedelta(2)
    if wd == 6:  # Sunday
        return dt + timedelta(1)
    return dt
def next_monday_or_tuesday(dt):
    """
    Observance for the second of two adjacent holidays.

    Saturday moves to the following Monday; Sunday and Monday move to the
    following Tuesday, because Monday is already taken by the adjacent
    holiday observed the day before.
    """
    wd = dt.weekday()
    if wd in (5, 6):   # Saturday or Sunday
        return dt + timedelta(2)
    if wd == 0:        # Monday
        return dt + timedelta(1)
    return dt
def previous_friday(dt):
    """
    Shift a weekend holiday back to the previous Friday.
    Weekday holidays are returned unchanged.
    """
    # Saturday is one day after Friday, Sunday two days after.
    shift = {5: 1, 6: 2}.get(dt.weekday(), 0)
    return dt - timedelta(shift) if shift else dt
def sunday_to_monday(dt):
    """
    Observe a Sunday holiday on the following Monday.
    All other days (including Saturday) are returned unchanged.
    """
    return dt + timedelta(1) if dt.weekday() == 6 else dt
def weekend_to_monday(dt):
    """
    Observe a Saturday or Sunday holiday on the following Monday.

    Needed for holidays such as the Christmas observation in Europe.
    """
    wd = dt.weekday()
    if wd == 6:  # Sunday -> next day
        return dt + timedelta(1)
    if wd == 5:  # Saturday -> skip the Sunday too
        return dt + timedelta(2)
    return dt
def nearest_workday(dt):
    """
    Observe a weekend holiday on the closest workday: Saturday moves back
    to Friday, Sunday moves forward to Monday.
    """
    wd = dt.weekday()
    if wd == 5:  # Saturday -> previous Friday
        return dt - timedelta(1)
    if wd == 6:  # Sunday -> next Monday
        return dt + timedelta(1)
    return dt
def next_workday(dt):
    """
    Return the first weekday strictly after ``dt`` (used for observances).
    """
    nxt = dt + timedelta(days=1)
    # Mon-Fri are weekday() 0-4; keep stepping past the weekend.
    while nxt.weekday() > 4:
        nxt += timedelta(days=1)
    return nxt
def previous_workday(dt):
    """
    Return the last weekday strictly before ``dt`` (used for observances).
    """
    prev = dt - timedelta(days=1)
    # Mon-Fri are weekday() 0-4; keep stepping back over the weekend.
    while prev.weekday() > 4:
        prev -= timedelta(days=1)
    return prev
def before_nearest_workday(dt):
    """
    Return the workday immediately before the nearest workday to ``dt``.
    """
    nearest = nearest_workday(dt)
    return previous_workday(nearest)
def after_nearest_workday(dt):
    """
    Return the workday immediately after the nearest workday to ``dt``.

    Needed for Boxing Day or multiple holidays in a series.
    """
    nearest = nearest_workday(dt)
    return next_workday(nearest)
class Holiday(object):
    """
    Class that defines a holiday with start/end dates and rules
    for observance.

    A holiday is anchored at (``month``, ``day``) each year (or at a single
    fixed ``year``) and then adjusted by either an ``offset`` or an
    ``observance`` function — never both.
    """
    def __init__(self, name, year=None, month=None, day=None, offset=None,
                 observance=None, start_date=None, end_date=None,
                 days_of_week=None):
        """
        Parameters
        ----------
        name : str
            Name of the holiday, defaults to class name
        year : int, optional
            If given, the holiday occurs exactly once, in that year.
        month, day : int
            Anchor date of the holiday within a year.
        offset : array of pandas.tseries.offsets or
            class from pandas.tseries.offsets
            computes offset from date
        observance: function
            computes when holiday is given a pandas Timestamp
        start_date, end_date : datetime-like, optional
            Bounds outside of which the holiday is never observed.
        days_of_week:
            provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
            Monday=0,..,Sunday=6

        Examples
        --------
        >>> from pandas.tseries.holiday import Holiday, nearest_workday
        >>> from pandas import DateOffset
        >>> from dateutil.relativedelta import MO
        >>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
                                    offset=DateOffset(weekday=MO(1)))
        >>> USLaborDay = Holiday('Labor Day', month=9, day=1,
                                 offset=DateOffset(weekday=MO(1)))
        >>> July3rd = Holiday('July 3rd', month=7, day=3,)
        >>> NewYears = Holiday('New Years Day', month=1, day=1,
                               observance=nearest_workday)
        >>> July3rd = Holiday('July 3rd', month=7, day=3,
                              days_of_week=(0, 1, 2, 3))
        """
        # offset and observance are mutually exclusive adjustment rules.
        if offset is not None and observance is not None:
            raise NotImplementedError("Cannot use both offset and observance.")
        self.name = name
        self.year = year
        self.month = month
        self.day = day
        self.offset = offset
        self.start_date = Timestamp(
            start_date) if start_date is not None else start_date
        self.end_date = Timestamp(
            end_date) if end_date is not None else end_date
        self.observance = observance
        assert (days_of_week is None or type(days_of_week) == tuple)
        self.days_of_week = days_of_week
    def __repr__(self):
        info = ''
        if self.year is not None:
            info += 'year=%s, ' % self.year
        info += 'month=%s, day=%s, ' % (self.month, self.day)
        if self.offset is not None:
            info += 'offset=%s' % self.offset
        if self.observance is not None:
            info += 'observance=%s' % self.observance
        repr = 'Holiday: %s (%s)' % (self.name, info)
        return repr
    def dates(self, start_date, end_date, return_name=False):
        """
        Calculate holidays observed between start date and end date

        Parameters
        ----------
        start_date : starting date, datetime-like, optional
        end_date : ending date, datetime-like, optional
        return_name : bool, optional, default=False
            If True, return a series that has dates and holiday names.
            False will only return dates.
        """
        start_date = Timestamp(start_date)
        end_date = Timestamp(end_date)
        filter_start_date = start_date
        filter_end_date = end_date
        # Fixed-year holiday: a single date, independent of the range asked.
        if self.year is not None:
            dt = Timestamp(datetime(self.year, self.month, self.day))
            if return_name:
                return Series(self.name, index=[dt])
            else:
                return [dt]
        dates = self._reference_dates(start_date, end_date)
        holiday_dates = self._apply_rule(dates)
        if self.days_of_week is not None:
            # Keep only occurrences that fall on an allowed weekday.
            holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
                                                  self.days_of_week)]
        # Clip the result to the intersection of the requested range with
        # the holiday's own validity window (start_date/end_date).
        if self.start_date is not None:
            filter_start_date = max(self.start_date.tz_localize(
                filter_start_date.tz), filter_start_date)
        if self.end_date is not None:
            filter_end_date = min(self.end_date.tz_localize(
                filter_end_date.tz), filter_end_date)
        holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
                                      (holiday_dates <= filter_end_date)]
        if return_name:
            return Series(self.name, index=holiday_dates)
        return holiday_dates
    def _reference_dates(self, start_date, end_date):
        """
        Get reference dates for the holiday.

        Return reference dates for the holiday also returning the year
        prior to the start_date and year following the end_date. This ensures
        that any offsets to be applied will yield the holidays within
        the passed in dates.
        """
        if self.start_date is not None:
            start_date = self.start_date.tz_localize(start_date.tz)
        if self.end_date is not None:
            end_date = self.end_date.tz_localize(start_date.tz)
        year_offset = DateOffset(years=1)
        # Widen by one year on each side so offsets cannot push a holiday
        # out of (or fail to pull one into) the requested range.
        reference_start_date = Timestamp(
            datetime(start_date.year - 1, self.month, self.day))
        reference_end_date = Timestamp(
            datetime(end_date.year + 1, self.month, self.day))
        # Don't process unnecessary holidays
        dates = DatetimeIndex(start=reference_start_date,
                              end=reference_end_date,
                              freq=year_offset, tz=start_date.tz)
        return dates
    def _apply_rule(self, dates):
        """
        Apply the given offset/observance to a DatetimeIndex of dates.

        Parameters
        ----------
        dates : DatetimeIndex
            Dates to apply the given offset/observance rule

        Returns
        -------
        Dates with rules applied
        """
        if self.observance is not None:
            return dates.map(lambda d: self.observance(d))
        if self.offset is not None:
            if not isinstance(self.offset, list):
                offsets = [self.offset]
            else:
                offsets = self.offset
            # Apply offsets in order (e.g. [Easter(), Day(-2)]).
            for offset in offsets:
                # if we are adding a non-vectorized value
                # ignore the PerformanceWarnings:
                with warnings.catch_warnings(record=True):
                    dates += offset
        return dates
# Registry mapping calendar names to calendar classes; populated by
# ``register`` (invoked automatically by HolidayCalendarMetaClass).
holiday_calendars = {}


def register(cls):
    """Register a holiday-calendar class under its ``name`` attribute.

    Falls back to the class' ``__name__`` when no explicit ``name``
    attribute is defined. Previously used a bare ``except:``, which would
    also swallow KeyboardInterrupt/SystemExit; only AttributeError is the
    expected failure here.
    """
    try:
        name = cls.name
    except AttributeError:
        name = cls.__name__
    holiday_calendars[name] = cls
def get_calendar(name):
    """
    Return an instance of a calendar based on its name.

    Parameters
    ----------
    name : str
        Calendar name to return an instance of

    Raises
    ------
    KeyError
        If no calendar was registered under ``name``.
    """
    calendar_cls = holiday_calendars[name]
    return calendar_cls()
class HolidayCalendarMetaClass(type):
    """Metaclass that registers every calendar class as it is created."""

    def __new__(cls, clsname, bases, attrs):
        new_cls = super(HolidayCalendarMetaClass, cls).__new__(
            cls, clsname, bases, attrs)
        # Make the new calendar discoverable via get_calendar(name).
        register(new_cls)
        return new_cls
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
    """
    Abstract interface to create holidays following certain rules.

    Subclasses define a ``rules`` list of Holiday objects; the metaclass
    registers each subclass by name so it can be looked up via
    ``get_calendar``.
    """
    __metaclass__ = HolidayCalendarMetaClass
    rules = []
    # Default range over which holidays are computed when no explicit
    # start/end is passed to ``holidays``.
    start_date = Timestamp(datetime(1970, 1, 1))
    end_date = Timestamp(datetime(2030, 12, 31))
    # Cached (start, end, Series of holidays) from the last computation.
    _cache = None

    def __init__(self, name=None, rules=None):
        """
        Initializes holiday object with a given set a rules. Normally
        classes just have the rules defined within them.

        Parameters
        ----------
        name : str
            Name of the holiday calendar, defaults to class name
        rules : array of Holiday objects
            A set of rules used to create the holidays.
        """
        super(AbstractHolidayCalendar, self).__init__()
        if name is None:
            name = self.__class__.__name__
        self.name = name
        if rules is not None:
            self.rules = rules

    def rule_from_name(self, name):
        """Return the Holiday rule with the given name, or None."""
        for rule in self.rules:
            if rule.name == name:
                return rule
        return None

    def holidays(self, start=None, end=None, return_name=False):
        """
        Returns a curve with holidays between start_date and end_date

        Parameters
        ----------
        start : starting date, datetime-like, optional
        end : ending date, datetime-like, optional
        return_name : bool, optional
            If True, return a series that has dates and holiday names.
            False will only return a DatetimeIndex of dates.

        Returns
        -------
        DatetimeIndex of holidays
        """
        if self.rules is None:
            raise Exception('Holiday Calendar %s does not have any '
                            'rules specified' % self.name)
        if start is None:
            start = AbstractHolidayCalendar.start_date
        if end is None:
            end = AbstractHolidayCalendar.end_date
        start = Timestamp(start)
        end = Timestamp(end)
        holidays = None
        # If we don't have a cache or the dates are outside the prior
        # cache, we get them again.
        if (self._cache is None or start < self._cache[0] or
                end > self._cache[1]):
            for rule in self.rules:
                rule_holidays = rule.dates(start, end, return_name=True)
                if holidays is None:
                    holidays = rule_holidays
                else:
                    holidays = holidays.append(rule_holidays)
            self._cache = (start, end, holidays.sort_index())
        holidays = self._cache[2]
        holidays = holidays[start:end]
        if return_name:
            return holidays
        else:
            return holidays.index

    @staticmethod
    def merge_class(base, other):
        """
        Merge holiday calendars together. The base calendar
        will take precedence to other. The merge will be done
        based on each holiday's name.

        Parameters
        ----------
        base : AbstractHolidayCalendar
            instance/subclass or array of Holiday objects
        other : AbstractHolidayCalendar
            instance/subclass or array of Holiday objects
        """
        # BUGFIX: only AttributeError (no ``.rules``) should be swallowed
        # here; the previous bare ``except:`` hid every exception type.
        try:
            other = other.rules
        except AttributeError:
            pass
        if not isinstance(other, list):
            other = [other]
        other_holidays = dict((holiday.name, holiday) for holiday in other)
        try:
            base = base.rules
        except AttributeError:
            pass
        if not isinstance(base, list):
            base = [base]
        base_holidays = dict([(holiday.name, holiday) for holiday in base])
        # base rules win on name clashes.
        other_holidays.update(base_holidays)
        return list(other_holidays.values())

    def merge(self, other, inplace=False):
        """
        Merge holiday calendars together. The caller's class
        rules take precedence. The merge will be done
        based on each holiday's name.

        Parameters
        ----------
        other : holiday calendar
        inplace : bool (default=False)
            If True set rule_table to holidays, else return array of Holidays
        """
        holidays = self.merge_class(self, other)
        if inplace:
            self.rules = holidays
        else:
            return holidays
# Common US holiday rules. Each is anchored at a day-of-month and then
# adjusted by a weekday offset (e.g. MO(3) = third Monday, MO(-1) = last
# Monday on/before the anchor).
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
                        offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
                     offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
                        offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
                            offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
                               start_date=datetime(1986, 1, 1), month=1, day=1,
                               offset=DateOffset(weekday=MO(3)))
# NOTE(review): 'President''s Day' is adjacent-string concatenation, so the
# rule's name is "Presidents Day" (no apostrophe). If "President's Day" was
# intended it needs an escaped quote — but changing it would rename the rule
# for any caller using rule_from_name, so it is only flagged here.
USPresidentsDay = Holiday('President''s Day', month=2, day=1,
                          offset=DateOffset(weekday=MO(3)))
# Easter-relative holidays: apply Easter() first, then the day offset.
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
                       offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
    """
    US Federal Government Holiday Calendar based on rules specified by:
    https://www.opm.gov/policy-data-oversight/
       snow-dismissal-procedures/federal-holidays/
    """
    # Fixed-date holidays use ``nearest_workday``: Saturday observances
    # shift back to Friday, Sunday observances forward to Monday.
    rules = [
        Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
        USMartinLutherKingJr,
        USPresidentsDay,
        USMemorialDay,
        Holiday('July 4th', month=7, day=4, observance=nearest_workday),
        USLaborDay,
        USColumbusDay,
        Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
        USThanksgivingDay,
        Holiday('Christmas', month=12, day=25, observance=nearest_workday)
    ]
def HolidayCalendarFactory(name, base, other,
                           base_class=AbstractHolidayCalendar):
    """Create a new calendar class named ``name`` by merging the rules of
    ``base`` and ``other`` (base rules win on name clashes)."""
    merged_rules = AbstractHolidayCalendar.merge_class(base, other)
    attrs = {"rules": merged_rules, "name": name}
    return type(name, (base_class,), attrs)
| apache-2.0 |
js850/pele | pele/potentials/test_functions/_beale.py | 1 | 3947 | import numpy as np
from numpy import exp, sqrt, cos, pi, sin
from pele.potentials import BasePotential
from pele.systems import BaseSystem
def makeplot2d(f, nx=100, xmin=None, xmax=None, zlim=None, show=True):
    """Draw a 2d pseudocolor map of the potential ``f``.

    Parameters
    ----------
    f : potential object with a ``getEnergy(coords)`` method and
        ``xmin``/``xmax`` attributes giving default plot bounds
    nx : number of grid points per axis (the grid is nx by nx)
    xmin, xmax : optional (x, y) lower/upper plot corners
    zlim : optional (lo, hi) pair; energies above ``hi`` are replaced by -1
        so they stand out in the colormap
    show : if True, call ``plt.show()`` before returning

    Returns
    -------
    the matplotlib figure
    """
    from mpl_toolkits.mplot3d import Axes3D  # noqa: registers 3d projection
    from matplotlib import cm
    import matplotlib.pyplot as plt
    import numpy as np

    ny = nx
    if xmin is None:
        xmin = f.xmin[:2]
    xmin, ymin = xmin
    if xmax is None:
        xmax = f.xmax[:2]
    xmax, ymax = xmax

    xs = np.arange(xmin, xmax, (xmax - xmin) / nx)
    ys = np.arange(ymin, ymax, (ymax - ymin) / ny)
    X, Y = np.meshgrid(xs, ys)

    # Evaluate the potential at every grid node.
    Z = np.zeros(X.shape)
    for i in range(xs.size):
        for j in range(ys.size):
            xy = np.array([X[i, j], Y[i, j]])
            Z[i, j] = f.getEnergy(xy)

    if zlim is not None:
        Z = np.where(Z > zlim[1], -1, Z)

    fig = plt.figure()
    mesh = plt.pcolormesh(X, Y, Z, cmap=cm.coolwarm)
    fig.colorbar(mesh)
    if show:
        plt.show()
    return fig
class Beale(BasePotential):
    """The Beale function, a standard 2d optimization test potential.

    Global minimum: f(3, 0.5) = 0.  The usual search domain is
    [-4.5, 4.5] in both coordinates.
    """
    target_E = 0.
    target_coords = np.array([3., 0.5])
    xmin = np.array([-4.5, -4.5])
    xmax = np.array([4.5, 4.5])

    def getEnergy(self, coords):
        """Return the Beale function value at ``coords = (x, y)``."""
        x, y = coords
        t1 = 1.5 - x + x * y
        t2 = 2.25 - x + x * y ** 2
        t3 = 2.625 - x + x * y ** 3
        return t1 ** 2 + t2 ** 2 + t3 ** 2

    def getEnergyGradient(self, coords):
        """Return (energy, gradient) at ``coords``; gradient is analytic."""
        x, y = coords
        t1 = 1.5 - x + x * y
        t2 = 2.25 - x + x * y ** 2
        t3 = 2.625 - x + x * y ** 3
        energy = t1 ** 2 + t2 ** 2 + t3 ** 2
        # d(t_k^2)/dx = 2 t_k (y^k - 1), d(t_k^2)/dy = 2 t_k * k x y^(k-1)
        gx = (2. * t1 * (-1. + y)
              + 2. * t2 * (-1. + y ** 2)
              + 2. * t3 * (-1. + y ** 3))
        gy = (2. * t1 * (x)
              + 2. * t2 * (2. * y * x)
              + 2. * t3 * (3. * x * y ** 2))
        return energy, np.array([gx, gy])
class BealeSystem(BaseSystem):
    """System wrapper exposing the Beale potential to the pele framework."""

    def get_potential(self):
        return Beale()

    def get_random_configuration(self, eps=1e-3):
        """Draw a uniform random point strictly inside the search box,
        shrunk by ``eps`` on every side."""
        pot = self.get_potential()
        low, high = pot.xmin, pot.xmax
        # Draw x first, then y, matching the potential's coordinate order.
        coords = [np.random.uniform(low[k] + eps, high[k] - eps)
                  for k in range(2)]
        return np.array(coords)
def add_minimizer(pot, ax, minimizer, x0, **kwargs):
    # Run `minimizer` on potential `pot` starting from `x0`, recording the
    # coordinates at every iteration via an event callback, then plot the
    # optimization trajectory on axes `ax` labeled by the minimizer's name.
    # NOTE: uses a Python 2 print statement; this module is Python 2 only.
    xcb = []
    def cb(coords=None, energy=None, rms=None, **kwargs):
        # Event hook called by the minimizer once per iteration.
        xcb.append(coords.copy())
        print "energy", energy, rms
    minimizer(x0, pot, events=[cb], **kwargs)
    xcb = np.array(xcb)
    ax.plot(xcb[:,0], xcb[:,1], '-o', label=minimizer.__name__)
def test_minimize():
    # Visual demo: plot the Beale landscape (energies clipped above 50) and
    # overlay the trajectories of two minimizers started from the same
    # random point.
    from pele.optimize import lbfgs_py, fire, steepest_descent
    import matplotlib.pyplot as plt
    system = BealeSystem()
    pot = system.get_potential()
    # zlim hides the steep outer walls so the minimum region is visible.
    fig = makeplot2d(pot, nx=60, show=False, zlim=[0,50])
    ax = fig.gca()
    x0 = system.get_random_configuration(eps=.5)
    add_minimizer(pot, ax, lbfgs_py, x0, nsteps=200, M=1)
    # Steepest descent needs a tiny step and many iterations to converge.
    add_minimizer(pot, ax, steepest_descent, x0, nsteps=10000, dx=1e-4)
    plt.legend()
    plt.show()
def test1():
    """Sanity-check the Beale potential.

    Runs the framework's gradient self-test (analytic vs numerical
    gradients) at the known minimum, at a random point, and at (1, 1),
    then draws the landscape with energies clipped above 100.
    """
    s = BealeSystem()
    f = s.get_potential()
    f.test_potential(f.target_coords)
    # print("") is identical to the old `print ""` under Python 2 while
    # also being valid Python 3 syntax.
    print("")
    f.test_potential(s.get_random_configuration())
    f.test_potential(np.array([1., 1.]))
    # (removed: unused local `v = 3.`)
    makeplot2d(f, nx=60, zlim=[0, 100])
# Script entry point: run the interactive minimization demo.
if __name__ == "__main__":
    test_minimize()
| gpl-3.0 |
flightgong/scikit-learn | sklearn/decomposition/pca.py | 1 | 26496 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
from math import log, sqrt
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils import atleast2d_or_csr
from ..utils import deprecated
from ..utils.sparsefuncs import mean_variance_axis0
from ..utils.extmath import (fast_logdet, safe_sparse_dot, randomized_svd,
fast_dot)
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
dim: int,
embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
    """Infers the dimension of a dataset of shape (n_samples, n_features)

    Returns the rank maximizing Minka's log-likelihood for the given
    eigenvalue spectrum.
    """
    log_likelihoods = np.array([
        _assess_dimension_(spectrum, rank, n_samples, n_features)
        for rank in range(len(spectrum))])
    return log_likelihoods.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making there data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
Percentage of variance explained by each of the selected components. \
k is not set then all components are stored and the sum of explained \
variances is equal to 1.0
`mean_` : array, [n_features]
Per-feature empirical mean, estimated from the training set.
`n_components_` : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
`noise_variance_` : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
computed the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
    def __init__(self, n_components=None, copy=True, whiten=False):
        # Parameters are stored untouched (scikit-learn convention); all
        # validation and inference of the effective number of components
        # happens later, in ``_fit``.
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
    def fit(self, X, y=None):
        """Fit the model with X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : ignored
            Present only for estimator-API compatibility.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # All work (centering, SVD, component selection) happens in _fit;
        # the returned SVD factors are not needed here.
        self._fit(X)
        return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
    """Fit the model on X
    Parameters
    ----------
    X: array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    Returns
    -------
    U, s, V : ndarrays
        The SVD of the input data, copied and centered when
        requested.
    """
    X = array2d(X)
    n_samples, n_features = X.shape
    X = as_float_array(X, copy=self.copy)
    # Center data
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    U, S, V = linalg.svd(X, full_matrices=False)
    # Eigenvalues of the covariance matrix, with maximum-likelihood
    # normalization (divide by n_samples, not n_samples - 1).
    explained_variance_ = (S ** 2) / n_samples
    explained_variance_ratio_ = (explained_variance_ /
                                 explained_variance_.sum())
    if self.whiten:
        # Rescale components so the projected data has unit
        # component-wise variance.
        components_ = V / (S[:, np.newaxis] / sqrt(n_samples))
    else:
        components_ = V
    n_components = self.n_components
    if n_components is None:
        n_components = n_features
    elif n_components == 'mle':
        if n_samples < n_features:
            raise ValueError("n_components='mle' is only supported "
                             "if n_samples >= n_features")
        # Automatic dimensionality selection from the spectrum.
        n_components = _infer_dimension_(explained_variance_,
                                         n_samples, n_features)
    elif not 0 <= n_components <= n_features:
        raise ValueError("n_components=%r invalid for n_features=%d"
                         % (n_components, n_features))
    if 0 < n_components < 1.0:
        # number of components for which the cumulated explained variance
        # percentage is superior to the desired threshold
        ratio_cumsum = explained_variance_ratio_.cumsum()
        n_components = np.sum(ratio_cumsum < n_components) + 1
    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < n_features:
        self.noise_variance_ = explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.
    # store n_samples to revert whitening when getting covariance
    self.n_samples_ = n_samples
    self.components_ = components_[:n_components]
    self.explained_variance_ = explained_variance_[:n_components]
    explained_variance_ratio_ = explained_variance_ratio_[:n_components]
    self.explained_variance_ratio_ = explained_variance_ratio_
    self.n_components_ = n_components
    return (U, S, V)
def get_covariance(self):
    """Compute data covariance with the generative model.
    ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
    where S**2 contains the explained variances.
    Returns
    -------
    cov : array, shape=(n_features, n_features)
        Estimated covariance of data.
    """
    components = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        # Undo the whitening scaling so components carry the variance again.
        components = components * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components.T * exp_var_diff, components)
    # Add the isotropic noise term on the diagonal, in place.
    cov.flat[::len(cov) + 1] += self.noise_variance_
    return cov
def get_precision(self):
    """Compute data precision matrix with the generative model.
    Equals the inverse of the covariance but computed with
    the matrix inversion lemma for efficiency.
    Returns
    -------
    precision : array, shape=(n_features, n_features)
        Estimated precision of data.
    """
    n_features = self.components_.shape[1]
    # handle corner cases first
    if self.n_components_ == 0:
        # No retained components: covariance is sigma2 * I.
        return np.eye(n_features) / self.noise_variance_
    if self.n_components_ == n_features:
        # Full-rank model: no low-rank structure to exploit.
        return linalg.inv(self.get_covariance())
    # Get precision using matrix inversion lemma
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        # Undo whitening so the components carry the variance scale.
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    # Inner (n_components x n_components) matrix of the lemma.
    precision = np.dot(components_, components_.T) / self.noise_variance_
    precision.flat[::len(precision) + 1] += 1. / exp_var_diff
    precision = np.dot(components_.T,
                       np.dot(linalg.inv(precision), components_))
    precision /= -(self.noise_variance_ ** 2)
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
    return precision
def transform(self, X):
    """Apply the dimensionality reduction on X.
    X is projected on the first principal components previously extracted
    from a training set.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples
        and n_features is the number of features.
    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    X = array2d(X)
    if self.mean_ is not None:
        # Apply the same centering that was used during fit.
        X = X - self.mean_
    return fast_dot(X, self.components_.T)
def inverse_transform(self, X):
    """Transform data back to its original space, i.e.,
    return an input X_original whose transform would be X.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_components)
        New data, where n_samples is the number of samples
        and n_components is the number of components.
    Returns
    -------
    X_original : array-like, shape (n_samples, n_features)
    Notes
    -----
    If whitening is enabled, inverse_transform does not compute the
    exact inverse operation as transform.
    """
    reconstructed = fast_dot(X, self.components_)
    return reconstructed + self.mean_
def score_samples(self, X):
    """Return the log-likelihood of each sample.
    See. "Pattern Recognition and Machine Learning"
    by C. Bishop, 12.2.1 p. 574
    or http://www.miketipping.com/papers/met-mppca.pdf
    Parameters
    ----------
    X: array, shape(n_samples, n_features)
        The data.
    Returns
    -------
    ll: array, shape (n_samples,)
        Log-likelihood of each sample under the current model
    """
    X = array2d(X)
    Xr = X - self.mean_
    n_features = X.shape[1]
    precision = self.get_precision()
    # Gaussian log-density: -1/2 * (x - mu)^T P (x - mu) computed row-wise
    # plus the log normalization constant.
    # (Removed a dead ``log_like = np.zeros(...)`` that was immediately
    # overwritten in the original.)
    log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
    log_like -= .5 * (n_features * log(2. * np.pi)
                      - fast_logdet(precision))
    return log_like
def score(self, X, y=None):
    """Return the average log-likelihood of all samples.
    See. "Pattern Recognition and Machine Learning"
    by C. Bishop, 12.2.1 p. 574
    or http://www.miketipping.com/papers/met-mppca.pdf
    Parameters
    ----------
    X: array, shape(n_samples, n_features)
        The data.
    Returns
    -------
    ll: float
        Average log-likelihood of the samples under the current model
    """
    per_sample_ll = self.score_samples(X)
    return np.mean(per_sample_ll)
@deprecated("ProbabilisticPCA will be removed in 0.16. WARNING: the "
            "covariance estimation was previously incorrect, your "
            "output might be different than under the previous versions. "
            "Use PCA that implements score and score_samples. To work with "
            "homoscedastic=False, you should use FactorAnalysis.")
class ProbabilisticPCA(PCA):
    """Additional layer on top of PCA that adds a probabilistic evaluation"""
    __doc__ += PCA.__doc__

    def fit(self, X, y=None, homoscedastic=True):
        """Additionally to PCA.fit, learns a covariance model
        Parameters
        ----------
        X : array of shape(n_samples, n_features)
            The data to fit
        homoscedastic : bool, optional,
            If True, average variance across remaining dimensions
        """
        PCA.fit(self, X)
        n_samples, n_features = X.shape
        n_components = self.n_components
        if n_components is None:
            n_components = n_features
        explained_variance = self.explained_variance_.copy()
        if homoscedastic:
            explained_variance -= self.noise_variance_
        # Make the low rank part of the estimated covariance
        self.covariance_ = np.dot(self.components_[:n_components].T *
                                  explained_variance,
                                  self.components_[:n_components])
        if n_features == n_components:
            delta = 0.
        elif homoscedastic:
            delta = self.noise_variance_
        else:
            # Heteroscedastic noise: per-feature residual variance after
            # removing the low-rank reconstruction.
            Xr = X - self.mean_
            Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
            delta = (Xr ** 2).mean(axis=0) / (n_features - n_components)
        # Add delta to the diagonal without extra allocation
        self.covariance_.flat[::n_features + 1] += delta
        return self

    def score(self, X, y=None):
        """Return a score associated to new data
        Parameters
        ----------
        X: array of shape(n_samples, n_features)
            The data to test
        Returns
        -------
        ll: array of shape (n_samples),
            log-likelihood of each row of X under the current model
        """
        Xr = X - self.mean_
        n_features = X.shape[1]
        self.precision_ = linalg.inv(self.covariance_)
        # Gaussian log-density under the fitted covariance model.
        # (Removed a dead ``log_like = np.zeros(...)`` that was immediately
        # overwritten in the original.)
        log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
        log_like -= .5 * (fast_logdet(self.covariance_)
                          + n_features * log(2. * np.pi))
        return log_like
class RandomizedPCA(BaseEstimator, TransformerMixin):
    """Principal component analysis (PCA) using randomized SVD
    Linear dimensionality reduction using approximated Singular Value
    Decomposition of the data and keeping only the most significant
    singular vectors to project the data to a lower dimensional space.
    Parameters
    ----------
    n_components : int, optional
        Maximum number of components to keep. When not given or None, this
        is set to n_features (the second dimension of the training data).
    copy : bool
        If False, data passed to fit are overwritten and running
        fit(X).transform(X) will not yield the expected results,
        use fit_transform(X) instead.
    iterated_power : int, optional
        Number of iterations for the power method. 3 by default.
    whiten : bool, optional
        When True (False by default) the `components_` vectors are divided
        by the singular values to ensure uncorrelated outputs with unit
        component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometime
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton.
    Attributes
    ----------
    `components_` : array, [n_components, n_features]
        Components with maximum variance.
    `explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components. \
        k is not set then all components are stored and the sum of explained \
        variances is equal to 1.0
    `mean_` : array, [n_features]
        Per-feature empirical mean, estimated from the training set.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import RandomizedPCA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> pca = RandomizedPCA(n_components=2)
    >>> pca.fit(X)                 # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    RandomizedPCA(copy=True, iterated_power=3, n_components=2,
           random_state=None, whiten=False)
    >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [ 0.99244...  0.00755...]
    See also
    --------
    PCA
    ProbabilisticPCA
    TruncatedSVD
    References
    ----------
    .. [Halko2009] `Finding structure with randomness: Stochastic algorithms
      for constructing approximate matrix decompositions Halko, et al., 2009
      (arXiv:909)`
    .. [MRT] `A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
    Notes
    -----
    This class supports sparse matrix input for backward compatibility, but
    actually computes a truncated SVD instead of a PCA in that case (i.e. no
    centering is performed). This support is deprecated; use the class
    TruncatedSVD for sparse matrix support.
    """

    def __init__(self, n_components=None, copy=True, iterated_power=3,
                 whiten=False, random_state=None):
        # Hyper-parameters are stored verbatim; any validation happens in fit.
        self.n_components = n_components
        self.copy = copy
        self.iterated_power = iterated_power
        self.whiten = whiten
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model with X by extracting the first principal components.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        self._fit(X)
        return self

    def _fit(self, X):
        """Fit the model to the data X.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        X : ndarray, shape (n_samples, n_features)
            The input data, copied, centered and whitened when requested.
        """
        random_state = check_random_state(self.random_state)
        if sparse.issparse(X):
            # Sparse input still works for now but behaves like a truncated
            # SVD (no centering) -- see the class Notes section.
            warnings.warn("Sparse matrix support is deprecated"
                          " and will be dropped in 0.16."
                          " Use TruncatedSVD instead.",
                          DeprecationWarning)
        else:
            # not a sparse matrix, ensure this is a 2D array
            X = np.atleast_2d(as_float_array(X, copy=self.copy))
        n_samples = X.shape[0]
        if sparse.issparse(X):
            # Cannot center a sparse matrix without densifying it.
            self.mean_ = None
        else:
            # Center data
            self.mean_ = np.mean(X, axis=0)
            X -= self.mean_
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        U, S, V = randomized_svd(X, n_components,
                                 n_iter=self.iterated_power,
                                 random_state=random_state)
        # Maximum-likelihood variance estimates (normalized by n_samples).
        self.explained_variance_ = exp_var = (S ** 2) / n_samples
        if sparse.issparse(X):
            _, full_var = mean_variance_axis0(X)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        if self.whiten:
            # Rescale so projected components have unit variance.
            self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
        else:
            self.components_ = V
        return X

    def transform(self, X, y=None):
        """Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
        from a training set.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        # XXX remove scipy.sparse support here in 0.16
        X = atleast2d_or_csr(X)
        if self.mean_ is not None:
            # Same centering as fit; skipped for sparse input (mean_ is None).
            X = X - self.mean_
        X = safe_sparse_dot(X, self.components_.T)
        return X

    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        # _fit returns the (possibly centered) data, so the projection can
        # reuse it directly without re-centering.
        X = self._fit(atleast2d_or_csr(X))
        X = safe_sparse_dot(X, self.components_.T)
        return X

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.
        Returns an array X_original whose transform would be X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.
        Returns
        -------
        X_original array-like, shape (n_samples, n_features)
        Notes
        -----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
        """
        # XXX remove scipy.sparse support here in 0.16
        X_original = safe_sparse_dot(X, self.components_)
        if self.mean_ is not None:
            X_original = X_original + self.mean_
        return X_original
| bsd-3-clause |
suvam97/zeppelin | python/src/main/resources/bootstrap.py | 4 | 5728 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PYTHON 2 / 3 compatibility :
# bootstrap.py must be runnable with Python 2 or 3
# Remove interactive mode displayhook
import sys
import signal
try:
import StringIO as io
except ImportError:
import io as io
# Suppress the interactive-mode displayhook so evaluated expressions are not
# echoed back by the embedded interpreter.
sys.displayhook = lambda x: None


def intHandler(signum, frame):  # Set the signal handler
    # SIGINT cancels the currently running paragraph.
    print ("Paragraph interrupted")
    raise KeyboardInterrupt()

signal.signal(signal.SIGINT, intHandler)
def help():
    """Print the interpreter's HTML help page to standard output.

    Note: this intentionally shadows the ``help`` builtin inside
    interpreter sessions.
    """
    print ('%html')
    print ('<h2>Python Interpreter help</h2>')
    print ('<h3>Python 2 & 3 compatibility</h3>')
    print ('<p>The interpreter is compatible with Python 2 & 3.<br/>')
    print ('To change Python version, ')
    print ('change in the interpreter configuration the python to the ')
    print ('desired version (example : python=/usr/bin/python3)</p>')
    print ('<h3>Python modules</h3>')
    print ('<p>The interpreter can use all modules already installed ')
    print ('(with pip, easy_install, etc)</p>')
    print ('<h3>Forms</h3>')
    print ('You must install py4j in order to use '
           'the form feature (pip install py4j)')
    print ('<h4>Input form</h4>')
    print ('<pre>print (z.input("f1","defaultValue"))</pre>')
    print ('<h4>Selection form</h4>')
    print ('<pre>print(z.select("f2", [("o1","1"), ("o2","2")],2))</pre>')
    print ('<h4>Checkbox form</h4>')
    print ('<pre> print("".join(z.checkbox("f3", [("o1","1"), '
           '("o2","2")],["1"])))</pre>')
    print ('<h3>Matplotlib graph</h3>')
    print ('<div>The interpreter can display matplotlib graph with ')
    print ('the function z.show()</div>')
    print ('<div> You need to already have matplotlib module installed ')
    print ('to use this functionality !</div><br/>')
    print ('''<pre>import matplotlib.pyplot as plt
plt.figure()
(.. ..)
z.show(plt)
plt.close()
</pre>''')
    print ('<div><br/> z.show function can take optional parameters ')
    print ('to adapt graph width and height</div>')
    print ("<div><b>example </b>:")
    print ('''<pre>z.show(plt,width='50px')
z.show(plt,height='150px') </pre></div>''')
    print ('<h3>Pandas DataFrame</h3>')
    # BUG FIX: the original used a Python 2 ``print`` *statement* here
    # (``print """..."""``), which is a SyntaxError under Python 3 even
    # though this file's header requires Python 2 & 3 compatibility.
    print ("""
<div>The interpreter can visualize Pandas DataFrame
with the function z.show()
<pre>
import pandas as pd
df = pd.read_csv("bank.csv", sep=";")
z.show(df)
</pre></div>
""")
class PyZeppelinContext(object):
    """If py4j is detected, this class will be overridden
    with the implementation in bootstrap_input.py
    """
    # Message shown whenever a dynamic-forms method is called without py4j.
    errorMsg = "You must install py4j Python module " \
               "(pip install py4j) to use Zeppelin dynamic forms features"

    def __init__(self, zc):
        self.z = zc
        # Maximum number of DataFrame rows rendered by show_dataframe.
        self.max_result = 1000

    def input(self, name, defaultValue=""):
        # Dynamic-forms stub: the real implementation requires py4j.
        print (self.errorMsg)

    def select(self, name, options, defaultValue=""):
        # Dynamic-forms stub: the real implementation requires py4j.
        print (self.errorMsg)

    def checkbox(self, name, options, defaultChecked=[]):
        # Dynamic-forms stub: the real implementation requires py4j.
        # NOTE(review): mutable default argument; harmless here since the
        # stub never touches it, but the py4j override should not copy this.
        print (self.errorMsg)

    def show(self, p, **kwargs):
        # Dispatch on the argument: the matplotlib.pyplot module or a pandas
        # DataFrame (detected by class name to avoid importing pandas).
        if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
            self.show_matplotlib(p, **kwargs)
        elif type(p).__name__ == "DataFrame":  # does not play well with sub-classes
            # `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
            # and so a dependency on pandas
            self.show_dataframe(p, **kwargs)

    def show_dataframe(self, df, **kwargs):
        """Pretty prints DF using Table Display System
        """
        # NOTE(review): cells are written verbatim, so this assumes all
        # column names and cell values are strings -- confirm with callers.
        limit = len(df) > self.max_result
        header_buf = io.StringIO("")
        header_buf.write(df.columns[0])
        for col in df.columns[1:]:
            header_buf.write("\t")
            header_buf.write(col)
        header_buf.write("\n")
        body_buf = io.StringIO("")
        # Truncate to the first max_result rows when the frame is larger.
        rows = df.head(self.max_result).values if limit else df.values
        for row in rows:
            body_buf.write(row[0])
            for cell in row[1:]:
                body_buf.write("\t")
                body_buf.write(cell)
            body_buf.write("\n")
        body_buf.seek(0); header_buf.seek(0)
        #TODO(bzz): fix it, so it shows red notice, as in Spark
        print("%table " + header_buf.read() + body_buf.read())  # +
        # ("\n<font color=red>Results are limited by {}.</font>" \
        #    .format(self.max_result) if limit else "")
        #)
        body_buf.close(); header_buf.close()

    def show_matplotlib(self, p, width="0", height="0", **kwargs):
        """Matplotlib show function
        """
        # Render the current figure as inline SVG wrapped in %html output.
        img = io.StringIO()
        p.savefig(img, format='svg')
        img.seek(0)
        # Build an optional inline CSS style from the width/height arguments.
        style = ""
        if (width != "0"):
            style += 'width:' + width
        if (height != "0"):
            if (len(style) != 0):
                style += ","
            style += 'height:' + height
        print("%html <div style='" + style + "'>" + img.read() + "<div>")
        img.close()
z = PyZeppelinContext("")
| apache-2.0 |
0x0all/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
# Use only features 0 and 1 of the diabetes data.
indices = (0, 1)
# Hold out the last 20 samples as a test set.
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
    # Draw the training points and the fitted regression plane in 3D.
    # NOTE(review): this reads the module-level ``y_train`` global rather
    # than taking it as a parameter -- fine for this script, fragile if
    # the function is reused elsewhere.
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)
    ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
    # Evaluate the fitted plane at the four corners of the feature square
    # and reshape into a 2x2 surface grid.
    ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
                    np.array([[-.1, .15], [-.1, .15]]),
                    clf.predict(np.array([[-.1, -.1, .15, .15],
                                          [-.1, .15, -.1, .15]]).T
                                ).reshape((2, 2)),
                    alpha=.5)
    ax.set_xlabel('X_1')
    ax.set_ylabel('X_2')
    ax.set_zlabel('Y')
    # Hide tick labels on all three axes for a cleaner illustration.
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)

# Nearly edge-on view along one feature axis.
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)

# Nearly edge-on view along the other feature axis.
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)

plt.show()
| bsd-3-clause |
pystruct/pystruct | examples/plot_snakes_typed.py | 1 | 7289 | """
==============================================
Conditional Interactions on the Snakes Dataset
==============================================
This is a variant of plot_snakes.py where we use the NodeTypeEdgeFeatureGraphCRF
class instead of EdgeFeatureGraphCRF, even though there is only one type of node.
So this should give exactly the same results as plot_snakes.py
This example uses the snake dataset introduced in
Nowozin, Rother, Bagon, Sharp, Yao, Kohli: Decision Tree Fields ICCV 2011
This dataset is specifically designed to require the pairwise interaction terms
to be conditioned on the input, in other words to use non-trivial edge-features.
The task is as following: a "snake" of length ten wandered over a grid. For
each cell, it had the option to go up, down, left or right (unless it came from
there). The input consists of these decisions, while the desired output is an
annotation of the snake from 0 (head) to 9 (tail). See the plots for an
example.
As input features we use a 3x3 window around each pixel (and pad with background
where necessary). We code the five different input colors (for up, down, left, right,
background) using a one-hot encoding. This is a rather naive approach, not using any
information about the dataset (other than that it is a 2d grid).
The task can not be solved using the simple DirectionalGridCRF - which can only
infer head and tail (which are also possible to infer just from the unary
features). If we add edge-features that contain the features of the nodes that are
connected by the edge, the CRF can solve the task.
From an inference point of view, this task is very hard. QPBO move-making is
not able to solve it alone, so we use the relaxed AD3 inference for learning.
PS: This example runs a bit (5 minutes on 12 cores, 20 minutes on one core for me).
But it does work as well as Decision Tree Fields ;)
JL Meunier - January 2017
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943
Copyright Xerox
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
# import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.metrics import confusion_matrix, accuracy_score
from pystruct.learners import OneSlackSSVM
from pystruct.datasets import load_snakes
from pystruct.utils import make_grid_edges, edge_list_to_features
#from pystruct.models import EdgeFeatureGraphCRF
from pystruct.models import NodeTypeEdgeFeatureGraphCRF
from plot_snakes import one_hot_colors, neighborhood_feature, prepare_data
def convertToSingleTypeX(X):
    """Wrap each graph's arrays in singleton lists.

    NodeTypeEdgeFeatureGraphCRF structures X per node type; for graphs with
    a single node type, each (node_features, edges, edge_features) triple
    simply becomes a triple of one-element lists.
    """
    converted = []
    for node_features, edges, edge_features in X:
        converted.append(([node_features], [edges], [edge_features]))
    return converted
if __name__ == '__main__':
    print("Please be patient. Learning will take 5-20 minutes.")
    snakes = load_snakes()
    X_train, Y_train = snakes['X_train'], snakes['Y_train']
    # One-hot encode the five input colors; flatten labels to one per pixel.
    X_train = [one_hot_colors(x) for x in X_train]
    Y_train_flat = [y_.ravel() for y_ in Y_train]
    X_train_directions, X_train_edge_features = prepare_data(X_train)
    # Relaxed AD3 inference for learning.
    inference = 'ad3+'
    # first, train on X with directions only:
    crf = NodeTypeEdgeFeatureGraphCRF(1, [11], [45], [[2]], inference_method=inference)
    ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1, max_iter=100,
                        n_jobs=1)
    ssvm.fit(convertToSingleTypeX(X_train_directions), Y_train_flat)
    # Evaluate using confusion matrix.
    # Clearly the middle of the snake is the hardest part.
    X_test, Y_test = snakes['X_test'], snakes['Y_test']
    X_test = [one_hot_colors(x) for x in X_test]
    Y_test_flat = [y_.ravel() for y_ in Y_test]
    X_test_directions, X_test_edge_features = prepare_data(X_test)
    Y_pred = ssvm.predict(convertToSingleTypeX(X_test_directions))
    print("Results using only directional features for edges")
    print("Test accuracy: %.3f"
          % accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred)))
    print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred)))
    # now, use more informative edge features:
    crf = NodeTypeEdgeFeatureGraphCRF(1, [11], [45], [[180]], inference_method=inference)
    ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1,
                        # switch_to='ad3',
                        #verbose=1,
                        n_jobs=8)
    ssvm.fit(convertToSingleTypeX(X_train_edge_features), Y_train_flat)
    Y_pred2 = ssvm.predict(convertToSingleTypeX(X_test_edge_features))
    print("Results using also input features for edges")
    print("Test accuracy: %.3f"
          % accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
    print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
    # if False:
    #     # plot stuff
    #     fig, axes = plt.subplots(2, 2)
    #     axes[0, 0].imshow(snakes['X_test'][0], interpolation='nearest')
    #     axes[0, 0].set_title('Input')
    #     y = Y_test[0].astype(np.int)
    #     bg = 2 * (y != 0)  # enhance contrast
    #     axes[0, 1].matshow(y + bg, cmap=plt.cm.Greys)
    #     axes[0, 1].set_title("Ground Truth")
    #     axes[1, 0].matshow(Y_pred[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
    #     axes[1, 0].set_title("Prediction w/o edge features")
    #     axes[1, 1].matshow(Y_pred2[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
    #     axes[1, 1].set_title("Prediction with edge features")
    #     for a in axes.ravel():
    #         a.set_xticks(())
    #         a.set_yticks(())
    #     plt.show()
"""
Please be patient. Learning will take 5-20 minutes.
Results using only directional features for edges
Test accuracy: 0.847
[[2750 0 0 0 0 0 0 0 0 0 0]
[ 0 99 0 0 1 0 0 0 0 0 0]
[ 0 2 68 3 9 4 6 4 3 1 0]
[ 0 4 11 45 8 14 5 6 0 6 1]
[ 0 1 22 18 31 2 14 4 3 5 0]
[ 0 3 7 38 12 22 5 4 2 7 0]
[ 0 2 19 16 26 8 16 2 9 2 0]
[ 0 6 14 26 10 15 5 12 2 10 0]
[ 0 0 12 15 16 4 16 2 18 4 13]
[ 0 2 5 18 6 8 5 3 2 50 1]
[ 0 1 11 4 13 1 2 0 2 2 64]]
Results using also input features for edges
Test accuracy: 0.998
[[2749 0 0 0 0 0 0 0 1 0 0]
[ 0 100 0 0 0 0 0 0 0 0 0]
[ 0 0 100 0 0 0 0 0 0 0 0]
[ 0 0 0 99 0 0 0 0 0 1 0]
[ 0 0 0 0 99 0 1 0 0 0 0]
[ 0 0 0 1 0 98 0 1 0 0 0]
[ 0 0 0 0 1 0 99 0 0 0 0]
[ 0 0 0 0 0 1 0 99 0 0 0]
[ 0 0 0 0 0 0 0 0 100 0 0]
[ 0 0 0 0 0 0 0 1 0 99 0]
[ 0 0 0 0 0 0 0 0 0 0 100]]
""" | bsd-2-clause |
LeoQuote/ingress-simple-planner | create_plan.py | 1 | 5789 | import csv
import matplotlib.path as mplPath
import numpy as np
import json
class portal:
    """A named map location whose coordinates are stored as offsets from a
    fixed (40 N, 116 E) reference point."""

    def __init__(self, name, lat, lon):
        self.name = name
        # Keep offsets from the reference origin rather than raw coordinates.
        self.lat = float(lat) - 40
        self.lon = float(lon) - 116

    def __repr__(self):
        return '{}'.format(self.name)

    # Portals display identically in repr and str contexts.
    __str__ = __repr__
class field:
    """A triangular field spanned by two base portals and an apex portal."""

    def __init__(self, base1, base2, end):
        self.base1 = base1
        self.base2 = base2
        self.end = end
        # Vertices kept together for the point-in-triangle tests below.
        self._portal_list = [base1, base2, end]

    def __repr__(self):
        return '<Field {}, {}, {}>'.format(self.base1, self.base2, self.end)

    def contains_portal(self, portal):
        # Build a matplotlib Path from the triangle's vertices and test the
        # portal's (lat, lon). Returns True when inside; otherwise falls off
        # the end and returns None.
        verts = []
        for item in self._portal_list:
            verts += [[item.lat, item.lon]]
        bbPath = mplPath.Path(np.array(verts))
        if bbPath.contains_point(np.array([portal.lat, portal.lon])):
            return True

    def contains_portals(self, portal_list):
        # Collect the candidates inside this field, excluding the apex
        # itself (compared by name).
        portals_in_triangle = []
        for item in portal_list:
            if self.contains_portal(item) and self.end.name != item.name:
                portals_in_triangle += [item]
        return portals_in_triangle

    def best_subfield(self, portal_list):
        # Among candidates inside this field, pick the apex whose sub-field
        # (same bases, new apex) contains the most remaining portals.
        subfield_contains_number = 0
        # Initialize the best apex to the first candidate portal.
        # NOTE(review): raises IndexError when portal_list is empty; callers
        # are expected to guard against that.
        best_end = portal_list[0]
        portals_in_best_field = []
        for portal in portal_list:
            if not self.contains_portal(portal): continue
            new_field = field(self.base1, self.base2, portal)
            portal_in_field = new_field.contains_portals(portal_list)
            if len(portal_in_field) > subfield_contains_number:
                subfield_contains_number = len(portal_in_field)
                best_end = portal
                portals_in_best_field = portal_in_field
        return (field(self.base1, self.base2, best_end), portals_in_best_field)
class best_plan:
    """Layered-field plan: starting from a base link (base1-base2) and an
    outer apex, repeatedly shrink to the sub-field covering the most portals."""

    def __init__(self, base1, base2, end, portal_list):
        self.base1 = base1
        self.base2 = base2
        self.end = end
        self._starter_field = field(base1, base2, end)
        self._portal_list = portal_list
        # Apexes in build order; reversed at the end of calculate().
        self.waypoints = [end]
        self.total_ap = 0
        self.total_field = 0
        self.total_link = 0

    def calculate(self):
        # Shrink the current field to its best sub-field and record the
        # chosen apex as a waypoint.
        calculate_result = self._starter_field.best_subfield(self._portal_list)
        (self._starter_field, self._portal_list) = calculate_result
        self.waypoints += [self._starter_field.end]
        if len(self._portal_list) > 0:
            # Candidate portals remain inside the new field: recurse into it.
            self.calculate()
        else:
            self.waypoints.reverse()
            # Totals for the finished layered plan.
            # NOTE(review): the 313 AP/link and 1250 AP/field constants look
            # like in-game scoring values -- confirm against game rules.
            self.total_link = 3 * (len(self.waypoints) + 1)
            self.total_field = 3 * len(self.waypoints) + 1
            self.total_ap = 313 * self.total_link + 1250 * self.total_field

    def _get_line(self, portal1, portal2):
        # One polyline segment in draw-tools JSON form; coordinates are
        # converted back from offsets to absolute lat/lng (+40, +116).
        return {'color': '#a24ac3',
                'type': 'polyline',
                'latLngs': [
                    {'lat': portal1.lat + 40, 'lng': portal1.lon + 116},
                    {'lat': portal2.lat + 40, 'lng': portal2.lon + 116},
                ]}

    def print_result(self):
        # Emit the plan summary plus a JSON blob of all links to draw.
        plan_dict = [self._get_line(self.base1, self.base2)]
        for point in self.waypoints:
            plan_dict += [self._get_line(self.base1, point)]
            plan_dict += [self._get_line(self.base2, point)]
        print("""计算完成，路点总数{}, 路点如下{},总AP {}
{}
""".format(len(self.waypoints), self.waypoints, self.total_ap, json.dumps(plan_dict)))
def is_portal_in_field(portal, field):
    """Return whether *portal* lies inside the triangle *field*.

    Thin wrapper that delegates to ``field.contains_portal``; kept for
    backward compatibility with callers using the function form.
    """
    return field.contains_portal(portal)
def get_portal_by_name(name, portal_list):
    """Remove and return the portal called *name* from *portal_list*.

    Parameters
    ----------
    name : str
        Portal name to search for (exact match on ``.name``).
    portal_list : list
        List of portal-like objects; mutated in place (the match is popped).

    Returns
    -------
    (portal, portal_list) : tuple
        The removed portal and the same (now shorter) list.

    Raises
    ------
    NameError
        If no portal with that name exists (kept for caller compatibility).
    """
    # Use enumerate instead of a hand-maintained index counter.
    for idx, candidate in enumerate(portal_list):
        if candidate.name == name:
            return portal_list.pop(idx), portal_list
    raise NameError('{} portal not found'.format(name))
if __name__ == '__main__':
    # Anchor portals: the two base-link endpoints and the outermost apex.
    BASE1 = '新华门'
    BASE2 = '国家大剧院'
    END = '天安门广场-远眺国家博物馆'
    portals = []
    # Load candidate portals from CSV rows of (name, latitude, longitude).
    with open('portals.csv', 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            data = {
                'name': row[0],
                'lat': row[1],
                'lon': row[2],
            }
            new_portal = portal(**data)
            portals += [new_portal]
    # Pop the anchors out of the candidate pool.
    (BASE1, portals) = get_portal_by_name(BASE1, portals)
    (BASE2, portals) = get_portal_by_name(BASE2, portals)
    (best_end, portals) = get_portal_by_name(END, portals)
    # The code below tried to find the field covering the most portals, but
    # it is not practical; specifying an apex portal directly works better.
    # best_end = None
    # most_portal_in_field = 0
    # for portal in portals :
    #     largest_field = field(BASE1,BASE2,portal)
    #     portal_number_in_field = len(largest_field.contains_portals(portals))
    #     if portal_number_in_field > most_portal_in_field:
    #         best_end = portal
    #         most_portal_in_field = portal_number_in_field
    new_best_plan = best_plan(BASE1, BASE2, best_end, portals)
    new_best_plan.calculate()
    new_best_plan.print_result()
    # print(len(IN_CIRCLE))
| mit |
yukisakurai/hhana | mva/plotting/classify.py | 5 | 12122 |
# stdlib imports
import os
# local imports
from . import log
from ..variables import VARIABLES
from .. import PLOTS_DIR
from .draw import draw
from statstools.utils import efficiency_cut, significance
# matplotlib imports
from matplotlib import cm
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator, FuncFormatter, IndexLocator
# numpy imports
import numpy as np
import scipy
from scipy import ndimage
from scipy.interpolate import griddata
# rootpy imports
from rootpy.plotting.contrib import plot_corrcoef_matrix
from rootpy.plotting import Hist
from root_numpy import fill_hist
def correlations(signal, signal_weight,
                 background, background_weight,
                 fields, category, output_suffix=''):
    """Write weighted correlation-matrix plots for signal and background.

    One PNG per sample is written into ``PLOTS_DIR``, named after the
    category and the optional *output_suffix*.
    """
    # Prefer the human-readable variable titles where one is defined.
    titles = [VARIABLES[field]['title'] if field in VARIABLES else field
              for field in fields]
    jobs = (
        (signal, signal_weight,
         "correlation_signal_%s%s.png", '%s Signal' % category.label),
        (background, background_weight,
         "correlation_background_%s%s.png", '%s Background' % category.label),
    )
    for sample, weights, fname_fmt, title in jobs:
        plot_corrcoef_matrix(
            sample, fields=titles,
            output_name=os.path.join(
                PLOTS_DIR, fname_fmt % (category.name, output_suffix)),
            title=title,
            weights=weights)
def plot_grid_scores(grid_scores, best_point, params, name,
                     label_all_ticks=False,
                     n_ticks=10,
                     title=None,
                     format='png',
                     path=PLOTS_DIR):
    """Draw a smoothed contour map of grid-search scores over two parameters.

    Parameters
    ----------
    grid_scores : list of (param_dict, score, cv_scores)
        Grid-search results (legacy scikit-learn ``grid_scores_`` layout --
        TODO confirm against the caller).
    best_point : dict
        Parameter values of the best point; marked with an open circle.
    params : dict
        Maps parameter name -> axis label.
    name : str
        Category name, used in the annotation and the output file name.

    The figure is saved as ``grid_scores_<name>.<format>`` under *path*.
    """
    # Collect, per parameter, the sorted unique values scanned by the grid.
    param_names = sorted(grid_scores[0][0].keys())
    param_values = dict([(pname, []) for pname in param_names])
    for pvalues, score, cv_scores in grid_scores:
        for pname in param_names:
            param_values[pname].append(pvalues[pname])
    # remove duplicates
    for pname in param_names:
        param_values[pname] = np.unique(param_values[pname]).tolist()
    # Dense score grid indexed by the position of each parameter value.
    scores = np.empty(shape=[len(param_values[pname]) for pname in param_names])
    for pvalues, score, cv_scores in grid_scores:
        index = []
        for pname in param_names:
            index.append(param_values[pname].index(pvalues[pname]))
        scores.itemset(tuple(index), score)
    fig = plt.figure(figsize=(6, 6), dpi=100)
    ax = plt.axes([.15, .15, .95, .75])
    ax.autoscale(enable=False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    #cmap = cm.get_cmap('Blues_r', 100)
    #cmap = cm.get_cmap('gist_heat', 100)
    #cmap = cm.get_cmap('gist_earth', 100)
    cmap = cm.get_cmap('jet', 100)
    x = np.array(param_values[param_names[1]])
    y = np.array(param_values[param_names[0]])
    extent = (min(x), max(x), min(y), max(y))
    # Smooth the score surface before contouring.
    smoothed_scores = ndimage.gaussian_filter(scores, sigma=3)
    min_score, max_score = smoothed_scores.min(), smoothed_scores.max()
    score_range = max_score - min_score
    levels = np.linspace(min_score, max_score, 30)
    img = ax.contourf(smoothed_scores, levels=levels, cmap=cmap,
                      vmin=min_score - score_range/4., vmax=max_score)
    cb = plt.colorbar(img, fraction=.06, pad=0.03, format='%.3f')
    cb.set_label('AUC')
    # label best point
    # NOTE(review): x/y here are grid *indices* (contourf was drawn without
    # explicit coordinates) while set_xlim/set_ylim below use parameter
    # *values*; these only agree when the two ranges coincide -- confirm.
    y = param_values[param_names[0]].index(best_point[param_names[0]])
    x = param_values[param_names[1]].index(best_point[param_names[1]])
    ax.plot([x], [y], marker='o', markersize=10, markeredgewidth=2,
            markerfacecolor='none', markeredgecolor='k')
    ax.set_ylim(extent[2], extent[3])
    ax.set_xlim(extent[0], extent[1])
    if label_all_ticks:
        plt.xticks(range(len(param_values[param_names[1]])),
                   param_values[param_names[1]])
        plt.yticks(range(len(param_values[param_names[0]])),
                   param_values[param_names[0]])
    else:
        # Custom formatters translate tick indices back to parameter values
        # (x axis: number of trees; y axis: leaf fraction).
        trees = param_values[param_names[1]]
        def tree_formatter(x, pos):
            if x < 0 or x >= len(trees):
                return ''
            return str(trees[int(x)])
        leaves = param_values[param_names[0]]
        def leaf_formatter(x, pos):
            if x < 0 or x >= len(leaves):
                return ''
            return '%.3f' % leaves[int(x)]
        ax.xaxis.set_major_formatter(FuncFormatter(tree_formatter))
        ax.yaxis.set_major_formatter(FuncFormatter(leaf_formatter))
        #ax.xaxis.set_major_locator(MaxNLocator(n_ticks, integer=True,
        #    prune='lower', steps=[1, 2, 5, 10]))
        ax.xaxis.set_major_locator(IndexLocator(20, -1))
        xticks = ax.xaxis.get_major_ticks()
        xticks[-1].label1.set_visible(False)
        #ax.yaxis.set_major_locator(MaxNLocator(n_ticks, integer=True,
        #    steps=[1, 2, 5, 10], prune='lower'))
        ax.yaxis.set_major_locator(IndexLocator(20, -1))
        yticks = ax.yaxis.get_major_ticks()
        yticks[-1].label1.set_visible(False)
    #xlabels = ax.get_xticklabels()
    #for label in xlabels:
    #    label.set_rotation(45)
    ax.set_xlabel(params[param_names[1]],
                  position=(1., 0.), ha='right')
    ax.set_ylabel(params[param_names[0]],
                  position=(0., 1.), ha='right')
    #ax.set_frame_on(False)
    #ax.xaxis.set_ticks_position('none')
    #ax.yaxis.set_ticks_position('none')
    # Summary box: best raw (unsmoothed) score and its parameter values.
    ax.text(0.1, 0.9,
            "{0} Category\nBest AUC = {1:.3f}\nTrees = {2:d}\nFraction = {3:.3f}".format(
            name,
            scores.max(),
            best_point[param_names[1]],
            best_point[param_names[0]]),
            ha='left', va='top',
            transform=ax.transAxes,
            bbox=dict(pad=10, facecolor='none', edgecolor='none'))
    if title:
        plt.suptitle(title)
    plt.axis("tight")
    plt.savefig(os.path.join(path, "grid_scores_{0}.{1}".format(
        name.lower(), format)),
        bbox_inches='tight')
    plt.clf()
def hist_scores(hist, scores, systematic='NOMINAL'):
    """Fill *hist* with every sample's scores for one systematic term."""
    # Unpack into fresh names instead of rebinding the `scores` argument.
    for _sample, score_dict in scores:
        values, weights = score_dict[systematic]
        fill_hist(hist, values, weights)
def _fill_sample_hist(sample, scores_dict, hist_template):
    """Build the NOMINAL histogram for one sample.

    Fills a clone of *hist_template* with the sample's nominal scores and
    attaches a ``systematics`` dict mapping every other systematic term
    to its own filled histogram.
    """
    hist = hist_template.Clone(title=sample.label)
    scores, weight = scores_dict['NOMINAL']
    fill_hist(hist, scores, weight)
    hist.decorate(**sample.hist_decor)
    hist.systematics = {}
    for sys_term, (sys_scores, sys_weight) in scores_dict.items():
        if sys_term == 'NOMINAL':
            continue
        sys_hist = hist_template.Clone()
        fill_hist(sys_hist, sys_scores, sys_weight)
        hist.systematics[sys_term] = sys_hist
    return hist


def plot_clf(background_scores,
             category,
             signal_scores=None,
             signal_scale=1.,
             data_scores=None,
             name=None,
             draw_histograms=True,
             draw_data=False,
             save_histograms=False,
             hist_template=None,
             bins=10,
             min_score=0,
             max_score=1,
             signal_colors=cm.spring,
             systematics=None,
             unblind=False,
             **kwargs):
    """Histogram classifier scores for background, signal and (optionally) data.

    Parameters
    ----------
    background_scores, signal_scores : list of (sample, scores_dict)
        ``scores_dict`` maps a systematic term ('NOMINAL', ...) to a
        ``(scores, weights)`` pair.
    data_scores : (data_sample, scores) or None
        Only histogrammed when *draw_data* is True and *unblind* is not False.
    unblind : bool or float
        ``True`` draws all data; a float partially unblinds the data up to
        that signal efficiency.
    hist_template : Hist, optional
        Template histogram; when None one is built from *bins* /
        *min_score* / *max_score* (variable-width bins if *bins* is a
        sequence).

    Returns
    -------
    tuple
        ``(bkg_hists, sig_hists, data_hist)``; the latter two may be None.
    """
    if hist_template is None:
        if hasattr(bins, '__iter__'):
            # variable width bins
            hist_template = Hist(bins)
            min_score = min(bins)
            max_score = max(bins)
        else:
            hist_template = Hist(bins, min_score, max_score)
    # Background and signal share the exact same histogram construction,
    # so the previously duplicated loops are factored into a helper.
    bkg_hists = [_fill_sample_hist(bkg, scores_dict, hist_template)
                 for bkg, scores_dict in background_scores]
    if signal_scores is not None:
        sig_hists = [_fill_sample_hist(sig, scores_dict, hist_template)
                     for sig, scores_dict in signal_scores]
    else:
        sig_hists = None
    if data_scores is not None and draw_data and unblind is not False:
        data, data_scores = data_scores
        if isinstance(unblind, float):
            if sig_hists is not None:
                # unblind up to `unblind` signal efficiency
                # (fix: was hard-coded to 0.3, contradicting the parameter)
                sum_sig = sum(sig_hists)
                cut = efficiency_cut(sum_sig, unblind)
                data_scores = data_scores[data_scores < cut]
        data_hist = hist_template.Clone(title=data.label)
        data_hist.decorate(**data.hist_decor)
        fill_hist(data_hist, data_scores)
        if unblind >= 1 or unblind is True:
            # fully unblinded: log the data/model agreement
            log.info("Data events: %d" % sum(data_hist))
            log.info("Model events: %f" % sum(sum(bkg_hists)))
            for hist in bkg_hists:
                log.info("{0} {1}".format(hist.GetTitle(), sum(hist)))
            log.info("Data / Model: %f" % (sum(data_hist) / sum(sum(bkg_hists))))
    else:
        data_hist = None
    if draw_histograms:
        output_name = 'event_bdt_score'
        if name is not None:
            output_name += '_' + name
        # draw both a linear- and a log-scale version
        for logy in (False, True):
            draw(data=data_hist,
                 model=bkg_hists,
                 signal=sig_hists,
                 signal_scale=signal_scale,
                 category=category,
                 name="BDT Score",
                 output_name=output_name,
                 show_ratio=data_hist is not None,
                 model_colors=None,
                 signal_colors=signal_colors,
                 systematics=systematics,
                 logy=logy,
                 **kwargs)
    return bkg_hists, sig_hists, data_hist
def draw_ROC(bkg_scores, sig_scores):
    """Draw a ROC curve (background rejection vs signal efficiency).

    Parameters
    ----------
    bkg_scores, sig_scores : list of (sample, scores_dict)
        Score collections in the format accepted by :func:`hist_scores`.

    The plot is written to ``PLOTS_DIR/ROC.png``.

    Fix: the previous implementation iterated over an undefined
    ``category_scores`` name and always raised NameError; it now uses
    its actual arguments and draws a single curve.
    """
    hist_template = Hist(100, -1, 1)
    plt.figure()
    bkg_hist = hist_template.Clone()
    sig_hist = hist_template.Clone()
    hist_scores(bkg_hist, bkg_scores)
    hist_scores(sig_hist, sig_scores)
    bkg_array = np.array(bkg_hist)
    sig_array = np.array(sig_hist)
    # reverse cumsum: efficiency of a lower score cut placed at each bin
    bkg_eff = bkg_array[::-1].cumsum()[::-1]
    sig_eff = sig_array[::-1].cumsum()[::-1]
    bkg_eff /= bkg_array.sum()
    sig_eff /= sig_array.sum()
    plt.plot(sig_eff, 1. - bkg_eff,
             linestyle='-',
             linewidth=2.,
             label='ROC')
    plt.legend(loc='lower left')
    plt.ylabel('Background Rejection')
    plt.xlabel('Signal Efficiency')
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.grid()
    plt.savefig(os.path.join(PLOTS_DIR, 'ROC.png'), bbox_inches='tight')
def plot_significance(signal, background, ax):
    """Overlay the signal significance S/sqrt(S+B) on a twin y-axis of *ax*.

    Parameters
    ----------
    signal, background : histogram or list/tuple of histograms
        Lists/tuples are summed before the significance scan.
    ax : matplotlib axes
        Axes holding the score distributions; a ``twinx`` axis is added.
    """
    if isinstance(signal, (list, tuple)):
        signal = sum(signal)
    if isinstance(background, (list, tuple)):
        background = sum(background)
    # plot the signal significance on the same axis
    sig_ax = ax.twinx()
    # significance() returns per-cut values plus the optimum -- see
    # statstools.utils.significance for the exact definition.
    sig, max_sig, max_cut = significance(signal, background)
    bins = list(background.xedges())[:-1]  # left bin edges
    log.info("Max signal significance %.2f at %.2f" % (max_sig, max_cut))
    sig_ax.plot(bins, sig, 'k--', label='Signal Significance')
    sig_ax.set_ylabel(r'$S / \sqrt{S + B}$',
        color='black', fontsize=15, position=(0., 1.), va='top', ha='right')
    #sig_ax.tick_params(axis='y', colors='red')
    sig_ax.set_ylim(0, max_sig * 2)
    # annotate the optimal working point (cut value, significance)
    plt.text(max_cut, max_sig + 0.02, '(%.2f, %.2f)' % (max_cut, max_sig),
             ha='right', va='bottom',
             axes=sig_ax)
    """
    plt.annotate('(%.2f, %.2f)' % (max_cut, max_sig), xy=(max_cut, max_sig),
                 xytext=(max_cut + 0.05, max_sig),
                 arrowprops=dict(color='black', shrink=0.15),
                 ha='left', va='center', color='black')
    """
| gpl-3.0 |
rsivapr/scikit-learn | sklearn/qda.py | 8 | 7229 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils.fixes import unique
from .utils import check_arrays, array2d, column_or_1d
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes

    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``

    Attributes
    ----------
    `covariances_` : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.

    `means_` : array-like, shape = [n_classes, n_features]
        Class means.

    `priors_` : array-like, shape = [n_classes]
        Class priors (sum to 1).

    `rotations_` : list of arrays
        For each class an array of shape [n_samples, n_samples], the
        rotation of the Gaussian distribution, i.e. its principal axis.

    `scalings_` : array-like, shape = [n_classes, n_features]
        Contains the scaling of the Gaussian
        distributions along the principal axes for each
        class, i.e. the variance in the rotated coordinate system.

    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None, reg_param=0.0)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """

    def __init__(self, priors=None, reg_param=0.):
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param

    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.

        tol : float, optional
            Singular values of a class' centered design matrix below this
            threshold are treated as zero (collinearity warning only).
        """
        X, y = check_arrays(X, y)
        y = column_or_1d(y, warn=True)
        # encode the target labels as 0..n_classes-1
        self.classes_, y = unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # empirical class frequencies
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # Per-class Gaussian fit via SVD of the centered class samples.
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # per-axis variances (unbiased), then shrink toward identity
            S2 = (S ** 2) / (len(Xg) - 1)
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = np.asarray(scalings)
        self.rotations_ = rotations
        return self

    def _decision_function(self, X):
        # Log-posterior (up to a common additive constant) of each class
        # for each sample.
        X = array2d(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # squared Mahalanobis distance in the rotated/whitened frame
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        return (-0.5 * (norm2 + np.sum(np.log(self.scalings_), 1))
                + np.log(self.priors_))

    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # pick the class with the highest (log-)posterior score
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| bsd-3-clause |
hadim/pygraphml | pygraphml/graph.py | 1 | 6171 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from . import Node
from . import Edge
from collections import deque
class Graph:
    """
    Main class which represent a Graph

    :param name: name of the graph
    """

    def __init__(self, name=""):
        """Create an empty, unrooted graph."""
        self.name = name
        self._nodes = []
        self._edges = []
        self._root = None
        self.directed = True
        # running counter used by DFS_prefix to stamp a 'depth' on nodes
        self.i = 0

    def DFS_prefix(self, root=None):
        """
        Depth-first search.

        .. seealso::
           `Wikipedia DFS description <http://en.wikipedia.org/wiki/Depth-first_search>`_

        :param root: first to start the search
        :return: list of nodes
        """
        if not root:
            root = self._root
        return self._DFS_prefix(root)

    def _DFS_prefix(self, n, parent=None):
        """Recursive helper for :meth:`DFS_prefix`; returns nodes in prefix order."""
        nodes = [n]
        n['depth'] = self.i
        for c in n.children():
            nodes += self._DFS_prefix(c, n)
            self.i += 1
        return nodes

    def BFS(self, root=None):
        """
        Breadth-first search.

        .. seealso::
           `Wikipedia BFS description <http://en.wikipedia.org/wiki/Breadth-first_search>`_

        :param root: first to start the search
        :return: list of nodes
        """
        if not root:
            root = self.root()
        queue = deque()
        queue.append(root)
        nodes = []
        while len(queue) > 0:
            x = queue.popleft()
            nodes.append(x)
            for child in x.children():
                queue.append(child)
        return nodes

    def get_depth(self, node):
        """Return the number of parent hops between *node* and the root."""
        depth = 0
        while node.parent() and node != self.root():
            node = node.parent()[0]
            depth += 1
        return depth

    def nodes(self):
        """Return the list of nodes."""
        return self._nodes

    def edges(self):
        """Return the list of edges."""
        return self._edges

    def children(self, node):
        """Return the children of *node*."""
        return node.children()

    def add_node(self, label="", id=None):
        """Create a node carrying *label*, add it to the graph and return it."""
        n = Node(id)
        n['label'] = label
        self._nodes.append(n)
        return n

    def add_edge(self, n1, n2, directed=False):
        """Connect two existing nodes and return the new edge.

        :raises ValueError: if either node does not belong to this graph.
        """
        # Fix: previously raised the undefined name `Test`, which surfaced
        # as a meaningless NameError; raise a descriptive ValueError instead.
        if n1 not in self._nodes:
            raise ValueError('first node is not part of this graph')
        if n2 not in self._nodes:
            raise ValueError('second node is not part of this graph')
        e = Edge(n1, n2, directed)
        self._edges.append(e)
        return e

    def add_edge_by_id(self, id1, id2):
        """Connect the nodes with the given IDs.

        :raises ValueError: if either ID is unknown.
        """
        try:
            n1 = next(n for n in self._nodes if n.id == id1)
        except StopIteration:
            raise ValueError('Graph has no node with ID {}'.format(id1))
        try:
            n2 = next(n for n in self._nodes if n.id == id2)
        except StopIteration:
            raise ValueError('Graph has no node with ID {}'.format(id2))
        return self.add_edge(n1, n2)

    def add_edge_by_label(self, label1, label2):
        """Connect the last nodes carrying the given labels.

        Returns the new edge, or None when either label is not found.
        """
        n1 = None
        n2 = None
        for n in self._nodes:
            if n['label'] == label1:
                n1 = n
            if n['label'] == label2:
                n2 = n
        if n1 and n2:
            return self.add_edge(n1, n2)
        else:
            return

    def set_root(self, node):
        """Declare *node* to be the root used by the traversals."""
        self._root = node

    def root(self):
        """Return the current root node (or None)."""
        return self._root

    def set_root_by_attribute(self, value, attribute='label'):
        """Root the graph at the first node whose *attribute* matches *value*."""
        for n in self.nodes():
            if n[attribute] in value:
                self.set_root(n)
                return n

    def get_attributs(self):
        """Return one representative attribute object per distinct attribute
        name used by any node or edge.

        NOTE: the method name keeps its historical spelling for backward
        compatibility with existing callers.
        """
        attr = []
        attr_obj = []
        for n in self.nodes():
            for a in n.attr:
                if a not in attr:
                    attr.append(a)
                    attr_obj.append(n.attr[a])
        for e in self.edges():
            for a in e.attr:
                if a not in attr:
                    attr.append(a)
                    attr_obj.append(e.attr[a])
        return attr_obj

    def show(self, show_label=False):
        """Render the graph with networkx/matplotlib (imported lazily)."""
        import matplotlib
        import matplotlib.pyplot as plt
        import networkx as nx
        G = nx.Graph()
        for n in self._nodes:
            if show_label:
                n_label = n['label']
            else:
                n_label = n.id
            G.add_node(n_label)
        for e in self._edges:
            if show_label:
                n1_label = e.node1['label']
                n2_label = e.node2['label']
            else:
                n1_label = e.node1.id
                n2_label = e.node2.id
            G.add_edge(n1_label, n2_label)
        nx.draw(G)
        if show_label:
            nx.draw_networkx_labels(G, pos=nx.spring_layout(G))
        plt.show()
class NoDupesGraph(Graph):
    """Graph that silently de-duplicates nodes by label.

    Nodes live in a dict keyed by label, so adding an existing label
    returns the existing node, and edges may be added by label without
    creating the endpoints first.
    """
    def __init__(self, *args, **kwargs):
        Graph.__init__(self, *args, **kwargs)
        # label -> Node mapping replaces the base-class node list
        self._nodes = {}

    def nodes(self):
        """Return all nodes."""
        return self._nodes.values()

    def add_node(self, label):
        '''Return a node with label. Create node if label is new'''
        if label not in self._nodes:
            fresh = Node()
            fresh['label'] = label
            self._nodes[label] = fresh
        return self._nodes[label]

    def add_edge(self, n1_label, n2_label, directed=False):
        """
        Get or create edges using get_or_create_node
        """
        edge = Edge(self.add_node(n1_label), self.add_node(n2_label), directed)
        self._edges.append(edge)
        return edge

    def flush_empty_nodes(self):
        '''not implemented'''
        pass

    def condense_edges(self):
        '''if a node connects to only two edges, combine those
        edges and delete the node.

        not implemented
        '''
        pass
| bsd-3-clause |
alexlee-gk/visual_dynamics | scripts/plot_algorithm.py | 1 | 8487 | import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import yaml
from visual_dynamics.utils.iter_util import flatten_tree, unflatten_tree
def main():
    """Plot FQI vs TRPO learning curves (average cost vs training samples).

    Reads mean/std returns from algorithm YAML files (FQI) and rllab
    progress CSVs (TRPO), then draws two side-by-side panels sharing a
    log-scaled y-axis.  Saves a PDF with ``--save``, otherwise shows the
    figure interactively.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--algorithm_fnames', nargs='+', type=str)
    parser.add_argument('--progress_csv_paths', nargs='+', type=str)
    parser.add_argument('--usetex', '--use_tex', action='store_true')
    parser.add_argument('--save', action='store_true')
    args = parser.parse_args()
    if args.usetex:
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
    title_fontsize = 18
    fontsize = 14
    # Defaults: hard-coded files from the original experiment runs.
    # Nested lists group runs that were split across multiple CSV files.
    if args.algorithm_fnames is None:
        args.algorithm_fnames = ['algorithm_learning/fqi_nooptfitbias_l2reg0.1_fc_pixel.yaml',
                                 'algorithm_learning/fqi_nooptfitbias_l2reg0.1_local_pixel.yaml',
                                 'algorithm_learning/fqi_nooptfitbias_l2reg0.1_local_level1.yaml',
                                 'algorithm_learning/fqi_nooptfitbias_l2reg0.1_local_level2.yaml',
                                 'algorithm_learning/fqi_nooptfitbias_l2reg0.1_local_level3.yaml',
                                 'algorithm_learning/fqi_nooptfitbias_l2reg0.1_local_level4.yaml',
                                 'algorithm_learning/fqi_nooptfitbias_l2reg0.1_local_level5.yaml']
    if args.progress_csv_paths is None:
        args.progress_csv_paths = [
            ['/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_20_20_05_35_0001/progress.csv',
             '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_27_08_11_23_0001/progress.csv'],
            '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_19_14_10_04_0001/progress.csv',
            '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_19_14_09_56_0001/progress.csv',
            ['/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_19_14_10_03_0001/progress.csv',
             '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_23_21_32_15_0001/progress.csv'],
            '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_19_14_10_17_0001/progress.csv',
            '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_20_14_31_29_0001/progress.csv',
            '/home/alex/rll/rllab/data/local/experiment/experiment_2017_03_20_20_05_03_0001/progress.csv']
    # one file per network architecture (see `labels` below)
    assert len(args.algorithm_fnames) == 7
    assert len(args.progress_csv_paths) == 7
    # --- FQI results, read from the algorithm YAML files ---
    fqi_n_iters = 10
    fqi_mean_returns = []
    fqi_std_returns = []
    for algorithm_fname in args.algorithm_fnames:
        with open(algorithm_fname) as algorithm_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; these files are locally generated.
            algorithm_config = yaml.load(algorithm_file)
        # negate returns so the curves show average *costs*
        fqi_mean_returns_ = -np.asarray(algorithm_config['mean_returns'][-(fqi_n_iters + 1):])
        fqi_std_returns_ = np.asarray(algorithm_config['std_returns'][-(fqi_n_iters + 1):])
        fqi_mean_returns.append(fqi_mean_returns_)
        fqi_std_returns.append(fqi_std_returns_)
    # --- TRPO results, read from the rllab progress CSVs ---
    trpo_n_iters = 50
    trpo_iterations = []
    trpo_mean_returns = []
    trpo_std_returns = []
    for progress_csv_path in flatten_tree(args.progress_csv_paths):
        with open(progress_csv_path, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            # divide by sqrt(10) to convert from standard deviation to standard error
            trpo_iterations_, trpo_mean_returns_, trpo_std_returns_ = \
                zip(*[(int(row['Iteration']), -float(row['AverageValReturn']), float(row['StdValReturn']) / np.sqrt(10)) for row in reader][:(trpo_n_iters + 1)])
        trpo_iterations.append(trpo_iterations_)
        trpo_mean_returns.append(np.array(trpo_mean_returns_))
        trpo_std_returns.append(np.array(trpo_std_returns_))
    # restore the nested structure of args.progress_csv_paths
    trpo_iterations = unflatten_tree(args.progress_csv_paths, trpo_iterations)
    trpo_mean_returns = unflatten_tree(args.progress_csv_paths, trpo_mean_returns)
    trpo_std_returns = unflatten_tree(args.progress_csv_paths, trpo_std_returns)
    # concatenate runs that were split across multiple CSV files
    for i, (trpo_iterations_, trpo_mean_returns_, trpo_std_returns_) in enumerate(zip(trpo_iterations, trpo_mean_returns, trpo_std_returns)):
        trpo_iterations_flat = flatten_tree(trpo_iterations_, base_type=int)
        if trpo_iterations_flat != list(trpo_iterations_):
            assert trpo_iterations_flat == list(range(len(trpo_iterations_flat)))
            trpo_iterations[i] = tuple(trpo_iterations_flat[:(trpo_n_iters + 1)])
            trpo_mean_returns[i] = np.append(*trpo_mean_returns_)[:(trpo_n_iters + 1)]
            trpo_std_returns[i] = np.append(*trpo_std_returns_)[:(trpo_n_iters + 1)]
    # --- plotting ---
    color_palette = sns.color_palette('Set2', 10)
    colors = [color_palette[i] for i in [3, 5, 4, 6, 7, 9, 8]]
    labels = ['pixel, fully connected', 'pixel, locally connected', 'VGG conv1_2', 'VGG conv2_2', 'VGG conv3_3', 'VGG conv4_3', 'VGG conv5_3']
    if args.usetex:
        # escape underscores for LaTeX rendering
        labels = [label.replace('_', '\hspace{-0.1em}\_\hspace{0.1em}') for label in labels]
    fig, (fqi_ax, trpo_ax) = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    # FQI
    fqi_batch_size = 10 * 100  # 10 trajectories, 100 time steps per trajectory
    fqi_num_samples = fqi_batch_size * np.arange(fqi_n_iters + 1)
    for i, (mean_return, std_return, label) in enumerate(zip(fqi_mean_returns, fqi_std_returns, labels)):
        fqi_ax.plot(fqi_num_samples, mean_return, label=label, color=colors[i])
        # shaded band: mean +/- half the (standard-error) spread
        fqi_ax.fill_between(fqi_num_samples,
                            mean_return + std_return / 2.0,
                            mean_return - std_return / 2.0,
                            color=colors[i],
                            alpha=0.3)
    fqi_ax.set_xlabel('Number of Training Samples', fontsize=title_fontsize)
    fqi_ax.set_ylabel('Average Costs', fontsize=title_fontsize)
    fqi_ax.set_yscale('log')
    fqi_ax.set_xlim(0, fqi_num_samples[-1])
    fqi_ax.xaxis.set_tick_params(labelsize=fontsize)
    fqi_ax.yaxis.set_tick_params(labelsize=fontsize)
    # plt.axhline(y=min(map(min, fqi_mean_returns + trpo_mean_returns)), color='k', linestyle='--')
    # single legend below both panels
    fqi_ax.legend(bbox_to_anchor=(1.08, -0.1), loc='upper center', ncol=4, fontsize=fontsize)
    # secondary x-axis showing the FQI sampling iteration number
    fqi_ax2 = fqi_ax.twiny()
    fqi_ax2.set_xlabel("FQI Sampling Iteration", fontsize=title_fontsize)
    fqi_ax2.set_xlim(fqi_ax.get_xlim())
    fqi_ax2.set_xticks(fqi_batch_size * np.arange(fqi_n_iters + 1))
    fqi_ax2.set_xticklabels(np.arange(fqi_n_iters + 1), fontsize=fontsize)
    # TRPO
    trpo_batch_size = 4000
    trpo_num_samples = trpo_batch_size * np.arange(trpo_n_iters + 1)
    for i, (mean_return, std_return, label) in enumerate(zip(trpo_mean_returns, trpo_std_returns, labels)):
        trpo_ax.plot(trpo_num_samples[:len(mean_return)], mean_return, label=label, color=colors[i])
        trpo_ax.fill_between(trpo_num_samples[:len(mean_return)],
                             mean_return + std_return / 2.0,
                             mean_return - std_return / 2.0,
                             color=colors[i],
                             alpha=0.3)
    trpo_ax.set_xlabel('Number of Training Samples', fontsize=title_fontsize)
    # trpo_ax.set_ylabel('Average Costs')
    trpo_ax.set_yscale('log')
    trpo_ax.set_xlim(0, trpo_num_samples[-1])
    trpo_ax.xaxis.set_tick_params(labelsize=fontsize)
    trpo_ax.yaxis.set_tick_params(labelsize=fontsize)
    trpo_ax.set_xticks(trpo_batch_size * np.arange(0, trpo_n_iters + 1, 10))
    trpo_ax.set_xticklabels(trpo_batch_size * np.arange(0, trpo_n_iters + 1, 10), fontsize=fontsize)
    # plt.axhline(y=min(map(min, fqi_mean_returns + trpo_mean_returns)), color='k', linestyle='--')
    # secondary x-axis showing the TRPO sampling iteration number
    trpo_ax2 = trpo_ax.twiny()
    trpo_ax2.set_xlabel("TRPO Sampling Iteration", fontsize=title_fontsize)
    trpo_ax2.set_xlim(trpo_ax.get_xlim())
    trpo_ax2.set_xticks(trpo_batch_size * np.arange(0, trpo_n_iters + 1, 5))
    trpo_ax2.set_xticklabels(np.arange(0, trpo_n_iters + 1, 5), fontsize=fontsize)
    # give both panels a common y-range so the curves are comparable
    ymins, ymaxs = zip(fqi_ax.get_ylim(), trpo_ax.get_ylim())
    ylim = (min(ymins), max(ymaxs))
    fqi_ax.set_ylim(ylim)
    trpo_ax.set_ylim(ylim)
    fqi_ax.grid(b=True, which='both', axis='y')
    trpo_ax.grid(b=True, which='both', axis='y')
    if args.save:
        plt.savefig('/home/alex/Dropbox/visual_servoing/20160322/fqi_trpo_learning_val_trajs.pdf', bbox_inches='tight')
    else:
        plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| mit |
nicolagritti/ACVU_scripts | source/check_piezo_movements.py | 1 | 1793 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 10:31:00 2015
@author: kienle
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
from matplotlib import cm
from generalFunctions import *
from skimage import filters
import matplotlib as mpl
# fonttype 42 = TrueType, keeps text editable in saved PDFs
mpl.rcParams['pdf.fonttype'] = 42
# root data directory and the worm sub-folders to process
path = 'X:\\Nicola\\160606_noiseTest_beads'
worms = ['C04']#,'C02']
def getPiezo( path, worm, tRow ):
    """Read one piezo log file and return its time/input/output traces.

    The file ``<path>/<worm>/<tRow.fName>.txt`` starts with a free-form
    header terminated by a line containing ``Time [ms]``, followed by
    tab-separated rows of time, input value and output value.

    Parameters
    ----------
    path : str
        Root data directory.
    worm : str
        Worm sub-directory name.
    tRow : object
        Row with an ``fName`` attribute naming the file (no extension).

    Returns
    -------
    tuple of numpy arrays
        ``(t, _in, _out)`` where ``t`` is the first column scaled by 1000
        (header says ms -- presumably a unit conversion; confirm), and
        ``_in`` / ``_out`` are the second and third columns.

    Raises
    ------
    ValueError
        If the ``Time [ms]`` header line is missing (the old ``while
        readline()`` scan would spin forever at end-of-file instead).
    """
    with open( os.path.join( path,worm,tRow.fName+'.txt' ), 'r') as f:
        # Skip the header; the column-title line contains 'Time [ms]'.
        for line in f:
            if 'Time [ms]' in line:
                break
        else:
            raise ValueError(
                'no "Time [ms]" header line found in {}'.format(f.name))
        rows = [l.strip().split('\t') for l in f]
    # builtin float() replaces np.float, which was removed in NumPy 1.24
    t = np.array( [ float(r[0]) for r in rows ] ) * 1000
    _in = np.array( [ float(r[1]) for r in rows ] )
    _out = np.array( [ float(r[2]) for r in rows ] )
    return ( t, _in, _out )
def plotSingleWormData( path, worm, ax1 ):
    """Overlay the piezo input (blue) and output (green) traces of every
    timepoint of *worm* on the axes *ax1*."""
    timesDF = load_data_frame( path, worm + '_01times.pickle' )
    gonadPosDF = load_data_frame( path, worm + '_02gonadPos.pickle' )
    cellPosDF = load_data_frame( path, worm + '_04cellPos.pickle' )
    cellOutDF = load_data_frame( path, worm + '_05cellOut.pickle' )
    cellFluoDF = load_data_frame( path, worm + '_06cellFluo.pickle' )
    for _, row in timesDF.iterrows():
        print(row.fName)
        trace_t, trace_in, trace_out = getPiezo(path, worm, row)
        ax1.plot(trace_t, trace_in, '-b', lw=2)
        ax1.plot(trace_t, trace_out, '-g', lw=2)
### setup figure for the timeseries
fig1 = plt.figure(figsize=(5.8,3.8))
ax1 = fig1.add_subplot(111)
fig1.subplots_adjust(left=0.15, right=.95, top=.95, bottom=0.15)
# enlarge the tick labels on both axes
for tl in ax1.get_xticklabels():
    tl.set_fontsize(18)
for tl in ax1.get_yticklabels():
    tl.set_fontsize(18)
# fixed axis ranges chosen for these recordings
ax1.set_ylim((-17.2,0.5))
ax1.set_xlim((-5,500))
# draw the traces for the first (only) worm in the list
plotSingleWormData(path,worms[0],ax1)
plt.show() | gpl-3.0 |
jwiggins/scikit-image | doc/examples/features_detection/plot_local_binary_pattern.py | 12 | 6776 | """
===============================================
Local Binary Pattern for texture classification
===============================================
In this example, we will see how to classify textures based on LBP (Local
Binary Pattern). LBP looks at points surrounding a central point and tests
whether the surrounding points are greater than or less than the central point
(i.e. gives a binary result).
Before trying out LBP on an image, it helps to look at a schematic of LBPs.
The below code is just used to plot the schematic.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# LBP variant passed to skimage.feature.local_binary_pattern below
METHOD = 'uniform'
plt.rcParams['font.size'] = 9
def plot_circle(ax, center, radius, color):
    """Add a filled circle with a mid-grey outline to the axes *ax*."""
    patch = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
    ax.add_patch(patch)
def plot_lbp_model(ax, binary_values):
    """Draw the schematic for a local binary pattern."""
    # Geometry: ring radius, pixel radius, half-width of the 3x3 grid.
    angle_step = np.deg2rad(45)
    ring_radius = 1
    pixel_radius = 0.15
    half_width = 1.5
    grid_color = '0.5'
    # Central pixel.
    plot_circle(ax, (0, 0), radius=pixel_radius, color=grid_color)
    # One surrounding pixel per binary value, placed evenly on the ring;
    # the value itself ('0'/'1') doubles as the grey-level color.
    for idx, value in enumerate(binary_values):
        pos = (ring_radius * np.cos(idx * angle_step),
               ring_radius * np.sin(idx * angle_step))
        plot_circle(ax, pos, radius=pixel_radius, color=str(value))
    # Pixel grid lines.
    for coord in np.linspace(-half_width, half_width, 4):
        ax.axvline(coord, color=grid_color)
        ax.axhline(coord, color=grid_color)
    # Layout tweaks.
    ax.axis('image')
    ax.axis('off')
    extent = half_width + 0.2
    ax.set_xlim(-extent, extent)
    ax.set_ylim(-extent, extent)
fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
# example neighbourhoods, one per schematic panel above
binary_patterns = [np.zeros(8),
                   np.ones(8),
                   np.hstack([np.ones(4), np.zeros(4)]),
                   np.hstack([np.zeros(3), np.ones(5)]),
                   [1, 0, 0, 1, 1, 1, 0, 0]]
for ax, values, name in zip(axes, binary_patterns, titles):
    plot_lbp_model(ax, values)
    ax.set_title(name)
"""
.. image:: PLOT2RST.current_figure
The figure above shows example results with black (or white) representing
pixels that are less (or more) intense than the central pixel. When surrounding
pixels are all black or all white, then that image region is flat (i.e.
featureless). Groups of continuous black or white pixels are considered
"uniform" patterns that can be interpreted as corners or edges. If pixels
switch back-and-forth between black and white pixels, the pattern is considered
"non-uniform".
When using LBP to detect texture, you measure a collection of LBPs over an
image patch and look at the distribution of these LBPs. Let's apply LBP to
a brick texture.
"""
from skimage.transform import rotate
from skimage.feature import local_binary_pattern
from skimage import data
from skimage.color import label2rgb
# settings for LBP
radius = 3  # radius of the circular neighbourhood, in pixels
n_points = 8 * radius  # number of sampling points on the circle
def overlay_labels(image, lbp, labels):
    """Highlight the pixels of *image* whose LBP code is in *labels*."""
    per_label_masks = [lbp == code for code in labels]
    mask = np.logical_or.reduce(per_label_masks)
    return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
def highlight_bars(bars, indexes):
    """Color the histogram bars at the given *indexes* red."""
    for idx in indexes:
        bars[idx].set_facecolor('r')
# Compute the LBP code image of the brick texture.  METHOD is defined
# earlier in this script (not visible here) — presumably 'uniform'; confirm
# against the section above.
image = data.load('brick.png')
lbp = local_binary_pattern(image, n_points, radius, METHOD)
def hist(ax, lbp):
    """Plot the normalized histogram of LBP codes on *ax*.

    Uses one unit-width bin per integer LBP code, spanning [0, max code + 1).

    Parameters
    ----------
    ax : matplotlib Axes
        Axes to draw the histogram into.
    lbp : ndarray
        Image of LBP codes.

    Returns
    -------
    The ``(counts, bin_edges, patches)`` triple from ``Axes.hist``.
    """
    # ``lbp.max()`` is a float; modern matplotlib requires an integer bin
    # count.  ``normed`` was removed from ``Axes.hist`` — ``density`` gives
    # the identical result for these uniform, unit-width bins.
    n_bins = int(lbp.max() + 1)
    return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),
                   facecolor='0.5')
# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()
titles = ('edge', 'flat', 'corner')
# Half-width of the band of LBP codes accepted around each nominal code.
w = width = radius - 1
# Edges: codes near n_points / 2 (half the neighbours brighter).
edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
# Flat regions: codes near 0 or near the top of the code range.
flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
i_14 = n_points // 4            # 1/4th of the histogram
i_34 = 3 * (n_points // 4)      # 3/4th of the histogram
# Corners: codes near the quarter and three-quarter marks.
corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
                 list(range(i_34 - w, i_34 + w + 1)))
label_sets = (edge_labels, flat_labels, corner_labels)
# Top row: image with the matching regions overlaid.
for ax, labels in zip(ax_img, label_sets):
    ax.imshow(overlay_labels(image, lbp, labels))
# Bottom row: full LBP histogram with the selected codes highlighted.
for ax, labels, name in zip(ax_hist, label_sets, titles):
    counts, _, bars = hist(ax, lbp)
    highlight_bars(bars, labels)
    ax.set_ylim(ymax=np.max(counts[:-1]))
    ax.set_xlim(xmax=n_points + 2)
    ax.set_title(name)
ax_hist[0].set_ylabel('Percentage')
for ax in ax_img:
    ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The above plot highlights flat, edge-like, and corner-like regions of the
image.
The histogram of the LBP result is a good measure to classify textures. Here,
we test the histogram distributions against each other using the
Kullback-Leibler-Divergence.
"""
# settings for LBP
radius = 2              # neighbourhood radius (pixels) for the classification demo
n_points = 8 * radius   # number of sampling points on the circle
def kullback_leibler_divergence(p, q):
    """Return the Kullback-Leibler divergence (base 2) between *p* and *q*.

    Bins where either distribution is zero are dropped, so the result is
    always finite rather than +/-inf.
    """
    p = np.asarray(p)
    q = np.asarray(q)
    both_nonzero = (p != 0) & (q != 0)
    p_sel = p[both_nonzero]
    q_sel = q[both_nonzero]
    return np.sum(p_sel * np.log2(p_sel / q_sel))
def match(refs, img):
    """Classify *img* against reference LBP images by histogram distance.

    Parameters
    ----------
    refs : dict
        Maps texture name -> reference LBP code image.
    img : ndarray
        Grayscale image to classify.

    Returns
    -------
    The name whose LBP histogram has the smallest KL divergence from the
    image's, or None if no score falls below the initial threshold of 10.
    """
    best_score = 10
    best_name = None
    lbp = local_binary_pattern(img, n_points, radius, METHOD)
    # ``lbp.max()`` is a float; np.histogram needs an integer bin count.
    # ``normed`` was removed from np.histogram (NumPy 1.24) — ``density``
    # is equivalent for these uniform, unit-width bins.
    n_bins = int(lbp.max() + 1)
    hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
    for name, ref in refs.items():
        ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,
                                   range=(0, n_bins))
        score = kullback_leibler_divergence(hist, ref_hist)
        if score < best_score:
            best_score = score
            best_name = name
    return best_name
# Load the three reference textures and precompute their LBP code images.
brick = data.load('brick.png')
grass = data.load('grass.png')
wall = data.load('rough-wall.png')
refs = {
    'brick': local_binary_pattern(brick, n_points, radius, METHOD),
    'grass': local_binary_pattern(grass, n_points, radius, METHOD),
    'wall': local_binary_pattern(wall, n_points, radius, METHOD)
}
# classify rotated textures
print('Rotated images matched against references using LBP:')
print('original: brick, rotated: 30deg, match result: ',
      match(refs, rotate(brick, angle=30, resize=False)))
print('original: brick, rotated: 70deg, match result: ',
      match(refs, rotate(brick, angle=70, resize=False)))
print('original: grass, rotated: 145deg, match result: ',
      match(refs, rotate(grass, angle=145, resize=False)))
# plot histograms of LBP of textures
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
                                                       figsize=(9, 6))
plt.gray()
# Top row: the textures themselves; bottom row: their LBP histograms.
ax1.imshow(brick)
ax1.axis('off')
hist(ax4, refs['brick'])
ax4.set_ylabel('Percentage')
ax2.imshow(grass)
ax2.axis('off')
hist(ax5, refs['grass'])
ax5.set_xlabel('Uniform LBP values')
ax3.imshow(wall)
ax3.axis('off')
hist(ax6, refs['wall'])
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
lukauskas/scipy | scipy/signal/windows.py | 32 | 53971 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
    """Return a boxcar or rectangular window.

    Included for completeness, this is equivalent to no window at all.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        Whether the window is symmetric. (Has no effect for boxcar.)

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1.

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.boxcar(51)
    >>> plt.plot(window)
    >>> plt.title("Boxcar window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the boxcar window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")

    """
    # Guard against M < 1: np.ones would raise ValueError for negative M,
    # whereas every other window in this module returns an empty array.
    if M < 1:
        return np.array([])
    return np.ones(M, float)
def triang(M, sym=True):
    """Return a triangular window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # A periodic even-length window is a symmetric window of length M + 1
    # with the final sample dropped.
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    half = np.arange(1, (M + 1) // 2 + 1)
    if M % 2:
        # Odd length: the ramp reaches exactly 1 at the centre sample.
        rising = 2 * half / (M + 1.0)
        w = np.concatenate((rising, rising[-2::-1]))
    else:
        # Even length: the peak value 1 is never attained.
        rising = (2 * half - 1.0) / M
        w = np.concatenate((rising, rising[::-1]))
    return w[:-1] if trunc else w
def parzen(M, sym=True):
    """Return a Parzen window.

    The Parzen window is a piecewise-cubic taper: a cubic B-spline shape
    built from an inner polynomial section and mirrored outer sections.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    half_span = (M - 1) / 2.0
    n = np.arange(-half_span, half_span + 0.5, 1.0)
    # Outer quarter of the window (|n| > (M-1)/4) and inner half.
    outer = np.extract(n < -(M - 1) / 4.0, n)
    inner = np.extract(np.abs(n) <= (M - 1) / 4.0, n)
    w_outer = 2 * (1 - np.abs(outer) / (M / 2.0)) ** 3.0
    w_inner = (1 - 6 * (np.abs(inner) / (M / 2.0)) ** 2.0 +
               6 * (np.abs(inner) / (M / 2.0)) ** 3.0)
    w = np.concatenate((w_outer, w_inner, w_outer[::-1]))
    return w[:-1] if trunc else w
def bohman(M, sym=True):
    """Return a Bohman window.

    The Bohman window is the autocorrelation of a cosine lobe; it is zero
    at both endpoints.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Interior samples as |x| on (-1, 1); endpoints are pinned to zero.
    interior = np.abs(np.linspace(-1, 1, M)[1:-1])
    core = ((1 - interior) * np.cos(np.pi * interior) +
            1.0 / np.pi * np.sin(np.pi * interior))
    w = np.concatenate(([0.0], core, [0.0]))
    return w[:-1] if trunc else w
def blackman(M, sym=True):
    r"""Return a Blackman window.

    The Blackman window is a three-term cosine-sum taper,

    .. math::  w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)

    designed for very low spectral leakage — close to optimal, only
    slightly worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        # Periodic even-length window == symmetric window of length M + 1
        # with the final sample removed.
        M += 1
    k = np.arange(0, M)
    w = (0.42 - 0.5 * np.cos(2.0 * np.pi * k / (M - 1)) +
         0.08 * np.cos(4.0 * np.pi * k / (M - 1)))
    return w[:-1] if trunc else w
def nuttall(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window according to Nuttall.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Alternating-sign cosine series with Nuttall's coefficients.
    coeffs = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
    fac = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += ((-1) ** k) * c * np.cos(k * fac)
    return w[:-1] if trunc else w
def blackmanharris(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Alternating-sign cosine series with the Blackman-Harris coefficients.
    coeffs = [0.35875, 0.48829, 0.14128, 0.01168]
    fac = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += ((-1) ** k) * c * np.cos(k * fac)
    return w[:-1] if trunc else w
def flattop(M, sym=True):
    """Return a flat top window.

    A 5-term cosine-sum window optimized for amplitude accuracy in
    spectral measurements (very flat passband, wide main lobe).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    # Alternating-sign cosine series with the flat-top coefficients.
    coeffs = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
    fac = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = np.zeros(M)
    for k, c in enumerate(coeffs):
        w += ((-1) ** k) * c * np.cos(k * fac)
    return w[:-1] if trunc else w
def bartlett(M, sym=True):
    r"""Return a Bartlett window.

    The Bartlett window is a triangular window whose end points are zero:

    .. math:: w(n) = \frac{2}{M-1} \left(
              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| \right)

    Convolution with this window produces linear interpolation; its Fourier
    transform is the product of two sinc functions.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The triangular window, with the first and last samples equal to
        zero and the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    position = np.arange(0, M)
    rising = 2.0 * position / (M - 1)
    # Rising ramp up to the midpoint, mirrored falling ramp after it.
    w = np.where(np.less_equal(position, (M - 1) / 2.0),
                 rising, 2.0 - rising)
    return w[:-1] if trunc else w
def hann(M, sym=True):
    r"""Return a Hann window.

    The Hann window is a raised-cosine taper whose ends touch zero:

    .. math::  w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    Named for the Austrian meteorologist Julius von Hann; sometimes
    erroneously called the "Hanning" window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    k = np.arange(0, M)
    w = 0.5 - 0.5 * np.cos(2.0 * np.pi * k / (M - 1))
    return w[:-1] if trunc else w
# Alias: ``hanning`` is exported as a second public name for `hann`
# (it appears in ``__all__`` above).
hanning = hann
def tukey(M, alpha=0.5, sym=True):
    r"""Return a Tukey window, also known as a tapered cosine window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction of
        the window inside the cosine tapered region.  If zero, the Tukey
        window is equivalent to a rectangular window.  If one, it is
        equivalent to a Hann window.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for
           Harmonic Analysis with the Discrete Fourier Transform".
           Proceedings of the IEEE 66 (1): 51-83.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Degenerate shapes: no taper at all, or a fully tapered (Hann) window.
    if alpha <= 0:
        return np.ones(M, 'd')
    if alpha >= 1.0:
        return hann(M, sym=sym)
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    samples = np.arange(0, M)
    width = int(np.floor(alpha * (M - 1) / 2.0))
    # Split the sample indices into the rising taper, the flat middle,
    # and the falling taper.
    head = samples[0:width + 1]
    body = samples[width + 1:M - width - 1]
    tail = samples[M - width - 1:]
    w_head = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0 * head / alpha / (M - 1))))
    w_body = np.ones(body.shape)
    w_tail = 0.5 * (1 + np.cos(np.pi * (-2.0 / alpha + 1 +
                                        2.0 * tail / alpha / (M - 1))))
    w = np.concatenate((w_head, w_body, w_tail))
    return w[:-1] if trunc else w
def barthann(M, sym=True):
    """Return a modified Bartlett-Hann window.

    A weighted combination of a triangular (Bartlett) ramp and a cosine
    (Hann) term.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    k = np.arange(0, M)
    # Distance of each sample from the window centre, normalized to [0, 0.5].
    dist = np.abs(k / (M - 1.0) - 0.5)
    w = 0.62 - 0.48 * dist + 0.38 * np.cos(2 * np.pi * dist)
    return w[:-1] if trunc else w
def hamming(M, sym=True):
    r"""Return a Hamming window.

    The Hamming window is a raised cosine with non-zero endpoints,

    .. math::  w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    optimized to minimize the nearest side lobe.  Named for R. W. Hamming.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    k = np.arange(0, M)
    w = 0.54 - 0.46 * np.cos(2.0 * np.pi * k / (M - 1))
    return w[:-1] if trunc else w
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    The Kaiser window is a taper formed from the modified zeroth-order
    Bessel function:

    .. math::  w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
               \right)/I_0(\beta)

    with :math:`-\frac{M-1}{2} \leq n \leq \frac{M-1}{2}`.  It closely
    approximates the DPSS (Slepian) window, and by varying `beta` it can
    approximate many other windows (0: rectangular, ~5: Hamming, ~6: Hann,
    ~8.6: Blackman).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    beta : float
        Shape parameter, determines trade-off between main-lobe width and
        side lobe level. As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.  When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    trunc = not sym and M % 2 == 0
    if trunc:
        M += 1
    center = (M - 1) / 2.0
    # Normalized offset from the centre, spanning [-1, 1].
    ratio = (np.arange(0, M) - center) / center
    w = (special.i0(beta * np.sqrt(1 - ratio ** 2.0)) /
         special.i0(beta))
    return w[:-1] if trunc else w
def gaussian(M, std, sym=True):
    r"""Return a Gaussian window.

    The window is :math:`w(n) = e^{-\frac{1}{2}(n/\sigma)^2}`, centered on
    the middle of the window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    std : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # A periodic (sym=False) window of even length is produced by computing
    # a symmetric window one sample longer and dropping the final sample.
    needs_trunc = not sym and M % 2 == 0
    if needs_trunc:
        M = M + 1
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-offsets ** 2 / (2.0 * std * std))
    return w[:-1] if needs_trunc else w
def general_gaussian(M, p, sig, sym=True):
    r"""Return a window with a generalized Gaussian shape.

    The window is :math:`w(n) = e^{-\frac{1}{2}|n/\sigma|^{2p}}`.
    ``p = 1`` reproduces the ordinary Gaussian window and ``p = 0.5`` has
    the shape of a Laplace distribution.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    p : float
        Shape parameter.
    sig : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows: build one extra sample, then trim it.
    needs_trunc = not sym and M % 2 == 0
    if needs_trunc:
        M = M + 1
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-0.5 * np.abs(offsets / sig) ** (2 * p))
    return w[:-1] if needs_trunc else w
# `chebwin` contributed by Kumar Appaiah.

def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1

    Notes
    -----
    This window optimizes for the narrowest main lobe width for a given order
    `M` and sidelobe equiripple attenuation `at`, using Chebyshev
    polynomials.  It was originally developed by Dolph to optimize the
    directionality of radio antenna arrays.

    Unlike most windows, the Dolph-Chebyshev is defined in terms of its
    frequency response:

    .. math:: W(k) = \frac
              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
              {\cosh[M \cosh^{-1}(\beta)]}

    where

    .. math:: \beta = \cosh \left [\frac{1}{M}
              \cosh^{-1}(10^\frac{A}{20}) \right ]

    and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).

    The time domain window is then generated using the IFFT, so
    power-of-two `M` are the fastest to generate, and prime number `M` are
    the slowest.

    The equiripple condition in the frequency domain creates impulses in the
    time domain, which appear at the ends of the window.

    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe level",
           Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
           American Meteorological Society (April 1997)
           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
           No. 1, January 1978

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.chebwin(51, at=100)
    >>> plt.plot(window)
    >>> plt.title("Dolph-Chebyshev window (100 dB)")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    # Below ~45 dB the window's noise bandwidth behaves non-monotonically,
    # so warn (but still compute) for small attenuations.
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')

    odd = M % 2
    # Even-length periodic window: compute one extra sample, trim at the end.
    if not sym and not odd:
        M = M + 1

    # compute the parameter beta
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)
    # Find the window's DFT coefficients
    # Use analytic definition of Chebyshev polynomial instead of expansion
    # from scipy.special. Using the expansion in scipy.special leads to errors.
    # The three branches cover |x|>1 (cosh form) and |x|<=1 (cos form);
    # the (1 - 2*(order % 2)) factor supplies the sign for x < -1.
    p = np.zeros(x.shape)
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))

    # Appropriate IDFT and filling up
    # depending on even/odd M
    # NOTE(review): `fft` here is presumably scipy.fftpack.fft imported at
    # module top (outside this chunk) -- confirm.
    if M % 2:
        w = np.real(fft(p))
        n = (M + 1) // 2
        w = w[:n]
        # Mirror the half-window to build the full symmetric window.
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        # Even M needs a half-sample phase shift before the transform.
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(fft(p))
        n = M // 2 + 1
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    # Normalize so the peak value is exactly 1.
    w = w / max(w)
    if not sym and not odd:
        w = w[:-1]
    return w
def slepian(M, width, sym=True):
    """Return a digital Slepian (DPSS) window.

    Used to maximize the energy concentration in the main lobe.  Also called
    the digital prolate spheroidal sequence (DPSS).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    width : float
        Bandwidth
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.slepian(51, width=0.3)
    >>> plt.plot(window)
    >>> plt.title("Slepian (DPSS) window (BW=0.3)")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Slepian window (BW=0.3)")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    # Even-length periodic window: compute one extra sample, trim at the end.
    if not sym and not odd:
        M = M + 1

    # our width is the full bandwidth
    width = width / 2
    # to match the old version
    # NOTE(review): width is deliberately halved twice -- the second halving
    # preserves backward compatibility with an older implementation; do not
    # "fix" it without checking callers.
    width = width / 2
    # Build the symmetric tridiagonal matrix (in banded storage: row 0 holds
    # the off-diagonal, row 1 the diagonal) whose top eigenvector is the DPSS.
    m = np.arange(M, dtype='d')
    H = np.zeros((2, M))
    H[0, 1:] = m[1:] * (M - m[1:]) / 2
    H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
    # NOTE(review): `linalg` is presumably scipy.linalg imported at module
    # top (outside this chunk) -- confirm. select_range picks only the
    # largest eigenvalue's eigenvector.
    _, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
    # Normalize the eigenvector so the peak value is 1.
    win = win.ravel() / win.max()
    if not sym and not odd:
        win = win[:-1]
    return win
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    The window is a half-period of a sine, so the maximum value is
    normalized to 1 (though the value 1 does not appear if `M` is even
    and `sym` is True).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window.

    Notes
    -----
    .. versionadded:: 0.13.0
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows: build one extra sample, then trim it.
    needs_trunc = not sym and M % 2 == 0
    if needs_trunc:
        M = M + 1
    w = np.sin(np.pi / M * (np.arange(0, M) + .5))
    return w[:-1] if needs_trunc else w
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    The window is :math:`w(n) = e^{-|n - center| / \tau}`.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    center : float, optional
        Center location of the window.  Defaults to ``(M-1) / 2``.  Must be
        left at its default (None) for symmetric windows.
    tau : float, optional
        Decay constant.  For ``center = 0`` use ``tau = -(M-1) / ln(x)`` if
        ``x`` is the fraction of the window remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design. When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Raises
    ------
    ValueError
        If `sym` is True and a non-default `center` is given.

    References
    ----------
    S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
    Technical Review 3, Bruel & Kjaer, 1987.
    """
    # A symmetric window is only symmetric about the default center.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Even-length periodic windows: build one extra sample, then trim it.
    needs_trunc = not sym and M % 2 == 0
    if needs_trunc:
        M = M + 1
    if center is None:
        center = (M - 1) / 2
    samples = np.arange(0, M)
    w = np.exp(-np.abs(samples - center) / tau)
    return w[:-1] if needs_trunc else w
# Map each window's accepted name aliases to a pair of
# (window function, needs-extra-parameters flag).  The window functions
# referenced here (barthann, bartlett, boxcar, ...) are defined earlier
# in this module, outside this chunk.
_win_equiv_raw = {
    ('barthann', 'brthan', 'bth'): (barthann, False),
    ('bartlett', 'bart', 'brt'): (bartlett, False),
    ('blackman', 'black', 'blk'): (blackman, False),
    ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
    ('bohman', 'bman', 'bmn'): (bohman, False),
    ('boxcar', 'box', 'ones',
        'rect', 'rectangular'): (boxcar, False),
    ('chebwin', 'cheb'): (chebwin, True),
    ('cosine', 'halfcosine'): (cosine, False),
    ('exponential', 'poisson'): (exponential, True),
    ('flattop', 'flat', 'flt'): (flattop, False),
    ('gaussian', 'gauss', 'gss'): (gaussian, True),
    ('general gaussian', 'general_gaussian',
        'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
    ('hamming', 'hamm', 'ham'): (hamming, False),
    ('hanning', 'hann', 'han'): (hann, False),
    ('kaiser', 'ksr'): (kaiser, True),
    ('nuttall', 'nutl', 'nut'): (nuttall, False),
    ('parzen', 'parz', 'par'): (parzen, False),
    ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
    ('triangle', 'triang', 'tri'): (triang, False),
    ('tukey', 'tuk'): (tukey, True),
}

# Fill dict with all valid window name strings
# (flattened alias -> function map used by get_window)
_win_equiv = {}
for k, v in _win_equiv_raw.items():
    for key in k:
        _win_equiv[key] = v[0]

# Keep track of which windows need additional parameters
# (get_window rejects bare string names for these)
_needs_param = set()
for k, v in _win_equiv_raw.items():
    if v[1]:
        _needs_param.update(k)
def get_window(window, Nx, fftbins=True):
    """
    Return a window.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create.  A bare string names a window that
        needs no parameters; a tuple gives the window name followed by its
        parameters; a float is interpreted as the beta parameter of the
        kaiser window.
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True, create a "periodic" window ready to use with ifftshift
        and be multiplied by the result of an fft (SEE ALSO fftfreq).

    Returns
    -------
    get_window : ndarray
        Returns a window of length `Nx` and type `window`

    Examples
    --------
    >>> from scipy import signal
    >>> signal.get_window('triang', 7)
    array([ 0.25,  0.5 ,  0.75,  1.  ,  0.75,  0.5 ,  0.25])
    >>> signal.get_window(('kaiser', 4.0), 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    >>> signal.get_window(4.0, 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    """
    sym = not fftbins
    try:
        # A plain number is shorthand for a kaiser window with that beta.
        beta = float(window)
    except (TypeError, ValueError):
        extra_args = ()
        if isinstance(window, tuple):
            # (name, param1, param2, ...) form.
            winstr = window[0]
            if len(window) > 1:
                extra_args = window[1:]
        elif isinstance(window, string_types):
            # Bare string: only valid for parameter-free windows.
            if window in _needs_param:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            winstr = window
        else:
            raise ValueError("%s as window type is not supported." %
                             str(type(window)))
        try:
            winfunc = _win_equiv[winstr]
        except KeyError:
            raise ValueError("Unknown window type.")
        return winfunc(*((Nx,) + extra_args + (sym,)))
    else:
        return kaiser(Nx, beta, sym)
| bsd-3-clause |
Insight-book/data-science-from-scratch | scratch/recommender_systems.py | 2 | 12803 | users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
from collections import Counter
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests)
from typing import List, Tuple
def most_popular_new_interests(
        user_interests: List[str],
        max_results: int = 5) -> List[Tuple[str, int]]:
    """Suggest up to `max_results` of the globally most popular interests
    that are not already among `user_interests`, as (interest, count)
    pairs ordered by overall frequency."""
    ranked = [(interest, count)
              for interest, count in popular_interests.most_common()
              if interest not in user_interests]
    return ranked[:max_results]
unique_interests = sorted({interest
for user_interests in users_interests
for interest in user_interests})
assert unique_interests[:6] == [
'Big Data',
'C++',
'Cassandra',
'HBase',
'Hadoop',
'Haskell',
# ...
]
def make_user_interest_vector(user_interests: List[str]) -> List[int]:
    """
    Given a list of interests, produce a binary vector whose ith element is 1
    if unique_interests[i] is in the list, 0 otherwise.
    """
    # Relies on the module-level, sorted `unique_interests` list defined above.
    return [1 if interest in user_interests else 0
            for interest in unique_interests]
user_interest_vectors = [make_user_interest_vector(user_interests)
for user_interests in users_interests]
from scratch.nlp import cosine_similarity
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_vectors]
for interest_vector_i in user_interest_vectors]
# Users 0 and 9 share interests in Hadoop, Java, and Big Data
assert 0.56 < user_similarities[0][9] < 0.58, "several shared interests"
# Users 0 and 8 share only one interest: Big Data
assert 0.18 < user_similarities[0][8] < 0.20, "only one shared interest"
def most_similar_users_to(user_id: int) -> List[Tuple[int, float]]:
    """Return (other_user_id, similarity) pairs for every other user with
    nonzero cosine similarity to `user_id`, most similar first."""
    candidates = []
    for other_id, similarity in enumerate(user_similarities[user_id]):
        if other_id != user_id and similarity > 0:
            candidates.append((other_id, similarity))
    # list.sort is stable, like sorted(), so ties keep their original order.
    candidates.sort(key=lambda pair: pair[-1], reverse=True)
    return candidates
most_similar_to_zero = most_similar_users_to(0)
user, score = most_similar_to_zero[0]
assert user == 9
assert 0.56 < score < 0.57
user, score = most_similar_to_zero[1]
assert user == 1
assert 0.33 < score < 0.34
from collections import defaultdict
def user_based_suggestions(user_id: int,
                           include_current_interests: bool = False):
    """User-based collaborative filtering: weight each interest by the
    summed similarity of the users who hold it, and return a list of
    (interest, weight) pairs sorted by weight, highest first.  Interests
    the user already has are dropped unless `include_current_interests`."""
    # Accumulate similarity weight per interest across all similar users.
    weights = defaultdict(float)
    for other_id, similarity in most_similar_users_to(user_id):
        for interest in users_interests[other_id]:
            weights[interest] += similarity

    ranked = sorted(weights.items(),
                    key=lambda pair: pair[-1],
                    reverse=True)

    if include_current_interests:
        return ranked
    current = users_interests[user_id]
    return [(interest, weight)
            for interest, weight in ranked
            if interest not in current]
ubs0 = user_based_suggestions(0)
interest, score = ubs0[0]
assert interest == 'MapReduce'
assert 0.56 < score < 0.57
interest, score = ubs0[1]
assert interest == 'MongoDB'
assert 0.50 < score < 0.51
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_vectors]
for j, _ in enumerate(unique_interests)]
[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id: int):
    """Return (interest_name, similarity) pairs for every other interest
    with nonzero similarity to `interest_id`, most similar first."""
    row = interest_similarities[interest_id]
    pairs = [(unique_interests[other_id], similarity)
             for other_id, similarity in enumerate(row)
             if other_id != interest_id and similarity > 0]
    # list.sort is stable, like sorted(), so ties keep their original order.
    pairs.sort(key=lambda pair: pair[-1], reverse=True)
    return pairs
msit0 = most_similar_interests_to(0)
assert msit0[0][0] == 'Hadoop'
assert 0.815 < msit0[0][1] < 0.817
assert msit0[1][0] == 'Java'
assert 0.666 < msit0[1][1] < 0.667
def item_based_suggestions(user_id: int,
                           include_current_interests: bool = False):
    """Item-based collaborative filtering: for each interest the user has,
    add up the similarities of its most similar interests, and return
    (interest, weight) pairs sorted by weight, highest first.  Interests
    the user already has are dropped unless `include_current_interests`."""
    weights = defaultdict(float)
    for interest_id, is_interested in enumerate(user_interest_vectors[user_id]):
        if is_interested == 1:
            for interest, similarity in most_similar_interests_to(interest_id):
                weights[interest] += similarity

    ranked = sorted(weights.items(),
                    key=lambda pair: pair[-1],
                    reverse=True)

    if include_current_interests:
        return ranked
    current = users_interests[user_id]
    return [(interest, weight)
            for interest, weight in ranked
            if interest not in current]
[('MapReduce', 1.861807319565799),
('Postgres', 1.3164965809277263),
('MongoDB', 1.3164965809277263),
('NoSQL', 1.2844570503761732),
('programming languages', 0.5773502691896258),
('MySQL', 0.5773502691896258),
('Haskell', 0.5773502691896258),
('databases', 0.5773502691896258),
('neural networks', 0.4082482904638631),
('deep learning', 0.4082482904638631),
('C++', 0.4082482904638631),
('artificial intelligence', 0.4082482904638631),
('Python', 0.2886751345948129),
('R', 0.2886751345948129)]
ibs0 = item_based_suggestions(0)
assert ibs0[0][0] == 'MapReduce'
assert 1.86 < ibs0[0][1] < 1.87
assert ibs0[1][0] in ('Postgres', 'MongoDB') # A tie
assert 1.31 < ibs0[1][1] < 1.32
def main():
    """Run the MovieLens 100k demo: load movies and ratings, print average
    ratings for the Star Wars films, then train a 2-d matrix-factorization
    embedding model and inspect it with PCA.

    NOTE(review): expects `u.item` / `u.data` in the current directory and
    the book's `scratch` package importable -- confirm before running.
    """
    # Replace this with the locations of your files
    # This points to the current directory, modify if your files are elsewhere.
    MOVIES = "u.item"   # pipe-delimited: movie_id|title|...
    RATINGS = "u.data"  # tab-delimited: user_id, movie_id, rating, timestamp

    from typing import NamedTuple

    class Rating(NamedTuple):
        user_id: str     # kept as str: MovieLens ids are only used as keys
        movie_id: str
        rating: float

    import csv
    # We specify this encoding to avoid a UnicodeDecodeError.
    # see: https://stackoverflow.com/a/53136168/1076346
    with open(MOVIES, encoding="iso-8859-1") as f:
        reader = csv.reader(f, delimiter="|")
        movies = {movie_id: title for movie_id, title, *_ in reader}

    # Create a list of [Rating]
    with open(RATINGS, encoding="iso-8859-1") as f:
        reader = csv.reader(f, delimiter="\t")
        ratings = [Rating(user_id, movie_id, float(rating))
                   for user_id, movie_id, rating, _ in reader]

    # 1682 movies rated by 943 users
    assert len(movies) == 1682
    assert len(list({rating.user_id for rating in ratings})) == 943

    import re

    # Data structure for accumulating ratings by movie_id
    star_wars_ratings = {movie_id: []
                         for movie_id, title in movies.items()
                         if re.search("Star Wars|Empire Strikes|Jedi", title)}

    # Iterate over ratings, accumulating the Star Wars ones
    for rating in ratings:
        if rating.movie_id in star_wars_ratings:
            star_wars_ratings[rating.movie_id].append(rating.rating)

    # Compute the average rating for each movie
    avg_ratings = [(sum(title_ratings) / len(title_ratings), movie_id)
                   for movie_id, title_ratings in star_wars_ratings.items()]

    # And then print them in order
    for avg_rating, movie_id in sorted(avg_ratings, reverse=True):
        print(f"{avg_rating:.2f} {movies[movie_id]}")

    # Fixed seed so the shuffle (and hence the splits) is reproducible.
    import random
    random.seed(0)
    random.shuffle(ratings)

    split1 = int(len(ratings) * 0.7)
    split2 = int(len(ratings) * 0.85)

    train = ratings[:split1]              # 70% of the data
    validation = ratings[split1:split2]   # 15% of the data
    test = ratings[split2:]               # 15% of the data

    # Baseline model: always predict the global mean training rating.
    avg_rating = sum(rating.rating for rating in train) / len(train)
    baseline_error = sum((rating.rating - avg_rating) ** 2
                         for rating in test) / len(test)

    # This is what we hope to do better than
    assert 1.26 < baseline_error < 1.27

    # Embedding vectors for matrix factorization model
    from scratch.deep_learning import random_tensor

    EMBEDDING_DIM = 2

    # Find unique ids
    user_ids = {rating.user_id for rating in ratings}
    movie_ids = {rating.movie_id for rating in ratings}

    # Then create a random vector per id
    user_vectors = {user_id: random_tensor(EMBEDDING_DIM)
                    for user_id in user_ids}
    movie_vectors = {movie_id: random_tensor(EMBEDDING_DIM)
                     for movie_id in movie_ids}

    # Training loop for matrix factorization model
    from typing import List
    import tqdm
    from scratch.linear_algebra import dot

    def loop(dataset: List[Rating],
             learning_rate: float = None) -> None:
        # One pass over `dataset`; trains (SGD) only when a learning rate
        # is given, otherwise just reports the average squared error.
        with tqdm.tqdm(dataset) as t:
            loss = 0.0
            for i, rating in enumerate(t):
                movie_vector = movie_vectors[rating.movie_id]
                user_vector = user_vectors[rating.user_id]
                predicted = dot(user_vector, movie_vector)
                error = predicted - rating.rating
                loss += error ** 2

                if learning_rate is not None:
                    # predicted = m_0 * u_0 + ... + m_k * u_k
                    # So each u_j enters output with coefficient m_j
                    # and each m_j enters output with coefficient u_j
                    user_gradient = [error * m_j for m_j in movie_vector]
                    movie_gradient = [error * u_j for u_j in user_vector]

                    # Take gradient steps (updates the dict-held vectors
                    # in place)
                    for j in range(EMBEDDING_DIM):
                        user_vector[j] -= learning_rate * user_gradient[j]
                        movie_vector[j] -= learning_rate * movie_gradient[j]

                t.set_description(f"avg loss: {loss / (i + 1)}")

    # 20 epochs with a geometrically decaying learning rate.
    learning_rate = 0.05
    for epoch in range(20):
        learning_rate *= 0.9
        print(epoch, learning_rate)
        loop(train, learning_rate=learning_rate)

    # Evaluate (no learning rate => no updates) on held-out data.
    loop(validation)
    loop(test)

    from scratch.working_with_data import pca, transform

    original_vectors = [vector for vector in movie_vectors.values()]
    components = pca(original_vectors, 2)

    ratings_by_movie = defaultdict(list)
    for rating in ratings:
        ratings_by_movie[rating.movie_id].append(rating.rating)

    # (movie_id, average rating, title, pca-transformed embedding)
    vectors = [
        (movie_id,
         sum(ratings_by_movie[movie_id]) / len(ratings_by_movie[movie_id]),
         movies[movie_id],
         vector)
        for movie_id, vector in zip(movie_vectors.keys(),
                                    transform(original_vectors, components))
    ]

    # Print top 25 and bottom 25 by first principal component
    print(sorted(vectors, key=lambda v: v[-1][0])[:25])
    print(sorted(vectors, key=lambda v: v[-1][0])[-25:])
if __name__ == "__main__": main() | unlicense |
lfairchild/PmagPy | programs/hysteresis_magic2.py | 2 | 13221 | #!/usr/bin/env python
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
    """
    NAME
        hysteresis_magic.py
    DESCRIPTION
        calculates hysteresis parameters and saves them in rmag_hysteresis format file
        makes plots if option selected
    SYNTAX
        hysteresis_magic.py [command line options]
    OPTIONS
        -h prints help message and quits
        -usr USER: identify user, default is ""
        -f: specify input file, default is agm_measurements.txt
        -fh: specify rmag_hysteresis.txt input file
        -F: specify output file, default is rmag_hysteresis.txt
        -P: do not make the plots
        -spc SPEC: specify specimen name to plot and quit
        -sav save all plots and quit
        -fmt [png,svg,eps,jpg]
    """
    args = sys.argv
    PLT = 1  # 1 => interactive plotting enabled
    plots = 0  # 1 => save plots to files
    user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
    pltspec = ""
    dir_path = '.'
    fmt = 'svg'
    verbose = pmagplotlib.verbose
    version_num = pmag.get_version()
    # --- parse command-line options (see docstring above) ---
    if '-WD' in args:
        ind = args.index('-WD')
        dir_path = args[ind+1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if "-usr" in args:
        ind = args.index("-usr")
        user = args[ind+1]
    if '-f' in args:
        ind = args.index("-f")
        meas_file = args[ind+1]
    if '-F' in args:
        ind = args.index("-F")
        rmag_out = args[ind+1]
    if '-fh' in args:
        ind = args.index("-fh")
        rmag_file = args[ind+1]
        rmag_file = dir_path+'/'+rmag_file
    if '-P' in args:
        PLT = 0
        irm_init, imag_init = -1, -1  # figure state flags when plotting is off
    if '-sav' in args:
        verbose = 0
        plots = 1
    if '-spc' in args:
        ind = args.index("-spc")
        pltspec = args[ind+1]
        verbose = 0
        plots = 1
    if '-fmt' in args:
        ind = args.index("-fmt")
        fmt = args[ind+1]
    rmag_out = dir_path+'/'+rmag_out
    meas_file = dir_path+'/'+meas_file
    rmag_rem = dir_path+"/rmag_remanence.txt"
    #
    # read the measurement file and check its MagIC table type
    #
    meas_data, file_type = pmag.magic_read(meas_file)
    if file_type != 'magic_measurements':
        print(main.__doc__)
        print('bad file')
        sys.exit()
    #
    # initialize some variables
    # define figure numbers for hyst,deltaM,DdeltaM curves
    HystRecs, RemRecs = [], []
    HDD = {}  # maps plot name -> matplotlib figure number (0 = no figure)
    if verbose:
        if verbose and PLT:  # NOTE(review): inner test repeats 'verbose'
            print("Plots may be on top of each other - use mouse to place ")
    if PLT:
        HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
        pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
        pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
        pmagplotlib.plot_init(HDD['hyst'], 5, 5)
        imag_init = 0
        irm_init = 0
    else:
        HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
    # optional file of prior hysteresis interpretations
    if rmag_file != "":
        hyst_data, file_type = pmag.magic_read(rmag_file)
    #
    # get list of unique experiment names and specimen names
    #
    experiment_names, sids = [], []
    for rec in meas_data:
        meths = rec['magic_method_codes'].split(':')
        methods = []
        for meth in meths:
            methods.append(meth.strip())
        if 'LP-HYS' in methods:
            # synthetic samples take their synthetic name as the specimen name
            if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
                rec['er_specimen_name'] = rec['er_synthetic_name']
            if rec['magic_experiment_name'] not in experiment_names:
                experiment_names.append(rec['magic_experiment_name'])
            if rec['er_specimen_name'] not in sids:
                sids.append(rec['er_specimen_name'])
    #
    k = 0  # index of the specimen currently being processed
    locname = ''
    if pltspec != "":
        # jump straight to the requested specimen
        k = sids.index(pltspec)
        print(sids[k])
    # --- main per-specimen loop; k is advanced by user interaction below ---
    while k < len(sids):
        s = sids[k]
        if verbose and PLT:
            print(s, k+1, 'out of ', len(sids))
        #
        # collect this specimen's measurements:
        # B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
        B, M, Bdcd, Mdcd = [], [], [], []
        Bimag, Mimag = [], []  # Bimag,Mimag for initial magnetization curves
        first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
        for rec in meas_data:
            methcodes = rec['magic_method_codes'].split(':')
            meths = []
            for meth in methcodes:
                meths.append(meth.strip())
            if rec['er_specimen_name'] == s and "LP-HYS" in meths:
                B.append(float(rec['measurement_lab_field_dc']))
                M.append(float(rec['measurement_magn_moment']))
                # the first matching record seeds the output record's metadata
                if first_rec == 1:
                    e = rec['magic_experiment_name']
                    HystRec = {}
                    first_rec = 0
                if "er_location_name" in list(rec.keys()):
                    HystRec["er_location_name"] = rec["er_location_name"]
                    locname = rec['er_location_name'].replace('/', '-')
                if "er_sample_name" in list(rec.keys()):
                    HystRec["er_sample_name"] = rec["er_sample_name"]
                if "er_site_name" in list(rec.keys()):
                    HystRec["er_site_name"] = rec["er_site_name"]
                if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
                    HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
                else:
                    HystRec["er_specimen_name"] = rec["er_specimen_name"]
            if rec['er_specimen_name'] == s and "LP-IRM-DCD" in meths:
                Bdcd.append(float(rec['treatment_dc_field']))
                Mdcd.append(float(rec['measurement_magn_moment']))
                if first_dcd_rec == 1:
                    RemRec = {}
                    irm_exp = rec['magic_experiment_name']
                    first_dcd_rec = 0
                if "er_location_name" in list(rec.keys()):
                    RemRec["er_location_name"] = rec["er_location_name"]
                if "er_sample_name" in list(rec.keys()):
                    RemRec["er_sample_name"] = rec["er_sample_name"]
                if "er_site_name" in list(rec.keys()):
                    RemRec["er_site_name"] = rec["er_site_name"]
                if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
                    RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
                else:
                    RemRec["er_specimen_name"] = rec["er_specimen_name"]
            if rec['er_specimen_name'] == s and "LP-IMAG" in meths:
                if first_imag_rec == 1:
                    imag_exp = rec['magic_experiment_name']
                    first_imag_rec = 0
                Bimag.append(float(rec['measurement_lab_field_dc']))
                Mimag.append(float(rec['measurement_magn_moment']))
        #
        # now plot the hysteresis curve
        #
        if len(B) > 0:
            hmeths = []
            # 'meths' still holds the codes of the LAST record scanned above
            for meth in meths:
                hmeths.append(meth)
            hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
            if verbose and PLT:
                pmagplotlib.draw_figs(HDD)
            #
            # get prior interpretations from hyst_data
            if rmag_file != "":
                hpars_prior = {}
                for rec in hyst_data:
                    if rec['magic_experiment_names'] == e:
                        if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
                            hpars_prior['hysteresis_mr_moment'] = rec['hysteresis_mr_moment']
                            hpars_prior['hysteresis_ms_moment'] = rec['hysteresis_ms_moment']
                            hpars_prior['hysteresis_bc'] = rec['hysteresis_bc']
                            hpars_prior['hysteresis_bcr'] = rec['hysteresis_bcr']
                            break
                if verbose:
                    pmagplotlib.plot_hpars(HDD, hpars_prior, 'ro')
            else:
                if verbose:
                    pmagplotlib.plot_hpars(HDD, hpars, 'bs')
            # copy the newly computed parameters into the output record
            HystRec['hysteresis_mr_moment'] = hpars['hysteresis_mr_moment']
            HystRec['hysteresis_ms_moment'] = hpars['hysteresis_ms_moment']
            HystRec['hysteresis_bc'] = hpars['hysteresis_bc']
            HystRec['hysteresis_bcr'] = hpars['hysteresis_bcr']
            HystRec['hysteresis_xhf'] = hpars['hysteresis_xhf']
            HystRec['magic_experiment_names'] = e
            HystRec['magic_software_packages'] = version_num
            if hpars["magic_method_codes"] not in hmeths:
                hmeths.append(hpars["magic_method_codes"])
            # join method codes back into a colon-delimited string
            methods = ""
            for meth in hmeths:
                methods = methods+meth.strip()+":"
            HystRec["magic_method_codes"] = methods[:-1]
            HystRec["er_citation_names"] = "This study"
            HystRecs.append(HystRec)
        #
        if len(Bdcd) > 0:
            rmeths = []
            for meth in meths:
                rmeths.append(meth)
            if verbose and PLT:
                print('plotting IRM')
            if irm_init == 0:
                # lazily create the IRM figure the first time it is needed
                HDD['irm'] = 5
                pmagplotlib.plot_init(HDD['irm'], 5, 5)
                irm_init = 1
            rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
            RemRec['remanence_mr_moment'] = rpars['remanence_mr_moment']
            RemRec['remanence_bcr'] = rpars['remanence_bcr']
            RemRec['magic_experiment_names'] = irm_exp
            if rpars["magic_method_codes"] not in meths:
                meths.append(rpars["magic_method_codes"])
            methods = ""
            for meth in rmeths:
                methods = methods+meth.strip()+":"
            RemRec["magic_method_codes"] = methods[:-1]
            RemRec["er_citation_names"] = "This study"
            RemRecs.append(RemRec)
        else:
            if irm_init:
                pmagplotlib.clearFIG(HDD['irm'])
        if len(Bimag) > 0:
            if verbose:
                print('plotting initial magnetization curve')
            # first normalize by Ms
            Mnorm = []
            for m in Mimag:
                Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
            if imag_init == 0:
                HDD['imag'] = 4
                pmagplotlib.plot_init(HDD['imag'], 5, 5)
                imag_init = 1
            pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
        else:
            if imag_init:
                pmagplotlib.clearFIG(HDD['imag'])
        #
        # save figures and/or prompt the user for what to do next
        files = {}
        if plots:
            if pltspec != "":
                s = pltspec
            files = {}
            for key in list(HDD.keys()):
                files[key] = locname+'_'+s+'_'+key+'.'+fmt
            pmagplotlib.save_plots(HDD, files)
            if pltspec != "":
                sys.exit()
        if verbose and PLT:
            pmagplotlib.draw_figs(HDD)
            ans = input(
                "S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
            if ans == "a":
                files = {}
                for key in list(HDD.keys()):
                    files[key] = locname+'_'+s+'_'+key+'.'+fmt
                pmagplotlib.save_plots(HDD, files)
            if ans == '':
                k += 1
            if ans == "p":
                # 'p' backs up one specimen, discarding its saved record
                del HystRecs[-1]
                k -= 1
            if ans == 'q':
                print("Good bye")
                sys.exit()
            if ans == 's':
                keepon = 1
                specimen = input(
                    'Enter desired specimen name (or first part there of): ')
                while keepon == 1:
                    try:
                        k = sids.index(specimen)
                        keepon = 0
                    except:  # NOTE(review): bare except; ValueError intended
                        tmplist = []
                        for qq in range(len(sids)):
                            if specimen in sids[qq]:
                                tmplist.append(sids[qq])
                        print(specimen, " not found, but this was: ")
                        print(tmplist)
                        specimen = input('Select one or try again\n ')
                        k = sids.index(specimen)
        else:
            k += 1
        if len(B) == 0 and len(Bdcd) == 0:
            if verbose:
                print('skipping this one - no hysteresis data')
            k += 1
    # NOTE(review): 'ans' is only bound inside the interactive branch above
    if rmag_out == "" and ans == 's' and verbose:
        really = input(
            " Do you want to overwrite the existing rmag_hystersis.txt file? 1/[0] ")
        if really == "":
            print('i thought not - goodbye')
            sys.exit()
        rmag_out = "rmag_hysteresis.txt"
    if len(HystRecs) > 0:
        pmag.magic_write(rmag_out, HystRecs, "rmag_hysteresis")
        if verbose:
            print("hysteresis parameters saved in ", rmag_out)
    if len(RemRecs) > 0:
        pmag.magic_write(rmag_rem, RemRecs, "rmag_remanence")
        if verbose:
            print("remanence parameters saved in ", rmag_rem)
# license: bsd-3-clause
# source: nicproulx/mne-python — tutorials/plot_background_filtering.py
# -*- coding: utf-8 -*-
r"""
.. _tut_background_filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus [1]_ and
Ifeachor and Jervis [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* 2015 [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`tut_artifacts_filter` tutorial.
.. contents::
:local:
Problem statement
=================
The practical issues with filtering electrophysiological data are covered
well by Widmann *et al.* in [7]_, in a follow-up to an article where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase SNR, but if it is not used carefully,
it can distort data. Here we hope to cover some filtering basics so
users can better understand filtering tradeoffs, and why MNE-Python has
chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-M}} \\
&= \frac{\sum_0^Mb_kz^{-k}}{\sum_1^Na_kz^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\
&= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over:
1. The numerator coefficients :math:`b_k`, which get multiplied by
the previous input :math:`x(n-k)` values, and
2. The denominator coefficients :math:`a_k`, which get multiplied by
the previous output :math:`y(n-k)` values.
Note that these summations in :eq:`summations` correspond nicely to
(1) a weighted `moving average`_ and (2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in [1]_, FIR and IIR have different tradeoffs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
2015 [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002 [2]_, p. 321),
...FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always tradeoffs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency tradeoff, and it will
show up below.
FIR Filters
===========
First we will focus first on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try designing a low-pass filter, and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG data.
import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.  # sample rate (Hz) used for all examples below
f_p = 40.  # pass-band edge (Hz) of the low-pass designs
flim = (1., sfreq / 2.)  # limits for plotting
###############################################################################
# Take for example an ideal low-pass filter, which would give a value of 1 in
# the pass-band (up to frequency :math:`f_p`) and a value of 0 in the stop-band
# (down to frequency :math:`f_s`) such that :math:`f_p=f_s=40` Hz here
# (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2.  # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]  # brick-wall breakpoints: pass up to f_p, then stop
gain = [1, 1, 0, 0]  # unity gain in the pass-band, zero in the stop-band
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontunity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in frequency is actually a sinc_ function
# in time, which requires an infinite number of samples, and thus infinite
# time, to represent. So although this filter has ideal frequency suppression,
# it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq)) + 1  # 0.1 sec of taps (odd length)
t = np.arange(-n // 2, n // 2) / sfreq  # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)  # sinc in time <-> brick wall in freq
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 sec)', flim=flim)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 sec) gets us a
# bit better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
n = int(round(1. * sfreq)) + 1  # same construction, 1 sec of taps
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 sec)', flim=flim)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 sec),
# with a resulting larger x-axis:
n = int(round(10. * sfreq)) + 1  # same construction, 10 sec of taps
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 sec)', flim=flim)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire second. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`, `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
#            a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
#            are fairly simple and we require precise control of all frequency
# regions, here we will use and explore primarily windowed FIR
# design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10  # 10 Hz transition band
f_s = f_p + trans_bandwidth  # = 50 Hz
freq = [0., f_p, f_s, nyq]  # now with a transition region between f_p and f_s
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a
# smoother slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 sec filter:
h = signal.firwin2(n, freq, gain, nyq=nyq)  # windowed FIR design
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (1.0 sec)',
            flim=flim)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1  # shorter filter: 0.5 sec of taps
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.5 sec)',
            flim=flim)
###############################################################################
# But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1  # too short: only 0.2 sec of taps
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.2 sec)',
            flim=flim)
###############################################################################
# If we want a filter that is only 0.1 seconds long, we should probably use
# something more like a 25 Hz transition band (0.2 sec = 5 cycles @ 25 Hz):
trans_bandwidth = 25  # widen the transition band to suit the short filter
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)  # n is still the 0.2 sec length
plot_filter(h, sfreq, freq, gain, 'Windowed 50-Hz transition (0.2 sec)',
            flim=flim)
###############################################################################
# So far we have only discussed *acausal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# after it.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming acausal), minimum-phase filters do not require any
# compensation to achieve small delays in the passband. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the ``minimum_phase`` function (that will be in SciPy 0.19's
# :mod:`scipy.signal`), and note that the falloff is not as steep:
h_min = mne.fixes.minimum_phase(h)  # convert linear-phase FIR to minimum-phase
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random + line). Note that the original, clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.  # total signal duration (sec)
center = 2.  # time at which the Morlet "blip" is centered (sec)
morlet_freq = f_p  # put signal content right at the pass-band edge
tlim = [center - 0.2, center + 0.2]
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]
x = np.zeros(int(sfreq * dur) + 1)
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip  # insert the clean signal
x_orig = x.copy()  # keep the noiseless version for comparison
rng = np.random.RandomState(0)  # fixed seed for reproducibility
x += rng.randn(len(x)) / 1000.  # broadband random noise
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.  # 60 Hz line
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p  # shallow cutoff: transition scales with f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p)
x_shallow = np.convolve(h, x)[len(h) // 2:]  # compensate the constant delay
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim)
###############################################################################
# This is actually set to become the default type of filter used in MNE-Python
# in 0.14 (see :ref:`tut_filtering_in_python`).
#
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5  # Hz
f_s = f_p + transition_band
filter_dur = 10.  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             h_trans_bandwidth=transition_band,
                             filter_length='%ss' % filter_dur)
# Apply forward, then backward over the reversed signal (zero-phase, twice).
x_steep = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]  # compensate the constant delay
transition_band = 5  # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim)
###############################################################################
# And now an example of a minimum-phase filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             phase='minimum')
x_min = np.convolve(h, x)  # causal filter: no delay compensation applied
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially on signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]


def plot_signal(x, offset):
    """Plot a signal in the time (left axis) and frequency (right axis) domain.

    Parameters
    ----------
    x : array
        Time-domain signal, sampled at the module-level ``sfreq``.
    offset : float
        Vertical offset applied in the time-domain panel so several signals
        can be stacked in a single axis.
    """
    t = np.arange(len(x)) / sfreq
    axes[0].plot(t, x + offset)
    axes[0].set(xlabel='Time (sec)', xlim=t[[0, -1]])
    X = fftpack.fft(x)
    freqs = fftpack.fftfreq(len(x), 1. / sfreq)
    # Keep only the non-negative frequencies for the magnitude plot.
    mask = freqs >= 0
    X = X[mask]
    freqs = freqs[mask]
    axes[1].plot(freqs, 20 * np.log10(np.abs(X)))
    axes[1].set(xlim=flim)


# Stack the six signal variants vertically with fixed offsets.
yticks = np.arange(6) / -30.
yticklabels = ['Original', 'Noisy', 'FIR-shallow (0.14)', 'FIR-steep (0.13)',
               'FIR-steep (MNE-C)', 'Minimum-phase']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
plot_signal(x_mne_c, offset=yticks[4])
plot_signal(x_min, offset=yticks[5])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.200, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter, and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few orders of filter,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# acausal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if acausal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
# Low-order Butterworth low-pass, designed in second-order sections (SOS)
# for numerical stability.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim)
# Eventually this will just be from scipy signal.sosfiltfilt, but 0.18 is
# not widely adopted yet (as of June 2016), so we use our wrapper...
sosfiltfilt = mne.fixes.get_sosfiltfilt()
x_shallow = sosfiltfilt(sos, x)
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given in tut_filtering_basics_ use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used when possible to do IIR filtering.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response:
# Order 8: steeper roll-off, at the cost of a longer impulse response.
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=8', flim=flim)
x_steep = sosfiltfilt(sos, x)
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
                       rp=1)  # dB of acceptable pass-band ripple
plot_filter(dict(sos=sos), sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=1 dB', flim=flim)
###############################################################################
# And if we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
                       rp=6)  # dB of acceptable pass-band ripple
plot_filter(dict(sos=sos), sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=6 dB', flim=flim)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
# Compare the shallow (order-2) and steep (order-8) Butterworth outputs on
# the same stacked time/frequency layout used for the FIR filters above.
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are acausal (zero-phase), can make
# activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen 2011 [3]_, investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet 2012 [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger 2012 [6]_ that
# the problematic low-pass filters from VanRullen 2011 [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, using corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* 2012 [4]_ to:
#
# "...generate a systematic bias easily leading to misinterpretations of
# neural activity.”
#
# In a related paper, Widmann *et al.* 2015 [7]_ also came to suggest a 0.1 Hz
# highpass. And more evidence followed in Tanner *et al.* 2015 [8]_ of such
# distortions. Using data from language ERP studies of semantic and syntactic
# processing (i.e., N400 and P600), using a high-pass above 0.3 Hz caused
# significant effects to be introduced implausibly early when compared to the
# unfiltered data. From this, the authors suggested the optimal high-pass
# value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* 2015 [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV, onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass and
# high-pass filters... No visible distortion to the original waveform
# [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
# Recreate the Tanner et al. (2015) simulation: a single-cycle raised-cosine
# pulse (amplitude 5, duration 0.8 s) embedded in zeros, then low/high-passed.
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
# NOTE(review): scipy.signal.iirfilter expects Wn normalized to the Nyquist
# frequency (sfreq / 2), but these divide by sfreq, halving the effective
# cutoffs relative to the names -- confirm intent before changing (the
# Butterworth examples above divide by ``nyq`` instead).
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
# Zero-phase application of each (b, a) filter; padlen=0 because the signal
# is already padded with zeros well beyond the pulse.
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'  # raw string avoids the invalid '\m' escape
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
# Bug fix: the fourth panel shows the 0.1 Hz *high*-pass (x_hp_p1), so its
# title must be HP, not LP.
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
    ax.plot(t, x, color='0.5')
    ax.plot(t, x_f, color='k', linestyle='--')
    ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
           title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by Kappenman & Luck 2010 [12]_,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving :ref:`ch_sample_data`,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* 2015 [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* 2016 [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* [10]_ rebutted that baseline correction can correct for
# problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
    """Plot ``x`` high-passed at several cutoffs, with and without baseline
    correction.

    Left column: high-pass filtered only. Right column: the same filtered
    signal additionally baseline-corrected by subtracting the pre-stimulus
    (``t < 0``) mean. Rows correspond to 0.1, 0.3, and 0.5 Hz cutoffs.
    """
    all_axes = plt.subplots(3, 2)[1]
    for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
        for ci, ax in enumerate(axes):
            if ci == 0:
                # NOTE(review): ``freq / sfreq`` normalizes by sfreq rather
                # than Nyquist (sfreq / 2) -- confirm this matches the
                # intended cutoffs shown in the row labels.
                iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
                                          output='sos')
                x_hp = sosfiltfilt(iir_hp, x, padlen=0)
            else:
                # Right column reuses x_hp from the left column and applies
                # baseline correction (subtract the pre-stimulus mean).
                x_hp -= x_hp[t < 0].mean()
            ax.plot(t, x, color='0.5')
            ax.plot(t, x_hp, color='k', linestyle='--')
            if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
            ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
            ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
                          horizontalalignment='right')
    mne.viz.adjust_axes(axes)
    mne.viz.tight_layout()
    # NOTE(review): ``title`` is not defined in this function; it refers to
    # the module-level name left over from the earlier plotting loop.
    # Confirm the suptitle is meant to reuse that value.
    plt.suptitle(title)
    plt.show()


baseline_plot(x)
###############################################################################
# In response, Maess *et al.* 2016 [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` with some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multielectrode recordings
#            the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
# Add one full cosine cycle of "activity" spanning the pre-stimulus
# (baseline) period, then re-run the baseline comparison plot.
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin2`. In Widmann *et al.* 2015 [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
# distance from the passband edge to the critical frequency.”
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 45.0 | 11.25 | 5.0 |
# +------------------+-------------------+-------------------+
# | 48.0 | 12.0 | 2.0 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 6.2, 6.6, or 11.0 for the Hann, Hamming,
# or Blackman windows, respectively as selected by the ``fir_window``
# argument.
#
# .. note:: These multiplicative factors are double what is given in
# Ifeachor and Jervis [2]_ (p. 357). The window functions have a
# smearing effect on the frequency response; I&J thus take the
# approach of setting the stop frequency as
# :math:`f_s = f_p + f_{trans} / 2.`, but our stated definitions of
# :math:`f_s` and :math:`f_{trans}` do not
# allow us to do this in a nice way. Instead, we increase our filter
# length to achieve acceptable (20+ dB) attenuation by
# :math:`f_s = f_p + f_{trans}`, and excellent (50+ dB)
# attenuation by :math:`f_s + f_{trans}` (and usually earlier).
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut_artifacts_filter`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M-EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in [7]_. Briefly:
#
# * EEGLAB
# MNE-Python in 0.14 defaults to behavior very similar to that of EEGLAB,
# see the `EEGLAB filtering FAQ`_ for more information.
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
#   For more information, see e.g. `FieldTrip band-pass documentation`_.
#
# Summary
# =======
#
# When filtering, there are always tradeoffs that should be considered.
# One important tradeoff is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. http://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. http://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artefacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
# On high-pass filter artifacts (they’re real) and baseline correction
# (it’s a good idea) in ERP/ERMF analysis.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis-continued
# discussion. Journal of Neuroscience Methods, 266, 171–172.
# Journal of Neuroscience Methods, 266, 166–170.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: http://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: http://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: http://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _fieldtrip band-pass documentation: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter # noqa
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/base.py | 11 | 18381 | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as _ChangedBehaviorWarning
@deprecated("ChangedBehaviorWarning has been moved into the sklearn.exceptions"
            " module. It will not be available here from version 0.19")
class ChangedBehaviorWarning(_ChangedBehaviorWarning):
    """Deprecated alias kept for backward compatibility.

    Use ``sklearn.exceptions.ChangedBehaviorWarning`` instead.
    """
    pass
##############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.

    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned

    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.

    Raises
    ------
    TypeError
        If ``safe`` is true and ``estimator`` does not implement
        ``get_params``.
    RuntimeError
        If the clone's constructor does not faithfully store the parameters
        it was given (detected by the sanity check below).
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Clone every member, preserving the container type.
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    # Recursively clone the parameter values themselves (falling back to
    # deepcopy for plain objects, via safe=False).
    for name, param in six.iteritems(new_object_params):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)

    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                # Cheap proxy for elementwise equality: shape, dtype, and
                # the first/last elements.
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Cheap proxy: class, first/last stored values, nnz, shape.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            new_obj_val = new_object_params[name]
            params_set_val = params_set[name]
            # The following construct is required to check equality on special
            # singletons such as np.nan that are not equal to them-selves:
            equality_test = (new_obj_val == params_set_val or
                             new_obj_val is params_set_val)
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'

    Parameters
    ----------
    params: dict
        The dictionary to pretty print

    offset: int
        The offset in characters to add at the begin of each line.

    printer:
        The function to convert entries to strings, typically
        the builtin str or repr

    """
    # Temporarily shorten numpy's array repr so large arrays stay compact.
    saved_options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)

    pieces = []
    current_width = offset
    separator = ',\n' + (1 + offset // 2) * ' '
    for index, (key, value) in enumerate(sorted(six.iteritems(params))):
        if type(value) is float:
            # str() yields a consistent float representation across
            # architectures and versions.
            entry = '%s=%s' % (key, str(value))
        else:
            entry = '%s=%s' % (key, printer(value))
        if len(entry) > 500:
            # Truncate very long reprs, keeping the head and tail.
            entry = entry[:300] + '...' + entry[-100:]
        if index > 0:
            # Wrap onto a new (indented) line when this entry would overflow,
            # or when its repr already spans multiple lines.
            if current_width + len(entry) >= 75 or '\n' in entry:
                pieces.append(separator)
                current_width = len(separator)
            else:
                pieces.append(', ')
                current_width += 2
        pieces.append(entry)
        current_width += len(entry)

    np.set_printoptions(**saved_options)
    joined = ''.join(pieces)
    # Strip trailing space to avoid nightmare in doctests
    return '\n'.join(line.rstrip(' ') for line in joined.split('\n'))
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
    """Return ``ClassName(param=value, ...)`` using shallow params."""
    name = self.__class__.__name__
    formatted = _pprint(self.get_params(deep=False), offset=len(name))
    return '%s(%s)' % (name, formatted)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn."""

    _estimator_type = "classifier"

    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.

        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.

        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True labels for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .metrics import accuracy_score
        y_pred = self.predict(X)
        return accuracy_score(y, y_pred, sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn."""

    _estimator_type = "regressor"

    def score(self, X, y, sample_weight=None):
        """Returns the coefficient of determination R^2 of the prediction.

        The coefficient R^2 is defined as (1 - u/v), where u is the regression
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
        Best possible score is 1.0 and it can be negative (because the
        model can be arbitrarily worse). A constant model that always
        predicts the expected value of y, disregarding the input features,
        would get a R^2 score of 0.0.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.

        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True values for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .metrics import r2_score
        y_pred = self.predict(X)
        return r2_score(y, y_pred, sample_weight=sample_weight,
                        multioutput='variance_weighted')
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn."""

    _estimator_type = "clusterer"

    def fit_predict(self, X, y=None):
        """Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            Cluster labels.
        """
        # Naive default: fit, then read back the ``labels_`` attribute.
        # Subclasses may override with a fused, more efficient pass.
        self.fit(X)
        return self.labels_
class BiclusterMixin(object):
    """Mixin class for all bicluster estimators in scikit-learn"""

    @property
    def biclusters_(self):
        """Convenient way to get row and column indicators together.

        Returns the ``rows_`` and ``columns_`` members.
        """
        return self.rows_, self.columns_

    def get_indices(self, i):
        """Row and column indices of the i'th bicluster.

        Only works if ``rows_`` and ``columns_`` attributes exist.

        Returns
        -------
        row_ind : np.array, dtype=np.intp
            Indices of rows in the dataset that belong to the bicluster.
        col_ind : np.array, dtype=np.intp
            Indices of columns in the dataset that belong to the bicluster.
        """
        row_mask = self.rows_[i]
        col_mask = self.columns_[i]
        # Boolean indicator rows -> integer index arrays.
        return np.nonzero(row_mask)[0], np.nonzero(col_mask)[0]

    def get_shape(self, i):
        """Shape of the i'th bicluster.

        Returns
        -------
        shape : (int, int)
            Number of rows and columns (resp.) in the bicluster.
        """
        return tuple(len(indices) for indices in self.get_indices(i))

    def get_submatrix(self, i, data):
        """Returns the submatrix corresponding to bicluster `i`.

        Works with sparse matrices. Only works if ``rows_`` and
        ``columns_`` attributes exist.
        """
        from .utils.validation import check_array
        data = check_array(data, accept_sparse='csr')
        row_ind, col_ind = self.get_indices(i)
        # Outer-product style indexing selects the bicluster block.
        return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn."""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.

        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Default implementation simply chains fit and transform;
        # subclasses may override with a fused, faster version.
        if y is None:
            # fit method of arity 1 (unsupervised transformation)
            return self.fit(X, **fit_params).transform(X)
        # fit method of arity 2 (supervised transformation)
        return self.fit(X, y, **fit_params).transform(X)
class DensityMixin(object):
    """Mixin class for all density estimators in scikit-learn."""

    _estimator_type = "DensityEstimator"

    def score(self, X, y=None):
        """Returns the score of the model on the data X

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)

        Returns
        -------
        score : float
        """
        # Intentionally a no-op placeholder; concrete density
        # estimators override this with a real log-likelihood score.
        pass
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn."""
    # Currently only a marker class used for isinstance checks;
    # it defines no behavior of its own.
###############################################################################
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "classifier"
def is_regressor(estimator):
    """Returns True if the given estimator is (probably) a regressor."""
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "regressor"
| unlicense |
IshankGulati/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
    """Reference O(N^3) Floyd-Warshall; mutates and returns ``graph``.

    Zero entries encode "no edge"; unreachable pairs are encoded back
    as zero in the result, matching ``graph_shortest_path``.
    """
    n_nodes = graph.shape[0]
    # Missing edges (zero entries) become infinitely far apart.
    graph[graph == 0] = np.inf
    # Every node is at distance zero from itself.
    graph.flat[::n_nodes + 1] = 0
    if not directed:
        # Undirected graphs use the cheaper of the two directions.
        graph = np.minimum(graph, graph.T)
    for k in range(n_nodes):
        for i in range(n_nodes):
            for j in range(n_nodes):
                via_k = graph[i, k] + graph[k, j]
                if via_k < graph[i, j]:
                    graph[i, j] = via_k
    graph[np.isinf(graph)] = 0
    return graph
def generate_graph(N=20):
    """Build a deterministic sparse N x N distance matrix (seed 0)."""
    rng = np.random.RandomState(0)
    distances = rng.random_sample((N, N))
    # Make symmetric: distances are not direction-dependent.
    distances = distances + distances.T
    # Knock out about half of the entries to make the graph sparse.
    sparsify = (rng.randint(N, size=N * N // 2),
                rng.randint(N, size=N * N // 2))
    distances[sparsify] = 0
    # Zero diagonal: a node is at distance 0 from itself.
    distances.flat[::N + 1] = 0
    return distances
def test_floyd_warshall():
    """Fast FW shortest paths agree with the slow reference."""
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        fast = graph_shortest_path(dist_matrix, directed, 'FW')
        slow = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(fast, slow)


def test_dijkstra():
    """Dijkstra shortest paths agree with the slow reference."""
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        fast = graph_shortest_path(dist_matrix, directed, 'D')
        slow = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(fast, slow)
def test_shortest_path():
    """Single-source path lengths agree with the reference on unit edges."""
    dist_matrix = generate_graph(20)
    # We compare path length and not costs (-> set distances to 0 or 1)
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
        reference = floyd_warshall_slow(dist_matrix.copy(), directed)
        for source in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in the reference, so
            # back missing keys with a defaultdict of zeros.
            lengths = defaultdict(int)
            lengths.update(
                single_source_shortest_path_length(dist_matrix, source))
            for target in range(reference[source].shape[0]):
                assert_array_almost_equal(lengths[target],
                                          reference[source, target])
def test_dijkstra_bug_fix():
    """Regression test: Dijkstra must match FW on this 3-node graph."""
    X = np.array([[0., 0., 4.],
                  [1., 0., 2.],
                  [0., 5., 0.]])
    dist_FW = graph_shortest_path(X, directed=False, method='FW')
    dist_D = graph_shortest_path(X, directed=False, method='D')
    assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state shared by the whole test module.
rng = np.random.RandomState(0)

# Toy sample: two well separated clusters in 2D.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]    # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]

# Load the iris dataset and randomly permute it.
iris = datasets.load_iris()
# NOTE(review): ``perm`` itself is unused (shuffle() does the permuting),
# but computing it advances ``rng`` — keep it to preserve determinism.
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)

# Load the boston dataset and randomly permute it.
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
                                     random_state=rng)
def test_samme_proba():
    """``_samme_proba`` stays finite and preserves per-row ordering."""
    # Deliberately malformed ``predict_proba`` output.
    probs = np.array([[1, 1e-6, 0],
                      [0.19, 0.6, 0.2],
                      [-999, 0.51, 0.5],
                      [1e-6, 1, 1e-9]])
    probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]

    class MockEstimator(object):
        # Control exactly what predict_proba returns.
        def predict_proba(self, X):
            assert_array_equal(X.shape, probs.shape)
            return probs

    samme_proba = weight_boosting._samme_proba(MockEstimator(), 3,
                                               np.ones_like(probs))

    assert_array_equal(samme_proba.shape, probs.shape)
    assert_true(np.isfinite(samme_proba).all())

    # `_samme_proba` should preserve the ordering in each example.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
    # Check classification on a toy dataset.
    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg, random_state=0)
        model.fit(X, y_class)
        assert_array_equal(model.predict(T), y_t_class)
        assert_array_equal(np.unique(np.asarray(y_t_class)), model.classes_)
        assert_equal(model.predict_proba(T).shape, (len(T), 2))
        assert_equal(model.decision_function(T).shape, (len(T),))


def test_regression_toy():
    # Check regression on a toy dataset.
    model = AdaBoostRegressor(random_state=0)
    model.fit(X, y_regr)
    assert_array_equal(model.predict(T), y_t_regr)
def test_iris():
    # Check consistency on dataset iris.
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None

    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg)
        model.fit(iris.data, iris.target)
        assert_array_equal(classes, model.classes_)

        proba = model.predict_proba(iris.data)
        if alg == "SAMME":
            clf_samme = model
            prob_samme = proba
        assert_equal(proba.shape[1], len(classes))
        assert_equal(model.decision_function(iris.data).shape[1],
                     len(classes))

        score = model.score(iris.data, iris.target)
        assert score > 0.9, ("Failed with algorithm %s and score = %f"
                             % (alg, score))

    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))


def test_boston():
    # Check consistency on dataset boston house prices.
    model = AdaBoostRegressor(random_state=0)
    model.fit(boston.data, boston.target)
    assert model.score(boston.data, boston.target) > 0.85
def test_staged_predict():
    """Staged outputs have one entry per stage and end at the final values."""
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)

    # AdaBoost classification
    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        model.fit(iris.data, iris.target, sample_weight=iris_weights)

        predictions = model.predict(iris.data)
        staged_predictions = list(model.staged_predict(iris.data))
        proba = model.predict_proba(iris.data)
        staged_probas = list(model.staged_predict_proba(iris.data))
        score = model.score(iris.data, iris.target,
                            sample_weight=iris_weights)
        staged_scores = list(model.staged_score(
            iris.data, iris.target, sample_weight=iris_weights))

        assert_equal(len(staged_predictions), 10)
        assert_array_almost_equal(predictions, staged_predictions[-1])
        assert_equal(len(staged_probas), 10)
        assert_array_almost_equal(proba, staged_probas[-1])
        assert_equal(len(staged_scores), 10)
        assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression
    model = AdaBoostRegressor(n_estimators=10, random_state=0)
    model.fit(boston.data, boston.target, sample_weight=boston_weights)

    predictions = model.predict(boston.data)
    staged_predictions = list(model.staged_predict(boston.data))
    score = model.score(boston.data, boston.target,
                        sample_weight=boston_weights)
    staged_scores = list(model.staged_score(
        boston.data, boston.target, sample_weight=boston_weights))

    assert_equal(len(staged_predictions), 10)
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert_equal(len(staged_scores), 10)
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Base trees of both boosters can be grid-searched."""
    # AdaBoost classification
    search = GridSearchCV(
        AdaBoostClassifier(base_estimator=DecisionTreeClassifier()),
        {'n_estimators': (1, 2),
         'base_estimator__max_depth': (1, 2),
         'algorithm': ('SAMME', 'SAMME.R')})
    search.fit(iris.data, iris.target)

    # AdaBoost regression
    search = GridSearchCV(
        AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                          random_state=0),
        {'n_estimators': (1, 2),
         'base_estimator__max_depth': (1, 2)})
    search.fit(boston.data, boston.target)
def test_pickle():
    """Fitted boosters survive a pickle round-trip with unchanged score."""
    import pickle

    # Adaboost classifier
    for alg in ['SAMME', 'SAMME.R']:
        original = AdaBoostClassifier(algorithm=alg)
        original.fit(iris.data, iris.target)
        score_before = original.score(iris.data, iris.target)

        restored = pickle.loads(pickle.dumps(original))
        assert_equal(type(restored), original.__class__)
        assert_equal(score_before, restored.score(iris.data, iris.target))

    # Adaboost regressor
    original = AdaBoostRegressor(random_state=0)
    original.fit(boston.data, boston.target)
    score_before = original.score(boston.data, boston.target)

    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
    assert_equal(score_before, restored.score(boston.data, boston.target))
def test_importances():
    """Informative features dominate the feature importances."""
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=1)

    for alg in ['SAMME', 'SAMME.R']:
        model = AdaBoostClassifier(algorithm=alg)
        model.fit(X, y)
        importances = model.feature_importances_

        assert_equal(importances.shape[0], 10)
        # With shuffle=False the 3 informative features come first and
        # must outrank every uninformative one.
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
def test_error():
    """Deficient inputs must raise ValueError at fit time."""
    assert_raises(ValueError,
                  AdaBoostClassifier(learning_rate=-1).fit,
                  X, y_class)

    assert_raises(ValueError,
                  AdaBoostClassifier(algorithm="foo").fit,
                  X, y_class)

    assert_raises(ValueError,
                  AdaBoostClassifier().fit,
                  X, y_class, sample_weight=np.asarray([-1]))


def test_base_estimator():
    """AdaBoost works with assorted base estimators."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    model = AdaBoostClassifier(RandomForestClassifier())
    model.fit(X, y_regr)

    model = AdaBoostClassifier(SVC(), algorithm="SAMME")
    model.fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor
    from sklearn.svm import SVR

    model = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    model.fit(X, y_regr)

    model = AdaBoostRegressor(SVR(), random_state=0)
    model.fit(X, y_regr)

    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    model = AdaBoostClassifier(SVC(), algorithm="SAMME")
    assert_raises_regexp(ValueError, "worse than random",
                         model.fit, X_fail, y_fail)


def test_sample_weight_missing():
    """Base estimators without sample_weight support are rejected."""
    from sklearn.linear_model import LogisticRegression
    from sklearn.cluster import KMeans

    model = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
    assert_raises(ValueError, model.fit, X, y_regr)

    model = AdaBoostClassifier(KMeans(), algorithm="SAMME")
    assert_raises(ValueError, model.fit, X, y_regr)

    model = AdaBoostRegressor(KMeans())
    assert_raises(ValueError, model.fit, X, y_regr)
def test_sparse_classification():
    """Sparse and dense inputs must give identical boosted classifiers."""
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit caries data type for later verification."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # One model trained on the sparse format, one on the dense format.
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)

        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)

        # Every prediction-style method must agree between the two.
        assert_array_equal(sparse_classifier.predict(X_test_sparse),
                           dense_classifier.predict(X_test))
        assert_array_equal(
            sparse_classifier.decision_function(X_test_sparse),
            dense_classifier.decision_function(X_test))
        assert_array_equal(
            sparse_classifier.predict_log_proba(X_test_sparse),
            dense_classifier.predict_log_proba(X_test))
        assert_array_equal(sparse_classifier.predict_proba(X_test_sparse),
                           dense_classifier.predict_proba(X_test))
        assert_array_equal(sparse_classifier.score(X_test_sparse, y_test),
                           dense_classifier.score(X_test, y_test))

        # The staged variants must agree stage by stage.
        for sparse_res, dense_res in zip(
                sparse_classifier.staged_decision_function(X_test_sparse),
                dense_classifier.staged_decision_function(X_test)):
            assert_array_equal(sparse_res, dense_res)

        for sparse_res, dense_res in zip(
                sparse_classifier.staged_predict(X_test_sparse),
                dense_classifier.staged_predict(X_test)):
            assert_array_equal(sparse_res, dense_res)

        for sparse_res, dense_res in zip(
                sparse_classifier.staged_predict_proba(X_test_sparse),
                dense_classifier.staged_predict_proba(X_test)):
            assert_array_equal(sparse_res, dense_res)

        for sparse_res, dense_res in zip(
                sparse_classifier.staged_score(X_test_sparse, y_test),
                dense_classifier.staged_score(X_test, y_test)):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training.
        types = [est.data_type_ for est in sparse_classifier.estimators_]
        assert all(t == csc_matrix or t == csr_matrix for t in types)
def test_sparse_regression():
    """AdaBoostRegressor must behave identically on sparse and dense input.

    Fixes over the previous version: the spurious
    ``dense_classifier = dense_results = ...`` double assignment is gone
    (the alias was immediately overwritten and only obscured intent), and
    the misspelled ``sprase_res`` loop variable is renamed.
    """
    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit caries data type for later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_regressor = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)

        # predict
        assert_array_equal(sparse_regressor.predict(X_test_sparse),
                           dense_regressor.predict(X_test))

        # staged_predict
        for sparse_res, dense_res in zip(
                sparse_regressor.staged_predict(X_test_sparse),
                dense_regressor.staged_predict(X_test)):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training.
        types = [est.data_type_ for est in sparse_regressor.estimators_]
        assert all(t == csc_matrix or t == csr_matrix for t in types)
def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor should work without sample_weights in the base estimator

    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor.
    """
    class DummyEstimator(BaseEstimator):

        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    booster = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    booster.fit(X, y_regr)
    assert_equal(len(booster.estimator_weights_),
                 len(booster.estimator_errors_))
| bsd-3-clause |
jaidevd/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 82 | 4768 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)

# Bernoulli labels whose log-odds depend non-linearly on the features.
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)

X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=9)

# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
          'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)

acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))

n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
    """Compute per-iteration deviance of ``clf`` on held-out data.

    Relies on the module-level ``n_estimators``.
    """
    scores = np.zeros((n_estimators,), dtype=np.float64)
    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        scores[i] = clf.loss_(y_test, y_pred)
    return scores
def cv_estimate(n_splits=3):
    """Average held-out deviance across a K-fold split of the training set."""
    cv = KFold(n_splits=n_splits)
    model = ensemble.GradientBoostingClassifier(**params)
    val_scores = np.zeros((n_estimators,), dtype=np.float64)
    for train, test in cv.split(X_train, y_train):
        model.fit(X_train[train], y_train[train])
        val_scores += heldout_score(model, X_train[test], y_train[test])
    return val_scores / n_splits
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)

# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)

# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)

# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]

# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]

# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]

# color brew for the three curves
oob_color = list(map(lambda value: value / 256.0, (190, 174, 212)))
test_color = list(map(lambda value: value / 256.0, (127, 201, 127)))
cv_color = list(map(lambda value: value / 256.0, (253, 192, 134)))

# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)

# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
                      [oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(int, xticks[0])) +
                        ['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)

plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')

plt.show()
| bsd-3-clause |
frank-tancf/scikit-learn | examples/decomposition/plot_pca_iris.py | 29 | 1484 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()

# Project the 4-D iris measurements onto their three principal axes.
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

# Label each species cluster at its centroid.
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))

# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])

plt.show()
| bsd-3-clause |
xyguo/scikit-learn | sklearn/datasets/lfw.py | 31 | 19544 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
# Module-level logger used for download progress messages.
logger = logging.getLogger(__name__)

# Official LFW download location and archive names.
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"

# Metadata files describing the official train/test pair splits.
TARGET_FILENAMES = [
    'pairsDevTrain.txt',
    'pairsDevTest.txt',
    'pairs.txt',
]
def scale_face(face):
    """Scale back to 0-1 range in case of normalization for plotting.

    Returns a new array; the input is left untouched.
    """
    shifted = face - face.min()
    return shifted / shifted.max()
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Returns
    -------
    lfw_home : str
        Root folder holding the metadata files and extracted images.
    data_folder_path : str
        Folder containing the extracted face image directories.

    Raises
    ------
    IOError
        If a required file is missing and ``download_if_missing`` is False.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s",
                               archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # BUG FIX: previously reported ``target_filepath`` (the
                # stale loop variable from the metadata loop above)
                # instead of the archive that is actually missing.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        # Free disk space once the archive has been extracted.
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Parameters
    ----------
    file_paths : list of str
        Paths of the jpeg files to decode.
    slice_ : tuple of slice or None
        2D (height, width) crop applied to each image; None means the full
        250 x 250 frame.
    color : bool
        If False, average the RGB channels into a single gray-level channel.
    resize : float or None
        Optional rescaling ratio applied after cropping.

    Returns
    -------
    faces : ndarray of float32
        Shape (n_faces, h, w), or (n_faces, h, w, 3) when ``color`` is True.
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")
    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # replace any falsy entry (None) with the full-frame default
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)
    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # BUG FIX: ``img.ndim is 0`` compared identity with the literal 0,
        # which relies on an interpreter implementation detail; use numeric
        # equality instead.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)
        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.

    Returns the shuffled face images, their integer labels (indices into the
    sorted unique person names) and the array of person names.
    """
    # scan the data folder content to retain people with more that
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # folder names use underscores; display names use spaces
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)
    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)
    # labels are positions of each name in the sorted unique-name array
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)
    faces = _load_imgs(file_paths, slice_, color, resize)
    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47 (the 125 x 94 crop scaled
    by 0.5).

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default 0
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the
        shape of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change
        the shape of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # download/locate the raw data first (may hit the network)
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)
    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Returns the pair images of shape (n_pairs, 2, h, w[, 3]), the binary
    targets (1 = same person) and the two class names.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    # metadata lines with fewer than 3 fields are headers/fold counts
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory.
    # BUG FIX: ``np.int`` was a deprecated alias of the builtin ``int`` and
    # was removed in NumPy 1.24; use the builtin directly.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # (name, idx_a, idx_b): two pictures of the same person
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # (name_a, idx_a, name_b, idx_b): two different persons
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # ``name`` is bytes (the file was opened in 'rb' mode):
                # decode before joining on Python 3
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)
    pairs = _load_imgs(file_paths, slice_, color, resize)
    # regroup the flat face array two-by-two into pairs
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape
    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
            "be removed in 0.19."
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Deprecated alias for ``fetch_lfw_people(download_if_missing=False)``.

    See :func:`fetch_lfw_people` for the documentation and parameter list.
    """
    kwargs["download_if_missing"] = download_if_missing
    return fetch_lfw_people(**kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  As I am not sure as to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and
    resize arguments reduce them to 62 x 47 (the 125 x 94 crop scaled
    by 0.5).

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
        pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
        will change the shape of the output.

    pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
        ``subset``.
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_``,
        ``resize`` or ``subset`` parameters will change the shape of the
        output.

    target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # download/locate the raw data first (may hit the network)
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)
    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])
    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
            "be removed in 0.19."
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Deprecated alias for ``fetch_lfw_pairs(download_if_missing=False)``.

    See :func:`fetch_lfw_pairs` for the documentation and parameter list.
    """
    kwargs["download_if_missing"] = download_if_missing
    return fetch_lfw_pairs(**kwargs)
| bsd-3-clause |
jor-/scipy | doc/source/tutorial/examples/optimize_global_1.py | 15 | 1752 | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def eggholder(x):
    """Eggholder benchmark function (2-D); its known global minimum is
    roughly -959.6407 at (512, 404.2319)."""
    x0, x1 = x[0], x[1]
    left = -(x1 + 47) * np.sin(np.sqrt(abs(x0 / 2 + (x1 + 47))))
    right = -x0 * np.sin(np.sqrt(abs(x0 - (x1 + 47))))
    return left + right
# Search domain, plus a dense grid used only to draw the background image.
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
# Run each global optimizer on the same problem and collect the results.
results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
# NOTE(review): basinhopping's second argument is the starting point x0,
# not a bounds list; here ``bounds`` is being used as x0 — confirm intent.
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=200, iters=5,
                                      sampling_method='sobol')
fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
               cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_point(res, marker='o', color=None):
    # Shift by +512 so function-domain coordinates map onto image pixels.
    ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
plot_point(results['BH'], color='y')  # basinhopping - yellow
plot_point(results['DE'], color='c')  # differential_evolution - cyan
plot_point(results['DA'], color='w')  # dual_annealing. - white
# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
    ax.plot(512 + results['shgo_sobol'].xl[i, 0],
            512 + results['shgo_sobol'].xl[i, 1],
            'ro', ms=2)
ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
| bsd-3-clause |
MatteusDeloge/opengrid | opengrid/library/analysis.py | 1 | 1817 | # -*- coding: utf-8 -*-
"""
General analysis functions.
Try to write all methods such that they take a dataframe as input
and return a dataframe or list of dataframes.
"""
import numpy as np
import pdb
import pandas as pd
from opengrid.library.misc import *
def daily_min(df, starttime=None, endtime=None):
    """Compute the daily minimum of each column.

    Parameters
    ----------
    df: pandas.DataFrame
        With pandas.DatetimeIndex and one or more columns
    starttime, endtime : datetime.time objects
        For each day, only consider the time between starttime and endtime
        If None, use begin of day/end of day respectively

    Returns
    -------
    df_res : pandas.DataFrame with daily datetimeindex and minima
    """
    df_daily_list = split_by_day(df, starttime, endtime)
    # Create a dataframe with the correct (daily) index.
    # The ``rule=``/``how=`` keywords were removed from pandas' resample();
    # the aggregation only serves to build the index, which is identical
    # whatever reducer is used.
    df_res = pd.DataFrame(index=df.resample('D').max().index,
                          columns=df.columns)
    # fill it up, day by day
    for i, df_day in enumerate(df_daily_list):
        df_res.iloc[i, :] = df_day.min()
    return df_res
def daily_max(df, starttime=None, endtime=None):
    """Compute the daily maximum of each column.

    Parameters
    ----------
    df: pandas.DataFrame
        With pandas.DatetimeIndex and one or more columns
    starttime, endtime : datetime.time objects
        For each day, only consider the time between starttime and endtime
        If None, use begin of day/end of day respectively

    Returns
    -------
    df_res : pandas.DataFrame with daily datetimeindex and maxima
    """
    df_daily_list = split_by_day(df, starttime, endtime)
    # Create a dataframe with the correct (daily) index.
    # The ``rule=``/``how=`` keywords were removed from pandas' resample();
    # the aggregation only serves to build the index, which is identical
    # whatever reducer is used.
    df_res = pd.DataFrame(index=df.resample('D').max().index,
                          columns=df.columns)
    # fill it up, day by day
    for i, df_day in enumerate(df_daily_list):
        df_res.iloc[i, :] = df_day.max()
    return df_res
qiime2/q2-types | q2_types/plugin_setup.py | 1 | 1462 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import importlib
import pandas as pd
import qiime2.plugin
import qiime2.sdk
from q2_types import __version__
citations = qiime2.plugin.Citations.load('citations.bib', package='q2_types')
# Top-level plugin object: all semantic types, formats and transformers in
# this package register themselves against this instance.
plugin = qiime2.plugin.Plugin(
    name='types',
    version=__version__,
    website='https://github.com/qiime2/q2-types',
    package='q2_types',
    description=('This QIIME 2 plugin defines semantic types and '
                 'transformers supporting microbiome analysis.'),
    short_description='Plugin defining types for microbiome analysis.'
)
plugin.register_views(pd.Series, pd.DataFrame,
                      citations=[citations['mckinney-proc-scipy-2010']])
# Importing each submodule triggers its registration side effects against
# ``plugin`` above; the imports are intentional even though the returned
# modules are unused.
importlib.import_module('q2_types.feature_table')
importlib.import_module('q2_types.distance_matrix')
importlib.import_module('q2_types.tree')
importlib.import_module('q2_types.ordination')
importlib.import_module('q2_types.sample_data')
importlib.import_module('q2_types.feature_data')
importlib.import_module('q2_types.per_sample_sequences')
importlib.import_module('q2_types.multiplexed_sequences')
importlib.import_module('q2_types.bowtie2')
kaleoyster/ProjectNBI | nbi-datacenterhub/top-level.py | 1 | 7152 | """
The script updates new cases to the top level file for the data-center hub
"""
import pandas as pd
import csv
import os
import numpy as np
from collections import OrderedDict
from collections import defaultdict
import sys
__author__ = 'Akshay Kale'
__copyright__ = 'GPL'
__credits__ = ['Jonathan Monical']
__email__ = 'akale@unomaha.edu'
# Command-line interface: the script name followed by the case file, the
# top-level file, the NBI survey file and the survey year.
list_of_args = [arg for arg in sys.argv]
# NOTE(review): this unpacking raises ValueError unless exactly 5 argv
# entries are given; there is no usage message — confirm before hardening.
codename, case_file, top_file, nbi_file, YEAR = list_of_args
# Import relevant files
df_cases = pd.read_csv(case_file, skiprows=[0, 2, 3], low_memory=False)
df_top_level = pd.read_csv(top_file, low_memory=False)
df_nbi = pd.read_csv(nbi_file, low_memory=False)
# Get Cases ids from all the files
set_cases_top = set(df_top_level['Case Id'])
set_cases_nbi = set(df_nbi['Case Id'])
set_cases_cases = set(df_cases['Case ID'])
# Cases present in the case file but not yet in the top-level file
new_cases = set_cases_cases - set_cases_top
# Check whether a case id exists in the current NBI survey records
# (DEAD CODE - not used anywhere; kept for reference).
def check_case_in_cases(new_cases):
    """Return True if *new_cases* (a single case id) is present in the
    module-level ``set_cases_nbi`` set, False otherwise."""
    return new_cases in set_cases_nbi
# Condition rating coding: normalize NBI condition ratings (which can appear
# as ints or strings in the raw data) to plain ints; 'N' (not applicable)
# maps to None.
condition_rating_dict = {
    1: 1,
    '1': 1,
    2: 2,
    '2': 2,
    3: 3,
    '3': 3,
    4: 4,
    '4': 4,
    5: 5,
    '5': 5,
    6: 6,
    '6': 6,
    7: 7,
    '7': 7,
    8: 8,
    '8':8,
    '9': 9,
    9: 9,
    0: 0,
    '0': 0,
    'N': None,
}
# Columns pulled from the NBI survey file (order matters: it must match
# ``top_level_columns`` below, position by position).
selected_columns = [
    'Case Name', # Case Name
    'Longitude', # Longitude
    'Latitude', # Latitude
    'Built in', # Year Built
    'Material', # Material
    'Construction Type', # Construction
    'ADT', # ADT
    'ADTT (% ADT)', # ADTT
    'Deck', # Deck
    'Super', # Superstructure
    'Sub' # Substructure
]
## Corresponding columns of selected columns in the top-level record
top_level_columns = [
    'Case Name', # Case Name
    'Longitude', # Longitude
    'Latitude', # Latitude
    'Year Built', # Year Built
    'Material', # Material
    'Construction Type', # Construction
    'ADT', # ADT
    'ADTT', # ADTT
    'Deck', # Deck
    'Superstructure', # Superstructure
    'Substructure' # Substructure
]
# Identity columns keep their existing top-level value when the case id is
# missing from the NBI data (other columns fall back to None instead).
identity_columns = [
    'Case Name',
    'Longitude',
    'Latitude',
    'Year Built',
    'Material',
    'Construction Type'
]
# Select the key: case ids
key = 'Case Id'
# Create list of the dictionaries
def create_list_of_dicts(columns, key):
    """Build one ``{case id: value}`` mapping per requested column of the
    module-level ``df_nbi`` frame.

    Parameters
    ----------
    columns : list of str
        Column names of ``df_nbi`` to index.
    key : str
        Name of the column holding the case ids.

    Returns
    -------
    list of dict, in the same order as *columns*.
    """
    df_keys = df_nbi[key]
    list_of_dictionary = []
    for column in columns:
        temp_dict = defaultdict()
        df_column = df_nbi[column]
        # BUG FIX: the loop variable used to shadow the ``key`` parameter;
        # use a distinct name for the per-row key.
        for row_key, value in zip(df_keys, df_column):
            temp_dict[row_key] = value
        list_of_dictionary.append(temp_dict)
    return list_of_dictionary
# Case-id -> value lookup for each selected NBI column (list order matches
# ``selected_columns`` and ``top_level_columns``).
list_of_dict = create_list_of_dicts(selected_columns, key)
# Create an updated list of attribute values for the top-level file
def create_update_list(top_level_columns):
    """Build per-column lists of refreshed values for the top-level file.

    For every column, prefer the fresh NBI value looked up by case id in
    ``list_of_dict``.  When the case id is absent from the NBI data:
    identity columns keep their current top-level value; all other columns
    fall back to None.

    Returns
    -------
    list of list, one inner list per entry of *top_level_columns*.
    """
    list_of_updated_values = []
    for index, column in enumerate(top_level_columns):
        top_col = df_top_level[column]  # Series of current values
        top_ids = df_top_level['Case Id']  # Series of case ids
        nbi_col = list_of_dict[index]  # case id -> fresh NBI value
        temp_list = []
        for top_id, top_val in zip(top_ids, top_col):
            if column in identity_columns:
                try:
                    temp_list.append(nbi_col[top_id])
                # BUG FIX: narrow the former bare ``except`` — only a
                # missing case id (KeyError) should trigger the fallback.
                except KeyError:
                    temp_list.append(top_val)
            else:
                try:
                    temp_list.append(nbi_col[top_id])
                except KeyError:
                    temp_list.append(None)
        list_of_updated_values.append(temp_list)
    return list_of_updated_values
list_of_updated_values = create_update_list(top_level_columns)
# Update df_top_level in place (index 0, 'Case Name', is intentionally
# left unused here — the name is not rewritten).
df_top_level['Longitude'] = list_of_updated_values[1]
df_top_level['Latitude'] = list_of_updated_values[2]
df_top_level['Year Built'] = list_of_updated_values[3]
df_top_level['Material'] = list_of_updated_values[4]
df_top_level['Construction Type'] = list_of_updated_values[5]
# Get columns names: the last six positional columns are assumed to be
# ADT, ADTT, Deck, Superstructure, Substructure, and the trailing year.
ADT = df_top_level.columns[-6]
ADTT = df_top_level.columns[-5]
DECK = df_top_level.columns[-4]
SUPERSTRUCTURE = df_top_level.columns[-3]
SUBSTRUCTURE = df_top_level.columns[-2]
# Update columns for the latest year in top level file; condition ratings
# are normalized through condition_rating_dict ('N' -> None).
df_top_level[ADT] = list_of_updated_values[6]
df_top_level[ADTT] = list_of_updated_values[7]
df_top_level[DECK] = pd.Series(list_of_updated_values[8]).map(condition_rating_dict)
df_top_level[SUPERSTRUCTURE] = pd.Series(list_of_updated_values[9]).map(condition_rating_dict)
df_top_level[SUBSTRUCTURE] = pd.Series(list_of_updated_values[10]).map(condition_rating_dict)
# Add an empty column for the new survey year.
df_top_level[YEAR] = len(df_top_level)*[None]
# Save intermediate top-level file
df_top_level.to_csv('top-level-intermediate.csv')
#------------------------Updating id in the nbi id-----------------------------#
# Map each case id to its data-center-hub 'Id' and stamp it on the NBI file.
dict_case_id_name = defaultdict()
for case_id, dc_id in zip(df_top_level['Case Id'], df_top_level['Id']):
    dict_case_id_name[case_id] = dc_id
df_nbi['Id'] = df_nbi['Case Id'].map(dict_case_id_name)
filename_nbi = 'NBI' + YEAR + '.csv'
df_nbi.to_csv(filename_nbi)
#----------------Correcting the header of the top level file------------------------#
filename_top = 'TopLevel1992-' + YEAR + '.csv'
with open('top-level-intermediate.csv', 'r') as inFile, open(filename_top, 'w', newline = '') as outFile:
    # NOTE(review): the csv.reader ``r`` is only advanced once via next();
    # all rows are actually written from ``lines`` — the reader looks
    # vestigial; confirm before refactoring.
    r = csv.reader(inFile)
    w = csv.writer(outFile)
    lines = inFile.readlines()
    lines_reader = lines[0].split(',')
    new_words = []
    #Following for loop removes '.Number' from the column names
    for word in lines_reader:
        if '.' in word:
            temp_word = ''
            for char in word:
                if char == '.':
                    new_words.append(temp_word)
                    temp_word = ''
                else:
                    temp_word = temp_word + char
        else:
            new_words.append(word.strip('\n'))
    next(r, None)
    w.writerow(new_words)
    for row in lines[1:]:
        write_row = row.split(",")
        write_row[-1] = write_row[-1].strip()
        w.writerow(write_row)
| gpl-2.0 |
maxlikely/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 5 | 12759 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.cross_validation import KFold
# Shuffled 200-sample subset of the diabetes regression dataset.
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
# Iris classification dataset, stored in CSR form for the sparse code paths.
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
# Input filters used to run each check on both dense and sparse inputs.
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
    """Ridge regression convergence test using score

    TODO: for this test to be robust, we should use a dataset instead
    of np.random.
    """
    rng = np.random.RandomState(0)
    penalty = 1.0

    for solver in ("sparse_cg", "dense_cholesky", "lsqr"):
        # Over-determined problem: more samples than features.
        targets = rng.randn(6)
        design = rng.randn(6, 5)
        model = Ridge(alpha=penalty, solver=solver)
        model.fit(design, targets)
        assert_equal(model.coef_.shape, (design.shape[1], ))
        assert_greater(model.score(design, targets), 0.47)

        model.fit(design, targets, sample_weight=np.ones(6))
        assert_greater(model.score(design, targets), 0.47)

        # Under-determined problem: more features than samples.
        targets = rng.randn(5)
        design = rng.randn(5, 10)
        model = Ridge(alpha=penalty, solver=solver)
        model.fit(design, targets)
        assert_greater(model.score(design, targets), .9)

        model.fit(design, targets, sample_weight=np.ones(5))
        assert_greater(model.score(design, targets), 0.9)
def test_ridge_sample_weights():
    """Weighted ridge must equal ridge fit on sqrt-weight rescaled data."""
    rng = np.random.RandomState(0)
    penalty = 1.0

    for solver in ("sparse_cg", "dense_cholesky", "lsqr"):
        for n_samples, n_features in ((6, 5), (5, 10)):
            targets = rng.randn(n_samples)
            design = rng.randn(n_samples, n_features)
            weights = 1 + rng.rand(n_samples)

            weighted_coefs = ridge_regression(design, targets, penalty,
                                              weights, solver=solver)

            # For the squared loss, sample weighting is equivalent to
            # rescaling rows of X and y by sqrt(weight).
            scale = np.sqrt(weights)
            rescaled_coefs = ridge_regression(
                design * scale[:, np.newaxis],
                targets * scale,
                penalty, solver=solver)
            assert_array_almost_equal(weighted_coefs, rescaled_coefs)
def test_ridge_shapes():
    """Check coef_ / intercept_ shapes for 1d, column and 2d targets."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    design = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    column_target = y[:, np.newaxis]
    two_targets = np.c_[y, 1 + y]

    model = Ridge()

    model.fit(design, y)
    assert_equal(model.coef_.shape, (n_features,))
    assert_equal(model.intercept_.shape, ())

    model.fit(design, column_target)
    assert_equal(model.coef_.shape, (1, n_features))
    assert_equal(model.intercept_.shape, (1, ))

    model.fit(design, two_targets)
    assert_equal(model.coef_.shape, (2, n_features))
    assert_equal(model.intercept_.shape, (2, ))
def test_ridge_intercept():
    """Intercept handling with multiple targets (GH issue #708)."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    design = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    stacked = np.c_[y, 1. + y]

    model = Ridge()

    model.fit(design, y)
    single_intercept = model.intercept_

    # The second column is the first shifted by 1, so its intercept must be
    # the single-target intercept plus 1.
    model.fit(design, stacked)
    assert_almost_equal(model.intercept_[0], single_intercept)
    assert_almost_equal(model.intercept_[1], single_intercept + 1.)
def test_toy_ridge_object():
    """Test BayesianRegression ridge classifier

    TODO: test also n_samples > n_features
    """
    X = np.array([[1], [2]])
    Y = np.array([1, 2])
    model = Ridge(alpha=0.0)

    # Single target: unpenalized fit interpolates the toy data exactly.
    model.fit(X, Y)
    grid = [[1], [2], [3], [4]]
    assert_almost_equal(model.predict(grid), [1., 2, 3, 4])
    assert_equal(len(model.coef_.shape), 1)
    assert_equal(type(model.intercept_), np.float64)

    # Two identical targets stacked as columns: coef_ becomes 2d and the
    # intercept becomes an array.
    Y = np.vstack((Y, Y)).T
    model.fit(X, Y)
    assert_equal(len(model.coef_.shape), 2)
    assert_equal(type(model.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
    """On alpha=0., Ridge and OLS yield the same solution."""
    rng = np.random.RandomState(0)
    # we need more samples than features
    n_samples, n_features = 5, 4
    y = rng.randn(n_samples)
    X = rng.randn(n_samples, n_features)

    ridge = Ridge(alpha=0., fit_intercept=False)
    ols = LinearRegression(fit_intercept=False)

    # An unpenalized ridge fit must reproduce the ordinary least squares
    # coefficients.  (The original test fitted and compared twice with the
    # exact same inputs; the redundant second round was removed.)
    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)
def _test_ridge_loo(filter_):
    """Check _RidgeGCV's efficient leave-one-out paths against a brute-force
    LOO loop, against the SVD variant, and against custom scoring / sample
    weights.  Returns the selected alpha so dense and sparse runs can be
    compared by the caller."""
    # test that can work with both dense or sparse matrices
    n_samples = X_diabetes.shape[0]
    ret = []
    ridge_gcv = _RidgeGCV(fit_intercept=False)
    ridge = Ridge(alpha=1.0, fit_intercept=False)
    # generalized cross-validation (efficient leave-one-out)
    decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
    errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
    values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
    # brute-force leave-one-out: remove one example at a time
    errors2 = []
    values2 = []
    for i in range(n_samples):
        sel = np.arange(n_samples) != i
        X_new = X_diabetes[sel]
        y_new = y_diabetes[sel]
        ridge.fit(X_new, y_new)
        value = ridge.predict([X_diabetes[i]])[0]
        error = (y_diabetes[i] - value) ** 2
        errors2.append(error)
        values2.append(value)
    # check that efficient and brute-force LOO give same results
    assert_almost_equal(errors, errors2)
    assert_almost_equal(values, values2)
    # generalized cross-validation (efficient leave-one-out,
    # SVD variation)
    decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
    errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
    values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
    # check that efficient and SVD efficient LOO give same results
    assert_almost_equal(errors, errors3)
    assert_almost_equal(values, values3)
    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    alpha_ = ridge_gcv.alpha_
    ret.append(alpha_)
    # check that we get same best alpha with custom loss_func
    ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error)
    ridge_gcv2.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv2.alpha_, alpha_)
    # check that we get same best alpha with custom score_func
    func = lambda x, y: -mean_squared_error(x, y)
    ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func)
    ridge_gcv3.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv3.alpha_, alpha_)
    # check that we get same best alpha with sample weights
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                  sample_weight=np.ones(n_samples))
    assert_equal(ridge_gcv.alpha_, alpha_)
    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T
    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=5)
    return ret
def _test_ridge_cv(filter_):
    """Fit RidgeCV with the default (GCV) and a KFold CV strategy and check
    the fitted attribute shapes and types in both cases."""
    X = filter_(X_diabetes)

    def fit_and_check(model):
        model.fit(X, y_diabetes)
        model.predict(X)
        assert_equal(len(model.coef_.shape), 1)
        assert_equal(type(model.intercept_), np.float64)

    ridge_cv = RidgeCV()
    fit_and_check(ridge_cv)

    ridge_cv.set_params(cv=KFold(X_diabetes.shape[0], 5))
    fit_and_check(ridge_cv)
def _test_ridge_diabetes(filter_):
    """Return the (rounded) training score of a no-intercept ridge fit."""
    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), y_diabetes)
    score = model.score(filter_(X_diabetes), y_diabetes)
    return np.round(score, 5)
def _test_multi_ridge_diabetes(filter_):
    """Multi-output Ridge must match stacking two identical single-output fits."""
    # two identical responses stacked column-wise
    targets = np.vstack((y_diabetes, y_diabetes)).T
    X = filter_(X_diabetes)
    model = Ridge(fit_intercept=False)
    model.fit(X, targets)
    assert_equal(model.coef_.shape, (2, X_diabetes.shape[1]))
    multi_pred = model.predict(X)
    model.fit(X, y_diabetes)
    single_pred = model.predict(X)
    assert_array_almost_equal(np.vstack((single_pred, single_pred)).T,
                              multi_pred, decimal=3)
def _test_ridge_classifiers(filter_):
    """Both ridge classifiers must reach reasonable accuracy on iris."""
    n_classes = np.unique(y_iris).shape[0]
    n_features = X_iris.shape[1]
    X = filter_(X_iris)
    for model in (RidgeClassifier(), RidgeClassifierCV()):
        model.fit(X, y_iris)
        # one row of coefficients per class
        assert_equal(model.coef_.shape, (n_classes, n_features))
        assert_greater(np.mean(y_iris == model.predict(X)), .79)
    # same check with an explicit CV splitter
    model = RidgeClassifierCV(cv=KFold(X_iris.shape[0], 5))
    model.fit(X, y_iris)
    assert_true(np.mean(y_iris == model.predict(X)) >= 0.8)
def _test_tolerance(filter_):
    """A tighter solver tolerance should never yield a worse score."""
    X = filter_(X_diabetes)
    scores = []
    for tol in (1e-5, 1e-3):
        model = Ridge(tol=tol)
        model.fit(X, y_diabetes)
        scores.append(model.score(X, y_diabetes))
    assert_true(scores[0] >= scores[1])
def test_dense_sparse():
    """Every helper must agree between dense and sparse representations."""
    checks = (_test_ridge_loo,
              _test_ridge_cv,
              _test_ridge_diabetes,
              _test_multi_ridge_diabetes,
              _test_ridge_classifiers,
              _test_tolerance)
    for check in checks:
        dense_result = check(DENSE_FILTER)
        sparse_result = check(SPARSE_FILTER)
        # only helpers that return values can be compared directly
        if dense_result is not None and sparse_result is not None:
            assert_array_almost_equal(dense_result, sparse_result, decimal=3)
def test_class_weights():
    """Down-weighting a class should flip a borderline prediction."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    probe = [[0.2, -1.0]]
    unweighted = RidgeClassifier(class_weight=None)
    unweighted.fit(X, y)
    assert_array_equal(unweighted.predict(probe), np.array([1]))
    # give class 1 a tiny weight: the hyperplane rotates clock-wise and
    # the prediction on the probe point flips
    weighted = RidgeClassifier(class_weight={1: 0.001})
    weighted.fit(X, y)
    assert_array_equal(weighted.predict(probe), np.array([-1]))
def test_class_weights_cv():
    """Class weights must be honored by the cross-validated classifier."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    # unweighted fit is only a smoke test
    unweighted = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
    unweighted.fit(X, y)
    # a tiny weight on class 1 should push the prediction to class -1
    weighted = RidgeClassifierCV(class_weight={1: 0.001},
                                 alphas=[.01, .1, 1, 10])
    weighted.fit(X, y)
    assert_array_equal(weighted.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
    """Test _RidgeCV's store_cv_values attribute.

    With ``store_cv_values=True``, ``cv_values_`` must have shape
    ``(n_samples, n_alphas)`` for 1-d targets and
    ``(n_samples, n_responses, n_alphas)`` for 2-d targets.
    """
    # fixed: original had a redundant double assignment `rng = rng = ...`
    rng = np.random.RandomState(42)
    n_samples = 8
    n_features = 5
    x = rng.randn(n_samples, n_features)
    alphas = [1e-1, 1e0, 1e1]
    n_alphas = len(alphas)
    r = RidgeCV(alphas=alphas, store_cv_values=True)
    # with len(y.shape) == 1
    y = rng.randn(n_samples)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
    # with len(y.shape) == 2
    n_responses = 3
    y = rng.randn(n_samples, n_responses)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
| bsd-3-clause |
alexmojaki/odo | odo/backends/aws.py | 3 | 9445 | from __future__ import print_function, division, absolute_import
import os
import uuid
import zlib
import re
from contextlib import contextmanager
from collections import Iterator
from operator import attrgetter
import pandas as pd
from toolz import memoize, first
from .. import (discover, CSV, resource, append, convert, drop, Temp, JSON,
JSONLines, chunks)
from multipledispatch import MDNotImplementedError
from .text import TextFile
from ..compatibility import urlparse
from ..utils import tmpfile, ext, sample, filter_kwargs, copydoc
@memoize
def get_s3_connection(aws_access_key_id=None,
                      aws_secret_access_key=None,
                      anon=False):
    """Create (and memoize) a boto S3 connection.

    Credentials fall back from the explicit arguments to the boto config
    file and finally to the standard AWS environment variables.  When no
    credentials can be found at all, the connection is made anonymously.
    """
    import boto
    cfg = boto.Config()
    if aws_access_key_id is None:
        aws_access_key_id = cfg.get('Credentials', 'aws_access_key_id')
    if aws_access_key_id is None:
        aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
    if aws_secret_access_key is None:
        aws_secret_access_key = cfg.get('Credentials', 'aws_secret_access_key')
    if aws_secret_access_key is None:
        aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
    # fall back to an anonymous connection when no credentials were found.
    # (The original expression ``not anon and ...`` inverted an explicit
    # ``anon=True`` request back to ``False``; ``or`` preserves it.)
    anon = anon or (aws_access_key_id is None and
                    aws_secret_access_key is None)
    return boto.connect_s3(aws_access_key_id, aws_secret_access_key,
                           anon=anon)
class _S3(object):
    """Parametrized S3 bucket Class
    Examples
    --------
    >>> S3(CSV)
    <class 'odo.backends.aws.S3(CSV)'>
    """
    def __init__(self, uri, s3=None, aws_access_key_id=None,
                 aws_secret_access_key=None, *args, **kwargs):
        import boto
        # parse s3://bucket/key/path into bucket and key components
        result = urlparse(uri)
        self.bucket = result.netloc
        self.key = result.path.lstrip('/')
        # reuse an existing connection when one is supplied; otherwise
        # build (or fetch the memoized) one from the given credentials
        if s3 is not None:
            self.s3 = s3
        else:
            self.s3 = get_s3_connection(aws_access_key_id=aws_access_key_id,
                                        aws_secret_access_key=aws_secret_access_key)
        # get_bucket raises when the bucket doesn't exist (or isn't
        # accessible); in that case create it
        try:
            bucket = self.s3.get_bucket(self.bucket,
                                        **filter_kwargs(self.s3.get_bucket,
                                                        kwargs))
        except boto.exception.S3ResponseError:
            bucket = self.s3.create_bucket(self.bucket,
                                           **filter_kwargs(self.s3.create_bucket,
                                                           kwargs))
        # similarly, materialize a fresh key when the object is missing
        self.object = bucket.get_key(self.key, **filter_kwargs(bucket.get_key,
                                                               kwargs))
        if self.object is None:
            self.object = bucket.new_key(self.key)
        # cooperative init: `subtype` is the wrapped container class
        # (CSV, JSONLines, ...) supplied by the S3() factory below
        self.subtype.__init__(self, uri, *args,
                              **filter_kwargs(self.subtype.__init__, kwargs))
@memoize
@copydoc(_S3)
def S3(cls):
    # dynamically derive a subclass that mixes the S3 machinery into `cls`;
    # memoized so each container type maps to exactly one parametrized class
    subclass_name = 'S3({})'.format(cls.__name__)
    return type(subclass_name, (_S3, cls), {'subtype': cls})
@sample.register((S3(CSV), S3(JSONLines)))
@contextmanager
def sample_s3_line_delimited(data, length=8192):
    """Get a size `length` sample from an S3 CSV or S3 line-delimited JSON.
    Parameters
    ----------
    data : S3(CSV)
        A CSV file living in an S3 bucket
    length : int, optional, default ``8192``
        Number of bytes of the file to read
    Yields
    ------
    str
        Path to a temporary local file holding the sampled bytes.
    """
    # fetch only the first `length` bytes via an HTTP Range request
    headers = {'Range': 'bytes=0-%d' % length}
    if data.object.exists():
        key = data.object
    else:  # we are probably trying to read from a set of files
        keys = sorted(data.object.bucket.list(prefix=data.object.key),
                      key=attrgetter('key'))
        if not keys:
            # we didn't find anything with a prefix of data.object.key
            raise ValueError('Object %r does not exist and no keys with a '
                             'prefix of %r exist' %
                             (data.object, data.object.key))
        # sample from the first key only
        key = first(keys)
    raw = key.get_contents_as_string(headers=headers)
    if ext(key.key) == 'gz':
        # decompressobj allows decompression of partial streams
        raw = zlib.decompressobj(32 + zlib.MAX_WBITS).decompress(raw)
    # drop the (possibly truncated) final line so the sample parses cleanly;
    # this is generally cheap as we usually have a tiny amount of data
    try:
        index = raw.rindex(b'\r\n')
    except ValueError:
        index = raw.rindex(b'\n')
    raw = raw[:index]
    with tmpfile(ext(re.sub(r'\.gz$', '', data.path))) as fn:
        # we use wb because without an encoding boto returns bytes
        with open(fn, 'wb') as f:
            f.write(raw)
        yield fn
@discover.register((S3(CSV), S3(JSONLines)))
def discover_s3_line_delimited(c, length=8192, **kwargs):
    """Discover CSV and JSONLines files from S3."""
    # sample the first `length` bytes to a local file, then discover the
    # datashape from that local sample
    with sample(c, length=length) as fn:
        return discover(c.subtype(fn, **kwargs), **kwargs)
# raw string: `\.` is an invalid escape sequence in a plain string literal
@resource.register(r's3://.*\.csv(\.gz)?', priority=18)
def resource_s3_csv(uri, **kwargs):
    """Resolve ``s3://...*.csv[.gz]`` URIs to S3(CSV) resources."""
    return S3(CSV)(uri, **kwargs)
# raw string: `\.` is an invalid escape sequence in a plain string literal
@resource.register(r's3://.*\.txt(\.gz)?', priority=18)
def resource_s3_text(uri, **kwargs):
    """Resolve ``s3://...*.txt[.gz]`` URIs to S3(TextFile) resources."""
    # forward **kwargs like the sibling CSV/JSON resources do (they were
    # silently dropped before, so e.g. credentials never reached S3())
    return S3(TextFile)(uri, **kwargs)
# raw string: `\.` is an invalid escape sequence in a plain string literal
@resource.register(r's3://.*\.json(\.gz)?', priority=18)
def resource_s3_json_lines(uri, **kwargs):
    """Resolve ``s3://...*.json[.gz]`` URIs to S3(JSONLines) resources."""
    return S3(JSONLines)(uri, **kwargs)
@drop.register((S3(CSV), S3(JSON), S3(JSONLines), S3(TextFile)))
def drop_s3(s3):
    # delete only the object; the bucket may hold other data
    s3.object.delete()
@drop.register((Temp(S3(CSV)), Temp(S3(JSON)), Temp(S3(JSONLines)),
                Temp(S3(TextFile))))
def drop_temp_s3(s3):
    # temporary objects live in a throwaway bucket (see
    # text_to_temp_s3_text), so remove the bucket as well
    s3.object.delete()
    s3.object.bucket.delete()
@convert.register(Temp(CSV), (Temp(S3(CSV)), S3(CSV)))
@convert.register(Temp(JSON), (Temp(S3(JSON)), S3(JSON)))
@convert.register(Temp(JSONLines), (Temp(S3(JSONLines)), S3(JSONLines)))
@convert.register(Temp(TextFile), (Temp(S3(TextFile)), S3(TextFile)))
def s3_text_to_temp_text(s3, **kwargs):
    # download into a uniquely named hidden local file; wrapping the result
    # in Temp(...) ensures the file is cleaned up when garbage collected
    tmp_filename = '.%s.%s' % (uuid.uuid1(), ext(s3.path))
    s3.object.get_contents_to_filename(tmp_filename)
    return Temp(s3.subtype)(tmp_filename, **kwargs)
@append.register(CSV, S3(CSV))
@append.register(JSON, S3(JSON))
@append.register(JSONLines, S3(JSONLines))
@append.register(TextFile, S3(TextFile))
def s3_text_to_text(data, s3, **kwargs):
    # S3 -> local append is routed through a temporary local download
    return append(data, convert(Temp(s3.subtype), s3, **kwargs), **kwargs)
@append.register((S3(CSV), Temp(S3(CSV))), (S3(CSV), Temp(S3(CSV))))
@append.register((S3(JSON), Temp(S3(JSON))), (S3(JSON), Temp(S3(JSON))))
@append.register((S3(JSONLines), Temp(S3(JSONLines))),
                 (S3(JSONLines), Temp(S3(JSONLines))))
@append.register((S3(TextFile), Temp(S3(TextFile))),
                 (S3(TextFile), Temp(S3(TextFile))))
def temp_s3_to_s3(a, b, **kwargs):
    """Copy S3 object ``b`` onto S3 object ``a`` entirely server-side.

    boto2's ``Bucket.copy_key(new_key_name, src_bucket_name, src_key_name)``
    copies a source object *into the bucket it is called on*.  The original
    call passed ``b``'s key name as the destination and ``a``'s bucket as
    the source, copying ``a.bucket/b.name`` onto itself; the destination
    must be ``a``'s key and the source ``b``'s bucket/key.
    """
    a.object.bucket.copy_key(a.object.name, b.object.bucket.name,
                             b.object.name)
    return a
@convert.register(Temp(S3(CSV)), (CSV, Temp(CSV)))
@convert.register(Temp(S3(JSON)), (JSON, Temp(JSON)))
@convert.register(Temp(S3(JSONLines)), (JSONLines, Temp(JSONLines)))
@convert.register(Temp(S3(TextFile)), (TextFile, Temp(TextFile)))
def text_to_temp_s3_text(data, **kwargs):
    # `persistent_type` unwraps a Temp(...) input so the matching S3 subtype
    # is chosen; upload goes to a freshly named (uuid) bucket and key
    subtype = getattr(data, 'persistent_type', type(data))
    uri = 's3://%s/%s.%s' % (uuid.uuid1(), uuid.uuid1(), ext(data.path))
    return append(Temp(S3(subtype))(uri, **kwargs), data)
@append.register((S3(CSV), S3(JSON), S3(JSONLines), S3(TextFile)),
                 (pd.DataFrame, chunks(pd.DataFrame), (list, Iterator)))
def anything_to_s3_text(s3, o, **kwargs):
    # serialize the in-memory data to a temporary local file of the matching
    # text type, then upload that file
    return append(s3, convert(Temp(s3.subtype), o, **kwargs), **kwargs)
@append.register(S3(JSONLines), (JSONLines, Temp(JSONLines)))
@append.register(S3(JSON), (JSON, Temp(JSON)))
@append.register(S3(CSV), (CSV, Temp(CSV)))
@append.register(S3(TextFile), (TextFile, Temp(TextFile)))
def append_text_to_s3(s3, data, **kwargs):
    # NOTE(review): despite the `append` name, set_contents_from_filename
    # replaces the S3 object's contents with the local file wholesale —
    # confirm that overwriting is the intended semantics here
    s3.object.set_contents_from_filename(data.path)
    return s3
# HDFS support is optional: these registrations only exist when the hdfs
# backend imports cleanly
try:
    from .hdfs import HDFS
except ImportError:
    pass
else:
    @append.register(S3(JSON), HDFS(JSON))
    @append.register(S3(JSONLines), HDFS(JSONLines))
    @append.register(S3(CSV), HDFS(CSV))
    @append.register(S3(TextFile), HDFS(TextFile))
    @append.register(HDFS(JSON), S3(JSON))
    @append.register(HDFS(JSONLines), S3(JSONLines))
    @append.register(HDFS(CSV), S3(CSV))
    @append.register(HDFS(TextFile), S3(TextFile))
    def other_remote_text_to_s3_text(a, b, **kwargs):
        # there is no direct HDFS<->S3 transfer: raising
        # MDNotImplementedError tells multipledispatch to fall back to the
        # next-best registered path
        raise MDNotImplementedError()
# SSH support is optional: these registrations only exist when the ssh
# backend imports cleanly
try:
    from .ssh import connect, _SSH, SSH
except ImportError:
    pass
else:
    @append.register(S3(JSON), SSH(JSON))
    @append.register(S3(JSONLines), SSH(JSONLines))
    @append.register(S3(CSV), SSH(CSV))
    @append.register(S3(TextFile), SSH(TextFile))
    def remote_text_to_s3_text(a, b, **kwargs):
        # SSH -> S3 is routed through a temporary local copy
        return append(a, convert(Temp(b.subtype), b, **kwargs), **kwargs)
    @append.register(_SSH, _S3)
    def s3_to_ssh(ssh, s3, url_timeout=600, **kwargs):
        # S3 -> SSH: have the remote host download the object directly with
        # wget, appending its bytes to the target path
        if s3.s3.anon:
            # anonymous buckets are reachable via the public HTTPS endpoint
            url = 'https://%s.s3.amazonaws.com/%s' % (s3.bucket, s3.object.name)
        else:
            # otherwise generate a presigned URL valid for `url_timeout` seconds
            url = s3.object.generate_url(url_timeout)
        command = "wget '%s' -qO- >> '%s'" % (url, ssh.path)
        conn = connect(**ssh.auth)
        _, stdout, stderr = conn.exec_command(command)
        # block until the remote command finishes and surface any failure
        exit_status = stdout.channel.recv_exit_status()
        if exit_status:
            raise ValueError('Error code %d, message: %r' % (exit_status,
                                                             stderr.read()))
        return ssh
| bsd-3-clause |
phobson/pygridtools | pygridtools/misc.py | 2 | 13682 | from collections import OrderedDict
from shapely.geometry import Point, Polygon
import numpy
import matplotlib.path as mpath
import pandas
from shapely import geometry
import geopandas
from pygridgen import csa
from pygridtools import validate
def make_poly_coords(xarr, yarr, zpnt=None, triangles=False):
    """ Build the vertex-coordinate array for one quadrilateral (or
    triangular) grid cell, suitable for building geometries in
    shapefiles via fiona.

    Parameters
    ----------
    xarr, yarr : numpy arrays
        2x2 arrays of the x and y coordinates of the cell's vertices.
    zpnt : optional float or None (default)
        If provided, this elevation value is attached to every vertex.
    triangles : optional bool (default = False)
        If True, cells that degenerate to three vertices are allowed.

    Returns
    -------
    coords : numpy array or None
        Vertices ordered around the cell perimeter, or None when the
        cell is masked out.
    """
    def _perimeter(arr):
        # walk the top row left-to-right, then the bottom row
        # right-to-left, dropping NaN vertices along the way
        ring = numpy.hstack([arr[0, :], arr[1, ::-1]])
        return ring[~numpy.isnan(ring)]

    x = _perimeter(xarr)
    y = _perimeter(yarr)
    # the cell is unusable when its x-array carries an active mask, unless
    # triangles are permitted and exactly three vertices survive
    cell_is_masked = (
        isinstance(xarr, numpy.ma.MaskedArray) and
        xarr.mask.sum() != 0 and
        not (triangles and len(x) == 3)
    )
    if cell_is_masked:
        return None
    if zpnt is None:
        return numpy.vstack([x, y]).T
    z = numpy.repeat(zpnt, x.shape[0])
    return numpy.vstack([x, y, z]).T
def make_record(ID, coords, geomtype, props):
    """ Creates a record to be appended to a GIS file via *geopandas*.
    Parameters
    ----------
    ID : int
        The record ID number
    coords : tuple or array-like
        The x-y coordinates of the geometry. For Points, just a tuple.
        An array or list of tuples for LineStrings or Polygons
    geomtype : string
        A valid GDAL/OGR geometry specification (e.g. LineString, Point,
        Polygon)
    props : dict or collections.OrderedDict
        A dict-like object defining the attributes of the record
    Returns
    -------
    record : dict
        A nested dictionary suitable for a *geopandas.GeoDataFrame*
    Notes
    -----
    This will ignore the mask of a MaskedArray. That might be bad.
    """
    if geomtype not in ['Point', 'LineString', 'Polygon']:
        # fixed typo in the error message ("suppered" -> "supported")
        raise ValueError('Geometry {} not supported'.format(geomtype))
    if isinstance(coords, numpy.ma.MaskedArray):
        # deliberately drop the mask; the raw data are used as-is
        coords = coords.data
    if isinstance(coords, numpy.ndarray):
        coords = coords.tolist()
    record = {
        'id': ID,
        'geometry': {
            # GeoJSON-style nesting: Points are bare pairs, LineStrings
            # and Polygons are wrapped in an outer list
            'coordinates': coords if geomtype == 'Point' else [coords],
            'type': geomtype
        },
        'properties': props
    }
    return record
def interpolate_bathymetry(bathy, x_points, y_points, xcol='x', ycol='y', zcol='z'):
    """ Interpolates x-y-z point data onto the grid of a Gridgen object.
    A cubic-spline approximation (pygridgen's CSA) is used to estimate
    the elevation at the grid centers.
    Parameters
    ----------
    bathy : pandas.DataFrame or None
        The bathymetry data stored as x-y-z points in a DataFrame.
        When None, a flat (all-zero) surface is fabricated.
    [x|y]_points : numpy arrays
        The x, y locations onto which the bathymetry will be
        interpolated.
    xcol/ycol/zcol : optional strings
        Column names for each of the quantities defining the elevation
        pints. Defaults are "x/y/z".
    Returns
    -------
    gridbathy : pandas.DataFrame
        The bathymetry for just the area covering the grid.
    """
    try:
        import pygridgen
    except ImportError:  # pragma: no cover
        raise ImportError("`pygridgen` not installed. Cannot interpolate bathymetry.")
    if bathy is None:
        # no data provided -> fabricate a flat zero-elevation surface,
        # carrying over any mask from the x coordinates
        elev = numpy.zeros(x_points.shape)
        if isinstance(x_points, numpy.ma.MaskedArray):
            elev = numpy.ma.MaskedArray(data=elev, mask=x_points.mask)
        bathy = pandas.DataFrame({
            xcol: x_points.flatten(),
            ycol: y_points.flatten(),
            zcol: elev.flatten()
        })
    else:
        bathy = bathy[[xcol, ycol, zcol]]
    # find where the bathymetry is inside our grid
    grididx = (
        (bathy[xcol] <= x_points.max()) &
        (bathy[xcol] >= x_points.min()) &
        (bathy[ycol] <= y_points.max()) &
        (bathy[ycol] >= y_points.min())
    )
    gridbathy = bathy[grididx].dropna(how='any')
    # fill in NaNs with something outside of the bounds
    # (keeps the CSA evaluation well-defined at masked grid nodes)
    xx = x_points.copy()
    yy = y_points.copy()
    xx[numpy.isnan(x_points)] = x_points.max() + 5
    yy[numpy.isnan(y_points)] = y_points.max() + 5
    # use cubic-spline approximation to interpolate the grid
    interpolate = csa.CSA(
        gridbathy[xcol].values,
        gridbathy[ycol].values,
        gridbathy[zcol].values
    )
    return interpolate(xx, yy)
def padded_stack(a, b, how='vert', where='+', shift=0, padval=numpy.nan):
    """ Merge 2-dimensional numpy arrays with different shapes.
    Parameters
    ----------
    a, b : numpy arrays
        The arrays to be merged
    how : optional string (default = 'vert')
        The method through wich the arrays should be stacked. `'Vert'`
        is analogous to `numpy.vstack`. `'Horiz'` maps to `numpy.hstack`.
    where : optional string (default = '+')
        The placement of the arrays relative to each other. Keeping in
        mind that the origin of an array's index is in the upper-left
        corner, `'+'` indicates that the second array will be placed
        at higher index relative to the first array. Essentially:
         - if how == 'vert'
            - `'+'` -> `a` is above (higher index) `b`
            - `'-'` -> `a` is below (lower index) `b`
         - if how == 'horiz'
            - `'+'` -> `a` is to the left of `b`
            - `'-'` -> `a` is to the right of `b`
        See the examples for more info.
    shift : int (default = 0)
        The number of indices the second array should be shifted in
        axis other than the one being merged. In other words, vertically
        stacked arrays can be shifted horizontally, and horizontally
        stacked arrays can be shifted vertically.
    padval : optional, same type as array (default = numpy.nan)
        Value with which the arrays will be padded.
    Returns
    -------
    Stacked : numpy array
        The merged and padded array
    Examples
    --------
    >>> import pygridtools as pgt
    >>> a = numpy.arange(12).reshape(4, 3) * 1.0
    >>> b = numpy.arange(8).reshape(2, 4) * -1.0
    >>> pgt.padded_stack(a, b, how='vert', where='+', shift=2)
    array([[ 0., 1., 2., -, -, -],
           [ 3., 4., 5., -, -, -],
           [ 6., 7., 8., -, -, -],
           [ 9., 10., 11., -, -, -],
           [ -, -, -0., -1., -2., -3.],
           [ -, -, -4., -5., -6., -7.]])
    >>> pgt.padded_stack(a, b, how='h', where='-', shift=-1)
    array([[-0., -1., -2., -3., -, -, -],
           [-4., -5., -6., -7., 0., 1., 2.],
           [ -, -, -, -, 3., 4., 5.],
           [ -, -, -, -, 6., 7., 8.],
           [ -, -, -, -, 9., 10., 11.]])
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)
    if where == '-':
        # '-' placement is '+' with the operands swapped (and the shift
        # mirrored), so recurse once with the roles reversed
        stacked = padded_stack(b, a, shift=-1 * shift, where='+', how=how)
    elif where == '+':
        if how.lower() in ('horizontal', 'horiz', 'h'):
            # horizontal stacking is vertical stacking of the transposes
            stacked = padded_stack(a.T, b.T, shift=shift, where=where,
                                   how='v').T
        elif how.lower() in ('vertical', 'vert', 'v'):
            # work out left/right padding so the two arrays end up with
            # identical column counts after applying `shift`
            a_pad_left = 0
            a_pad_right = 0
            b_pad_left = 0
            b_pad_right = 0
            # positive shift pushes `b` to the right; negative pushes `a`
            diff_cols = a.shape[1] - (b.shape[1] + shift)
            if shift > 0:
                b_pad_left = shift
            elif shift < 0:
                a_pad_left = abs(shift)
            # whichever array is narrower after shifting gets right-padding
            if diff_cols > 0:
                b_pad_right = diff_cols
            else:
                a_pad_right = abs(diff_cols)
            v_pads = (0, 0)
            x_pads = (v_pads, (a_pad_left, a_pad_right))
            y_pads = (v_pads, (b_pad_left, b_pad_right))
            mode = 'constant'
            fill = (padval, padval)
            stacked = numpy.vstack([
                numpy.pad(a, x_pads, mode=mode, constant_values=fill),
                numpy.pad(b, y_pads, mode=mode, constant_values=fill)
            ])
        else:
            gen_msg = 'how must be either "horizontal" or "vertical"'
            raise ValueError(gen_msg)
    else:
        raise ValueError('`where` must be either "+" or "-"')
    return stacked
def padded_sum(padded, window=1):
    """Sum the four `window`-offset corner neighbors of each interior cell."""
    upper = padded[:-window, :]
    lower = padded[window:, :]
    return (lower[:, window:] + upper[:, :-window] +
            upper[:, window:] + lower[:, :-window])
def mask_with_polygon(x, y, *polyverts, inside=True):
    """ Mask x-y arrays inside or outside a polygon
    Parameters
    ----------
    x, y : array-like
        NxM arrays of x- and y-coordinates.
    polyverts : sequence of a polygon's vertices
        A sequence of x-y pairs for each vertex of the polygon.
    inside : bool (default is True)
        Toggles masking the inside or outside the polygon
    Returns
    -------
    mask : bool array
        The NxM mask that can be applied to ``x`` and ``y``.
    """
    # validate input
    polyverts = [validate.polygon(pv) for pv in polyverts]
    points = validate.xy_array(x, y, as_pairs=True)
    # compute the mask: a point is masked when it falls inside *any* of
    # the supplied polygons
    mask = numpy.dstack([
        mpath.Path(pv).contains_points(points).reshape(x.shape)
        for pv in polyverts
    ]).any(axis=-1)
    if not inside:
        # flip so that everything *outside* the polygons is masked instead
        mask = ~mask
    return mask
def gdf_of_cells(X, Y, mask, crs, elev=None, triangles=False):
    """ Saves a GIS file of quadrilaterals representing grid cells.
    Parameters
    ----------
    X, Y : numpy (masked) arrays, same dimensions
        Attributes of the gridgen object representing the x- and y-coords.
    mask : numpy array or None
        Array describing which cells to mask (exclude) from the output.
        Shape should be N-1 by M-1, where N and M are the dimensions of
        `X` and `Y`.
    crs : string
        A geopandas/proj/fiona-compatible string describing the coordinate
        reference system of the x/y values.
    elev : optional array or None (defauly)
        The elevation of the grid cells. Shape should be N-1 by M-1,
        where N and M are the dimensions of `X` and `Y` (like `mask`).
    triangles : optional bool (default = False)
        If True, triangles can be included
    Returns
    -------
    geopandas.GeoDataFrame
    """
    # check X, Y shapes
    Y = validate.elev_or_mask(X, Y, 'Y', offset=0)
    # check elev shape
    elev = validate.elev_or_mask(X, elev, 'elev', offset=0)
    # check the mask shape
    mask = validate.elev_or_mask(X, mask, 'mask', offset=1)
    X = numpy.ma.masked_invalid(X)
    Y = numpy.ma.masked_invalid(Y)
    ny, nx = X.shape
    row = 0
    geodata = []
    # each cell (jj, ii) is bounded by the 2x2 block of nodes at
    # [jj:jj+2, ii:ii+2]
    for ii in range(nx - 1):
        for jj in range(ny - 1):
            # skip cells with any masked corner node or an explicit mask
            if not (numpy.any(X.mask[jj:jj + 2, ii:ii + 2]) or mask[jj, ii]):
                row += 1
                Z = elev[jj, ii]
                # build the array of coordinates around the cell perimeter
                coords = make_poly_coords(
                    xarr=X[jj:jj + 2, ii:ii + 2],
                    yarr=Y[jj:jj + 2, ii:ii + 2],
                    zpnt=Z, triangles=triangles
                )
                # build the attributes (ii/jj are 1-offset by 2 to match
                # the grid numbering convention used elsewhere)
                record = OrderedDict(
                    id=row, ii=ii + 2, jj=jj + 2, elev=Z,
                    ii_jj='{:02d}_{:02d}'.format(ii + 2, jj + 2),
                    geometry=Polygon(shell=coords)
                )
                # append the record only if the coordinates are not masked
                # (masked = beyond the river boundary)
                # NOTE(review): Polygon(shell=coords) above is still built
                # when coords is None — confirm shapely tolerates that here
                if coords is not None:
                    geodata.append(record)
    gdf = geopandas.GeoDataFrame(geodata, crs=crs, geometry='geometry')
    return gdf
def gdf_of_points(X, Y, crs, elev=None):
    """ Saves grid-related attributes of a pygridgen.Gridgen object to a
    GIS file with geomtype = 'Point'.
    Parameters
    ----------
    X, Y : numpy (masked) arrays, same dimensions
        Attributes of the gridgen object representing the x- and y-coords.
    crs : string
        A geopandas/proj/fiona-compatible string describing the coordinate
        reference system of the x/y values.
    elev : optional array or None (defauly)
        The elevation of the grid cells. Array dimensions must be 1 less than
        X and Y.
    Returns
    -------
    geopandas.GeoDataFrame
    """
    # check that X and Y are have the same shape, NaN cells
    X, Y = validate.equivalent_masks(X, Y)
    # check elev shape
    elev = validate.elev_or_mask(X, elev, 'elev', offset=0)
    # start writting or appending to the output
    row = 0
    geodata = []
    for ii in range(X.shape[1]):
        for jj in range(X.shape[0]):
            # check that nothing is masked (outside of the river)
            if not (X.mask[jj, ii]):
                row += 1
                # build the coords
                coords = (X[jj, ii], Y[jj, ii])
                # build the attributes (ii/jj offset by 2 to match the
                # grid numbering convention used elsewhere)
                record = OrderedDict(
                    id=int(row), ii=int(ii + 2), jj=int(jj + 2),
                    elev=float(elev[jj, ii]),
                    ii_jj='{:02d}_{:02d}'.format(ii + 2, jj + 2),
                    geometry=Point(coords)
                )
                geodata.append(record)
    gdf = geopandas.GeoDataFrame(geodata, crs=crs, geometry='geometry')
    return gdf
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
                     p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of k-Neighbors for points in X
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.
    n_neighbors : int
        Number of neighbors for each sample.
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.
    metric : string, default 'minkowski'
        The distance metric used to calculate the k-Neighbors for each sample
        point. The DistanceMetric class gives a list of available metrics.
        The default distance is 'euclidean' ('minkowski' metric with the p
        param equal to 2.)
    include_self: bool, default backward-compatible.
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibilty. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional
        additional keyword arguments for the metric function.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import kneighbors_graph
    >>> A = kneighbors_graph(X, 2)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  1.],
           [ 1.,  0.,  1.]])
    See also
    --------
    radius_neighbors_graph
    """
    if not isinstance(X, KNeighborsMixin):
        # raw data: fit a NearestNeighbors estimator on the fly
        X = NearestNeighbors(n_neighbors, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        # X is already a fitted estimator; its stored parameters must
        # agree with the explicitly requested ones
        _check_params(X, metric, p, metric_params)
    # query=None excludes each sample from its own neighborhood
    query = _query_include_self(X, include_self, mode)
    return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
                           p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted the points at a distance lower than
    radius.
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.
    radius : float
        Radius of neighborhoods.
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.
    metric : string, default 'minkowski'
        The distance metric used to calculate the neighbors within a
        given radius for each sample point. The DistanceMetric class
        gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the param equal to 2.)
    include_self: bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibilty. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional
        additional keyword arguments for the metric function.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  0.],
           [ 1.,  0.,  1.]])
    See also
    --------
    kneighbors_graph
    """
    if not isinstance(X, RadiusNeighborsMixin):
        # raw data: fit a NearestNeighbors estimator on the fly
        X = NearestNeighbors(radius=radius, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        # X is already a fitted estimator; its stored parameters must
        # agree with the explicitly requested ones
        _check_params(X, metric, p, metric_params)
    # query=None excludes each sample from its own neighborhood
    query = _query_include_self(X, include_self, mode)
    return X.radius_neighbors_graph(query, radius, mode)
| mit |
KarrLab/kinetic_datanator | datanator/util/rna_halflife_util.py | 1 | 6850 | from datanator_query_python.query import query_uniprot
from datanator.data_source import uniprot_nosql
from datanator.util import file_util
from datanator_query_python.util import mongo_util
from pathlib import Path, PurePath, PurePosixPath
import math
import pandas as pd
class RnaHLUtil(mongo_util.MongoUtil):
    def __init__(self, server=None, username=None, password=None, src_db=None,
                 des_db=None, protein_col=None, rna_col=None, authDB='admin', readPreference=None,
                 max_entries=float('inf'), verbose=False, cache_dir=None):
        """Set up MongoDB connections and helper managers.

        Args:
            server (:obj:`str`): MongoDB server address.
            username (:obj:`str`): MongoDB username.
            password (:obj:`str`): MongoDB password.
            src_db (:obj:`str`): database queried for uniprot data.
            des_db (:obj:`str`): database written to (rna halflife and uniprot loads).
            protein_col (:obj:`str`): uniprot/protein collection name.
            rna_col (:obj:`str`): rna halflife collection name.
            authDB (:obj:`str`): authentication database.
            readPreference (:obj:`str`): MongoDB read preference.
            max_entries (:obj:`int` or :obj:`float`): processing cap.
            verbose (:obj:`bool`): enable progress messages.
            cache_dir (:obj:`str`): directory used for downloaded files.
        """
        # connect to the destination database via the MongoUtil base class
        super().__init__(MongoDB=server, db=des_db, verbose=verbose, max_entries=max_entries,
                         username=username, password=password, authSource=authDB, readPreference=readPreference)
        # con_db returns (client, db, collection); only the collection is kept
        _, _, self.rna_hl_collection = self.con_db(rna_col)
        self.max_entries = max_entries
        self.verbose = verbose
        self.file_manager = file_util.FileUtil()
        self.cache_dir = cache_dir
        # reads uniprot data from the source database
        self.uniprot_query_manager = query_uniprot.QueryUniprot(username=username, password=password,
                                                                server=server, authSource=authDB,
                                                                database=src_db, collection_str=protein_col)
        # writes uniprot data into the destination database
        self.uniprot_collection_manager = uniprot_nosql.UniprotNoSQL(MongoDB=server, db=des_db, verbose=True,
                                                                     username=username, password=password, authSource=authDB, collection_str=protein_col)
def uniprot_names(self, results, count):
"""Extract protein_name and gene_name from returned
tuple of uniprot query function
Args:
results (:obj:`Iter`): pymongo cursor object.
count (:obj:`int`): Number of documents found.
Return:
(:obj:`tuple` of :obj:`str`): gene_name and protein_name
"""
if count == 0:
return '', ''
else:
for result in results:
gene_name = result['gene_name']
protein_name = result['protein_name']
return gene_name, protein_name
def fill_uniprot_by_oln(self, oln, species=None):
"""Fill uniprot collection using ordered locus name
Args:
oln (:obj:`str`): Ordered locus name
species (:obj:`list`): NCBI Taxonomy ID of the species
"""
gene_name, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_oln(oln.split(' or '), species=species)
if gene_name is None and protein_name is None: # no such entry in uniprot collection
self.uniprot_collection_manager.load_uniprot(query=True, msg=oln, species=species)
else:
return
def fill_uniprot_by_gn(self, gene_name, species=None):
"""Fill uniprot collection using gene name
Args:
gene_name (:obj:`str`): Ordered locus name
species (:obj:`list`): NCBI Taxonomy ID of the species
"""
protein_name = self.uniprot_query_manager.get_protein_name_by_gn(gene_name.split(' or '), species=species)
if protein_name is None: # no such entry in uniprot collection
self.uniprot_collection_manager.load_uniprot(query=True, msg=gene_name, species=species)
else:
return
def fill_uniprot_by_embl(self, embl, species=None):
"""Fill uniprot collection using EMBL data
Args:
embl (:obj:`str`): sequence embl data
species (:obj:`list`): NCBI Taxonomy ID of the species
"""
_, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_embl(embl.split(' or '), species=species)
if protein_name is None: # no such entry in uniprot collection
self.uniprot_collection_manager.load_uniprot(query=True, msg=embl, species=species)
else:
return
def make_df(self, url, sheet_name, header=0, names=None, usecols=None,
            skiprows=None, nrows=None, na_values=None, file_type='xlsx',
            file_name=None):
    """Read an online excel file as a dataframe.

    Args:
        url (:obj:`str`): excel file url.
        sheet_name (:obj:`str`): name of sheet in xlsx.
        header (:obj:`int`): Row (0-indexed) to use for the column labels
            of the parsed DataFrame.
        names (:obj:`list`): list of column names to use.
        usecols (:obj:`int` or :obj:`list` or :obj:`str`): Return a subset
            of the columns.
        skiprows (:obj:`int` or :obj:`list`): rows to skip at the start.
        nrows (:obj:`int`): number of rows to parse. Defaults to None.
        na_values (:obj:`list`): additional strings to recognize as NaN.
        file_type (:obj:`str`): downloaded file type ('xlsx', 'xls' or
            'zip'). Defaults to 'xlsx'.
        file_name (:obj:`str`): name of the file of interest inside a zip
            archive (only used when ``file_type == 'zip'``).

    Returns:
        (:obj:`pandas.DataFrame`): xlsx transformed to pandas.DataFrame.

    Raises:
        ValueError: if ``file_type`` is not one of the supported types.
    """
    # BUG FIX: the original test was ``file_type == 'xlsx' or 'xls'``,
    # which is always truthy (non-empty string), so the 'zip' branch was
    # unreachable and unknown types silently fell into the excel path.
    if file_type in ('xlsx', 'xls'):
        data = pd.read_excel(url, sheet_name=sheet_name, nrows=nrows, header=header,
                             names=names, usecols=usecols, skiprows=skiprows,
                             na_values=na_values)
    elif file_type == 'zip':
        # Extract the archive into the cache directory, then parse the
        # requested member file.
        self.file_manager.unzip_file(url, self.cache_dir)
        cwd = PurePosixPath(self.cache_dir).joinpath(file_name)
        data = pd.read_excel(cwd, sheet_name=sheet_name, nrows=nrows, header=header,
                             names=names, usecols=usecols, skiprows=skiprows,
                             na_values=na_values)
    else:
        # Previously this would have raised UnboundLocalError at `return`.
        raise ValueError("Unsupported file_type: {}".format(file_type))
    return data
def fill_uniprot_with_df(self, df, identifier, identifier_type='oln', species=None):
    """Fill the uniprot collection from an excel-derived dataframe.

    Args:
        df (:obj:`pandas.DataFrame`): dataframe to be inserted into the
            uniprot collection. Assumed to conform to the schema required
            by ``load_uniprot`` in uniprot.py.
        identifier (:obj:`str`): name of the column holding the identifier.
        identifier_type (:obj:`str`): type of identifier, i.e. 'oln',
            'gene_name' or 'sequence_embl'.
        species (:obj:`list`): NCBI Taxonomy ID of the species.
    """
    total = len(df.index)
    for index, row in df.iterrows():
        # Stop once the configured entry cap is reached.
        if index == self.max_entries:
            break
        if index % 10 == 0 and self.verbose:
            print("Inserting locus {}: {} out of {} into uniprot collection.".format(
                index, row[identifier], total))
        # Comma-separated identifiers become a single 'a or b' query string.
        name = ' or '.join(row[identifier].split(','))
        if identifier_type == 'oln':
            self.fill_uniprot_by_oln(name, species=species)
        elif identifier_type == 'gene_name':
            self.fill_uniprot_by_gn(name, species=species)
        elif identifier_type == 'sequence_embl':
            self.fill_uniprot_by_embl(name, species=species)
liulohua/ml_tutorial | linear_regression/plot_error_surface.py | 1 | 1119 | import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def fun(w, b):
    """Mean squared error of the line ``w*x + b`` against the target line
    ``5*x + 5``, evaluated on a fresh random sample of 100 points drawn
    uniformly from [-10, 10] (noise term left disabled, as before)."""
    sample_size = 100
    xs = np.random.uniform(-10., 10., sample_size)
    true_w, true_b = 5., 5.
    target = true_w * xs + true_b  # + np.random.randn(len(xs)) * 3. (noise disabled)
    predicted = w * xs + b
    return np.mean(np.square(predicted - target) / 2.0)
# --- Legacy brute-force version of the surface computation, kept for reference ---
# error_surface = np.zeros([100, 100])
# w_count = 0
# for w in np.linspace(-10, 20, 100):
#     b_count = 0
#     for b in np.linspace(-10, 20, 100):
#         y = w * x + b
#         error = np.mean(np.square(y - y_) / 2.0)
#         error_surface[w_count, b_count] = error
#         b_count += 1
#     w_count += 1

# Plot the error surface E(w, b) on a 100x100 grid around the true
# parameters (w=5, b=5).
fig = plt.figure()
ax = Axes3D(fig)
w = np.linspace(4, 6, 100)
b = np.linspace(2, 8, 100)
W, B = np.meshgrid(w, b)
# Evaluate the loss at every (w, b) grid point, then restore the grid shape.
E = np.array([fun(w, b) for w, b in zip(np.ravel(W), np.ravel(B))])
E = E.reshape(W.shape)
ax.plot_surface(W, B, E, rstride=1, cstride=1, cmap=plt.cm.hot)
ax.set_xlabel('W Label')
ax.set_ylabel('B Label')
ax.set_zlabel('E Label')
plt.show()
| apache-2.0 |
olafhauk/mne-python | mne/viz/tests/test_misc.py | 9 | 10531 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import os.path as op
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate, pick_events)
from mne.datasets import testing
from mne.filter import create_filter
from mne.io import read_raw_fif
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate, plot_filter, plot_csd)
from mne.viz.misc import _handle_event_colors
from mne.viz.utils import _get_color_list
from mne.utils import requires_nibabel, run_tests_if_main
from mne.time_frequency import CrossSpectralDensity
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
    """Load the small test raw file, preloaded into memory."""
    raw = read_raw_fif(raw_fname, preload=True)
    return raw
def _get_events():
    """Load the test events file."""
    events = read_events(event_fname)
    return events
def test_plot_filter():
    """Test filter plotting."""
    l_freq, h_freq, sfreq = 2., 40., 1000.
    data = np.zeros(5000)
    # Ideal frequency response used as the overlay in the plots.
    freq = [0, 2, 40, 50, 500]
    gain = [0, 1, 1, 0, 0]
    # FIR filter, plotted with and without the ideal-response overlay.
    h = create_filter(data, sfreq, l_freq, h_freq, fir_design='firwin2')
    plot_filter(h, sfreq)
    plt.close('all')
    plot_filter(h, sfreq, freq, gain)
    plt.close('all')
    # IIR filter with default params, then with explicit (b, a) output.
    iir = create_filter(data, sfreq, l_freq, h_freq, method='iir')
    plot_filter(iir, sfreq)
    plt.close('all')
    plot_filter(iir, sfreq, freq, gain)
    plt.close('all')
    iir_ba = create_filter(data, sfreq, l_freq, h_freq, method='iir',
                           iir_params=dict(output='ba'))
    plot_filter(iir_ba, sfreq, freq, gain)
    plt.close('all')
    # The ``plot`` argument selects the panels drawn; check the resulting
    # axes count for each accepted form (default, tuple, list, bare str).
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear')
    assert len(fig.axes) == 3
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot=('time', 'delay'))
    assert len(fig.axes) == 2
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot=['magnitude', 'delay'])
    assert len(fig.axes) == 2
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot='magnitude')
    assert len(fig.axes) == 1
    plt.close('all')
    # NB: ('magnitude') is just the string 'magnitude', not a tuple.
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot=('magnitude'))
    assert len(fig.axes) == 1
    plt.close('all')
    with pytest.raises(ValueError, match='Invalid value for the .plot'):
        plot_filter(h, sfreq, freq, gain, plot=('turtles'))
    # Plotting into user-supplied axes: the count must match the panels.
    _, axes = plt.subplots(1)
    fig = plot_filter(h, sfreq, freq, gain, plot=('magnitude'), axes=axes)
    assert len(fig.axes) == 1
    _, axes = plt.subplots(2)
    fig = plot_filter(h, sfreq, freq, gain, plot=('magnitude', 'delay'),
                      axes=axes)
    assert len(fig.axes) == 2
    plt.close('all')
    _, axes = plt.subplots(1)
    with pytest.raises(ValueError, match='Length of axes'):
        plot_filter(h, sfreq, freq, gain,
                    plot=('magnitude', 'delay'), axes=axes)
def test_plot_cov():
    """Test plotting of covariances."""
    raw = _get_raw()
    cov = read_cov(cov_fname)
    # The RuntimeWarning about projections is expected with proj=True;
    # excluding most channels keeps the figure small and fast.
    with pytest.warns(RuntimeWarning, match='projection'):
        fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
    plt.close('all')
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
    """Test plotting of BEM contours."""
    # Error paths: missing subject, bad orientation, bad slices argument.
    with pytest.raises(IOError, match='MRI file .* not found'):
        plot_bem(subject='bad-subject', subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match="Invalid value for the 'orientation"):
        plot_bem(subject='sample', subjects_dir=subjects_dir,
                 orientation='bad-ori')
    with pytest.raises(ValueError, match="sorted 1D array"):
        plot_bem(subject='sample', subjects_dir=subjects_dir, slices=[0, 500])
    # Valid calls: axes/collection counts encode the number of surfaces drawn.
    fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
                   orientation='sagittal', slices=[25, 50])
    assert len(fig.axes) == 2
    assert len(fig.axes[0].collections) == 3  # 3 BEM surfaces ...
    fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
                   orientation='coronal', brain_surfaces='white')
    assert len(fig.axes[0].collections) == 5  # 3 BEM surfaces + 2 hemis
    fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
                   orientation='coronal', slices=[25, 50], src=src_fname)
    assert len(fig.axes[0].collections) == 4  # 3 BEM surfaces + 1 src contour
    # A source space in head coordinates must be rejected.
    with pytest.raises(ValueError, match='MRI coordinates, got head'):
        plot_bem(subject='sample', subjects_dir=subjects_dir,
                 src=inv_fname)
def test_event_colors():
    """Test color assignment."""
    events = pick_events(_get_events(), include=[1, 2])
    unique_events = set(events[:, 2])
    # make sure defaults work
    colors = _handle_event_colors(None, unique_events, dict())
    default_colors = _get_color_list()
    assert colors[1] == default_colors[0]
    # make sure custom color overrides default
    colors = _handle_event_colors(color_dict=dict(foo='k', bar='#facade'),
                                  unique_events=unique_events,
                                  event_id=dict(foo=1, bar=2))
    assert colors[1] == 'k'        # 'foo' -> event 1
    assert colors[2] == '#facade'  # 'bar' -> event 2
def test_plot_events():
    """Test plotting events."""
    event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
    color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
    raw = _get_raw()
    events = _get_events()
    # Basic call signatures.
    fig = plot_events(events, raw.info['sfreq'], raw.first_samp)
    assert fig.axes[0].get_legend() is not None  # legend even with no event_id
    plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
    # Test plotting events without sfreq
    plot_events(events, first_samp=raw.first_samp)
    # Warnings for mismatched event_id / color dictionaries.
    with pytest.warns(RuntimeWarning, match='will be ignored'):
        fig = plot_events(events, raw.info['sfreq'], raw.first_samp,
                          event_id=event_labels)
    assert fig.axes[0].get_legend() is not None
    with pytest.warns(RuntimeWarning, match='Color was not assigned'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    color=color)
    with pytest.warns(RuntimeWarning, match=r'vent \d+ missing from event_id'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=event_labels, color=color)
    multimatch = r'event \d+ missing from event_id|in the color dict but is'
    with pytest.warns(RuntimeWarning, match=multimatch):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id={'aud_l': 1}, color=color)
    # on_missing behavior when event_id contains IDs absent from the data.
    extra_id = {'missing': 111}
    with pytest.raises(ValueError, match='from event_id is not present in'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id)
    with pytest.raises(RuntimeError, match='No usable event IDs'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id, on_missing='ignore')
    extra_id = {'aud_l': 1, 'missing': 111}
    with pytest.warns(RuntimeWarning, match='from event_id is not present in'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id, on_missing='warn')
    with pytest.warns(RuntimeWarning, match='event 2 missing'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id, on_missing='ignore')
    # Degenerate inputs: a single event type, then no events at all.
    events = events[events[:, 2] == 1]
    assert len(events) > 0
    plot_events(events, raw.info['sfreq'], raw.first_samp,
                event_id=extra_id, on_missing='ignore')
    with pytest.raises(ValueError, match='No events'):
        plot_events(np.empty((0, 3)))
    plt.close('all')
@testing.requires_testing_data
def test_plot_source_spectrogram():
    """Test plotting of source spectrogram."""
    sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
                                            'bem', 'sample-oct-6-src.fif'))
    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_times = 5
    n_verts = sum(len(v) for v in vertices)
    # Constant all-ones data is enough to exercise the plotting path.
    stc_data = np.ones((n_verts, n_times))
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
    # Invalid inputs: empty argument lists and bad tmin/tmax values.
    pytest.raises(ValueError, plot_source_spectrogram, [], [])
    pytest.raises(ValueError, plot_source_spectrogram, [stc, stc],
                  [[1, 2], [3, 4]], tmin=0)
    pytest.raises(ValueError, plot_source_spectrogram, [stc, stc],
                  [[1, 2], [3, 4]], tmax=7)
    plt.close('all')
@pytest.mark.slowtest
@testing.requires_testing_data
def test_plot_snr():
    """Test plotting SNR estimate."""
    inv = read_inverse_operator(inv_fname)
    evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
    # Smoke test only: no return-value assertions, just that it plots.
    plot_snr_estimate(evoked, inv)
    plt.close('all')
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
    """Test plotting dipole amplitudes."""
    dipoles = read_dipole(dip_fname)
    # Smoke test with show=False so no window pops up.
    dipoles.plot_amplitudes(show=False)
    plt.close('all')
def test_plot_csd():
    """Test plotting of CSD matrices."""
    # Minimal 2-channel CSD with a single frequency bin.
    csd = CrossSpectralDensity([1, 2, 3], ['CH1', 'CH2'],
                               frequencies=[(10, 20)], n_fft=1,
                               tmin=0, tmax=1,)
    plot_csd(csd, mode='csd')  # Plot cross-spectral density
    plot_csd(csd, mode='coh')  # Plot coherence
    plt.close('all')


# Allow running this module directly as a script.
run_tests_if_main()
| bsd-3-clause |
mathemage/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_iris_h2o_vs_sciKmeans.py | 8 | 1469 | from __future__ import print_function
from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
from h2o.estimators.kmeans import H2OKMeansEstimator
def iris_h2o_vs_sciKmeans():
    """Check that H2O k-means started from fixed user-supplied centers
    converges to the same centroids as scikit-learn on the iris data."""
    # Connect to a pre-existing cluster
    # connect to localhost:54321
    iris_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
    iris_sci = np.genfromtxt(pyunit_utils.locate("smalldata/iris/iris.csv"), delimiter=',')
    iris_sci = iris_sci[:, 0:4]  # keep the 4 numeric features, drop species

    # Identical starting centroids for both implementations.
    s = [[4.9, 3.0, 1.4, 0.2],
         [5.6, 2.5, 3.9, 1.1],
         [6.5, 3.0, 5.2, 2.0]]
    start = h2o.H2OFrame(s)

    h2o_km = H2OKMeansEstimator(k=3, user_points=start, standardize=False)
    h2o_km.train(x=list(range(4)), training_frame=iris_h2o)

    # n_init=1 so scikit-learn runs exactly once from the given centers.
    sci_km = KMeans(n_clusters=3, init=np.asarray(s), n_init=1)
    sci_km.fit(iris_sci)

    # Log.info("Cluster centers from H2O:")
    print("Cluster centers from H2O:")
    h2o_centers = h2o_km.centers()
    print(h2o_centers)

    # Log.info("Cluster centers from scikit:")
    print("Cluster centers from scikit:")
    sci_centers = sci_km.cluster_centers_.tolist()

    # BUG FIX: the original compared the signed difference, so any large
    # *negative* deviation would pass; compare the absolute difference.
    for hcenter, scenter in zip(h2o_centers, sci_centers):
        for hpoint, spoint in zip(hcenter, scenter):
            assert abs(hpoint - spoint) < 1e-10, "expected centers to be the same"
if __name__ == "__main__":
    # Run standalone: the helper boots its own H2O cloud first.
    pyunit_utils.standalone_test(iris_h2o_vs_sciKmeans)
else:
    # Imported by the test runner: a cloud is assumed to exist already.
    iris_h2o_vs_sciKmeans()
| apache-2.0 |
ceefour/opencog | opencog/python/spatiotemporal/temporal_events/membership_function.py | 34 | 4673 | from math import fabs
from random import random
from scipy.stats.distributions import rv_frozen
from spatiotemporal.time_intervals import TimeInterval
from spatiotemporal.unix_time import random_time, UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.functions import Function, FunctionPiecewiseLinear,\
FunctionHorizontalLinear, FunctionComposite, FUNCTION_ZERO, FUNCTION_ONE, FunctionLinear
from numpy import PINF as POSITIVE_INFINITY, NINF as NEGATIVE_INFINITY
from utility.numeric.globals import EPSILON
__author__ = 'keyvan'
class MembershipFunction(Function):
    """Membership function of a temporal event.

    The value at time t is ``P(beginning <= t) - P(ending <= t)``, i.e.
    (presumably) the probability that the event has begun but not yet
    ended at t -- confirm against TemporalEvent's distribution semantics.
    """
    def __init__(self, temporal_event):
        # The temporal event itself serves as the function's domain.
        Function.__init__(self, function_undefined=FUNCTION_ZERO, domain=temporal_event)

    def call_on_single_point(self, time_step):
        # CDF of the beginning minus CDF of the ending at this instant.
        return self.domain.distribution_beginning.cdf(time_step) - self.domain.distribution_ending.cdf(time_step)
class ProbabilityDistributionPiecewiseLinear(list, TimeInterval, rv_frozen):
    """Probability distribution with a piecewise-linear CDF.

    Constructed from a dict mapping time points to CDF values.  The PDF is
    the piecewise-constant derivative of the CDF.  Subclasses ``list`` (the
    sorted CDF input points), ``TimeInterval``, and scipy's ``rv_frozen``
    so it can stand in where a frozen distribution is expected.
    """
    dist = 'ProbabilityDistributionPiecewiseLinear'
    _mean = None
    asd = None  # NOTE(review): appears unused in this class -- confirm before removing

    def __init__(self, dictionary_input_output):
        cdf_input_list, cdf_output_list = convert_dict_to_sorted_lists(dictionary_input_output)
        list.__init__(self, cdf_input_list)
        TimeInterval.__init__(self, self[0], self[-1], 2)
        # CDF: piecewise linear between the given points, 0 before the
        # first point and forced to 1 after the last.
        self.cdf = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
        self.cdf.dictionary_bounds_function[(self.b, POSITIVE_INFINITY)] = FUNCTION_ONE
        pdf_output_list = []
        dictionary_bounds_function = {}
        for bounds in sorted(self.cdf.dictionary_bounds_function):
            a, b = bounds
            if a in [NEGATIVE_INFINITY, POSITIVE_INFINITY] or b in [NEGATIVE_INFINITY, POSITIVE_INFINITY]:
                continue
            # The PDF on each finite segment is the (constant) slope of the
            # CDF there, taken at the segment midpoint.
            pdf_y_intercept = fabs(self.cdf.derivative((a + b) / 2.0))
            pdf_output_list.append(pdf_y_intercept)
            dictionary_bounds_function[bounds] = FunctionHorizontalLinear(pdf_y_intercept)
        self.pdf = FunctionComposite(dictionary_bounds_function, function_undefined=FUNCTION_ZERO, domain=self,
                                     is_normalised=True)
        # Roulette wheel of (a, b, cumulative share) segments for sampling
        # in rvs(); the mean is accumulated from segment midpoints weighted
        # by each segment's probability mass.
        self.roulette_wheel = []
        self._mean = 0
        for bounds in sorted(self.pdf.dictionary_bounds_function):
            (a, b) = bounds
            if a in [NEGATIVE_INFINITY, POSITIVE_INFINITY] and b in [NEGATIVE_INFINITY, POSITIVE_INFINITY]:
                continue
            cdf = self.cdf.dictionary_bounds_function[bounds]
            pdf = self.pdf.dictionary_bounds_function[bounds]
            share = cdf(b)
            self.roulette_wheel.append((a, b, share))
            self._mean += (a + b) / 2.0 * pdf(a) * (b - a)

    def std(self):
        # Not properly implemented
        return 0

    def stats(self, moments='mv'):
        # Not properly implemented
        # m, s, k
        return self.mean(), 0, 0

    def mean(self):
        # Precomputed in __init__ from the segment midpoints.
        return self._mean

    def interval(self, alpha):
        if alpha == 1:
            return self.a, self.b
        raise NotImplementedError("'interval' is not implemented for 'alpha' other than 1")

    def rvs(self, size=None):
        """Draw ``size`` samples by roulette-wheel selection of a segment,
        then a uniform draw within it. Returns a scalar when size is None."""
        if size is None:
            size = 1
        else:
            assert isinstance(size, int)
        result = []
        start, end = 0, 0
        # NOTE(review): ``xrange`` is Python 2 only -- this module appears
        # to target py2; confirm before running under Python 3.
        for i in xrange(size):
            rand = random()
            for a, b, share in self.roulette_wheel:
                if rand < share:
                    start, end = a, b
                    break
            result.append(random_time(start, end))
        if size == 1:
            return result[0]
        return result

    # Earlier datetime-axis variant, kept for reference:
    # def plot(self):
    #     import matplotlib.pyplot as plt
    #     x_axis, y_axis = [], []
    #     for time_step in self:
    #         x_axis.append(UnixTime(time_step - EPSILON).to_datetime())
    #         x_axis.append(UnixTime(time_step + EPSILON).to_datetime())
    #         y_axis.append(self.pdf(time_step - EPSILON))
    #         y_axis.append(self.pdf(time_step + EPSILON))
    #     plt.plot(x_axis, y_axis)
    #     return plt

    def plot(self):
        """Plot the (step-shaped) PDF; samples just inside each breakpoint
        so discontinuities render as vertical steps."""
        import matplotlib.pyplot as plt
        x_axis, y_axis = [], []
        for time_step in self:
            x_axis.append(time_step - EPSILON)
            x_axis.append(time_step + EPSILON)
            y_axis.append(self.pdf(time_step - EPSILON))
            y_axis.append(self.pdf(time_step + EPSILON))
        plt.plot(x_axis, y_axis)
        return plt

    def __hash__(self):
        # Identity hash: list.__hash__ is None, so restore hashability.
        return object.__hash__(self)

    def __repr__(self):
        return TimeInterval.__repr__(self)

    def __str__(self):
        return repr(self)
| agpl-3.0 |
mrocklin/partd | partd/tests/test_pandas.py | 2 | 3727 | from __future__ import absolute_import
import pytest
pytest.importorskip('pandas') # noqa
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import os
from partd.pandas import PandasColumns, PandasBlocks, serialize, deserialize
# Two small fixture frames with identical columns and a named index; the
# tests below append both and check concatenation order on retrieval.
df1 = pd.DataFrame({'a': [1, 2, 3],
                    'b': [1., 2., 3.],
                    'c': ['x', 'y', 'x']}, columns=['a', 'b', 'c'],
                   index=pd.Index([1, 2, 3], name='myindex'))

df2 = pd.DataFrame({'a': [10, 20, 30],
                    'b': [10., 20., 30.],
                    'c': ['X', 'Y', 'X']}, columns=['a', 'b', 'c'],
                   index=pd.Index([10, 20, 30], name='myindex'))
def test_PandasColumns():
    """Round-trip frames through a PandasColumns store (file per column)."""
    with PandasColumns() as p:
        assert os.path.exists(p.partd.partd.path)
        p.append({'x': df1, 'y': df2})
        p.append({'x': df2, 'y': df1})
        # Each column -- and the index -- gets its own underlying file.
        assert os.path.exists(p.partd.partd.filename('x'))
        assert os.path.exists(p.partd.partd.filename(('x', 'a')))
        assert os.path.exists(p.partd.partd.filename(('x', '.index')))
        assert os.path.exists(p.partd.partd.filename('y'))

        # Retrieval concatenates the appended frames in append order.
        result = p.get(['y', 'x'])
        tm.assert_frame_equal(result[0], pd.concat([df2, df1]))
        tm.assert_frame_equal(result[1], pd.concat([df1, df2]))

        with p.lock:  # uh oh, possible deadlock
            result = p.get(['x'], lock=False)

    # Leaving the context manager removes the on-disk store.
    assert not os.path.exists(p.partd.partd.path)
def test_column_selection():
    """get() with a columns= argument returns only those columns, in order."""
    with PandasColumns('foo') as p:
        p.append({'x': df1, 'y': df2})
        p.append({'x': df2, 'y': df1})
        result = p.get('x', columns=['c', 'b'])
        tm.assert_frame_equal(result, pd.concat([df1, df2])[['c', 'b']])
def test_PandasBlocks():
    """Round-trip frames through a PandasBlocks store (one file per key)."""
    with PandasBlocks() as p:
        assert os.path.exists(p.partd.path)
        p.append({'x': df1, 'y': df2})
        p.append({'x': df2, 'y': df1})
        assert os.path.exists(p.partd.filename('x'))
        assert os.path.exists(p.partd.filename('y'))

        # Retrieval concatenates the appended frames in append order.
        result = p.get(['y', 'x'])
        tm.assert_frame_equal(result[0], pd.concat([df2, df1]))
        tm.assert_frame_equal(result[1], pd.concat([df1, df2]))

        with p.lock:  # uh oh, possible deadlock
            result = p.get(['x'], lock=False)

    # Leaving the context manager removes the on-disk store.
    assert not os.path.exists(p.partd.path)
@pytest.mark.parametrize('ordered', [False, True])
def test_serialize_categoricals(ordered):
    """Serialization round-trips categorical columns and categorical indexes,
    both ordered and unordered."""
    frame = pd.DataFrame({'x': [1, 2, 3, 4],
                          'y': pd.Categorical(['c', 'a', 'b', 'a'],
                                              ordered=ordered)},
                         index=pd.Categorical(['x', 'y', 'z', 'x'],
                                              ordered=ordered))
    frame.index.name = 'foo'
    frame.columns.name = 'bar'

    # Check both orientations (categorical index on rows, then on columns).
    for ind, df in [(0, frame), (1, frame.T)]:
        df2 = deserialize(serialize(df))
        tm.assert_frame_equal(df, df2)
def test_serialize_multi_index():
    """Serialization round-trips a MultiIndex (from a two-key groupby)."""
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'a', 'b', 'c'],
                       'y': [1, 2, 3, 4, 5, 6],
                       'z': [7., 8, 9, 10, 11, 12]})
    df = df.groupby([df.x, df.y]).sum()
    df.index.name = 'foo'
    df.columns.name = 'bar'

    df2 = deserialize(serialize(df))
    tm.assert_frame_equal(df, df2)
@pytest.mark.parametrize('base', [
    pd.Timestamp('1987-03-3T01:01:01+0001'),
    pd.Timestamp('1987-03-03 01:01:01-0600', tz='US/Central'),
])
def test_serialize(base):
    """Serialization round-trips timezone-aware timestamp columns
    (fixed-offset and named-zone)."""
    df = pd.DataFrame({'x': [
        base + pd.Timedelta(seconds=i)
        for i in np.random.randint(0, 1000, size=10)],
                       'y': list(range(10)),
                       'z': pd.date_range('2017', periods=10)})
    df2 = deserialize(serialize(df))
    tm.assert_frame_equal(df, df2)
| bsd-3-clause |
hooram/ownphotos-backend | api/face_clustering.py | 1 | 4573 | from api.models import Face
from api.models import Person
import base64
import pickle
import itertools
from scipy import linalg
from sklearn.decomposition import PCA
import numpy as np
import matplotlib as mpl
from sklearn import cluster
from sklearn import mixture
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler
mpl.use('Agg')
import matplotlib.pyplot as plt
def compute_bic(kmeans, X):
    """
    Computes the BIC metric for a given clustering.

    Parameters:
    -----------------------------------------
    kmeans: fitted KMeans clustering object from scikit-learn
    X : multidimension np array of data points

    Returns:
    -----------------------------------------
    BIC value
    """
    centers = kmeans.cluster_centers_
    labels = kmeans.labels_
    m = kmeans.n_clusters          # number of clusters
    cluster_sizes = np.bincount(labels)
    N, d = X.shape                 # size and dimensionality of the data set

    # Pooled within-cluster variance estimate (m degrees of freedom removed).
    squared_dists = 0.0
    for i in range(m):
        members = X[np.where(labels == i)]
        squared_dists = squared_dists + sum(
            distance.cdist(members, [centers[i]], 'euclidean') ** 2)
    cl_var = (1.0 / (N - m) / d) * squared_dists

    const_term = 0.5 * m * np.log(N) * (d + 1)

    # Per-cluster log-likelihood terms of the spherical-Gaussian model.
    likelihood_terms = [
        cluster_sizes[i] * np.log(cluster_sizes[i])
        - cluster_sizes[i] * np.log(N)
        - ((cluster_sizes[i] * d) / 2) * np.log(2 * np.pi * cl_var)
        - ((cluster_sizes[i] - 1) * d / 2)
        for i in range(m)
    ]
    return np.sum(likelihood_terms) - const_term
# ---------------------------------------------------------------------------
# Script body: load face encodings from the database, estimate the number of
# clusters via BIC over repeated k-means runs, then cluster hierarchically
# and write diagnostic figures under media/figs/.
# ---------------------------------------------------------------------------

faces_labelled = Face.objects.filter(person_label_is_inferred=False)
faces_all = Face.objects.all()

# Decode every stored face embedding (base64 of a float64 buffer).
vecs_all = []
for face in faces_all:
    r = base64.b64decode(face.encoding)
    encoding = np.frombuffer(r, dtype=np.float64)
    vecs_all.append(encoding)
vecs_all = np.array(vecs_all)

# Embeddings of manually-labelled faces, kept with their person names for
# annotating the scatter plots.
vecs_labelled = []
person_labels = []
for face in faces_labelled:
    r = base64.b64decode(face.encoding)
    encoding = np.frombuffer(r, dtype=np.float64)
    vecs_labelled.append(encoding)
    person_labels.append(face.person.name)
vecs_labelled = np.array(vecs_labelled)

# 2D PCA projection for visualization only.
pca = PCA(n_components=2)
vis_all = pca.fit_transform(vecs_all)
try:
    vis_labelled = pca.transform(vecs_labelled)
# NOTE(review): bare except hides real errors (presumably meant to catch an
# empty vecs_labelled) -- narrow to the expected exception type.
except:
    vis_labelled = None

X = vecs_all
ks = range(1, 15)

# Repeat the BIC-vs-k sweep to average out k-means initialization noise.
bics = []
bests = []
num_experiments = 20
for i_bic in range(num_experiments):
    print('bic experiment %d' % i_bic)
    # run 9 times kmeans and save each result in the KMeans object
    KMeans = [cluster.KMeans(n_clusters=i, init="k-means++").fit(vecs_all) for i in ks]
    # now run for each cluster the BIC computation
    # NOTE(review): np.log of a negative BIC yields NaN -- confirm intent.
    BIC = np.log([compute_bic(kmeansi, X) for kmeansi in KMeans])
    bests.append(np.argmax(BIC))
    bics.append(BIC)
bics = np.array(bics)

fig = plt.figure()
plt.plot(np.arange(len(bics.mean(0))) + 1, bics.mean(0))
fig.savefig('media/figs/bic.png')
plt.close(fig)

# Best k = argmax of the averaged BIC curve (+1: ks started at 1).
num_clusters = np.argmax(bics.mean(0)) + 1
print("number of clusters: %d" % num_clusters)

fig = plt.figure()
plt.scatter(vis_all.T[0], vis_all.T[1])
if vis_labelled is not None:
    for i, vis in enumerate(vis_labelled):
        plt.text(vis[0], vis[1], person_labels[i])
fig.savefig('media/figs/scatter.png')
plt.close(fig)

from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram

# Agglomerative (Ward) clustering of the raw embeddings; plot how the
# cluster count varies with the distance threshold.
Z = linkage(vecs_all, metric='euclidean', method='ward')
dendrogram(Z)
labels = [fcluster(Z, t, criterion='distance') for t in np.linspace(0, 8, 100)]
lens = [len(set(label)) for label in labels]
fig = plt.figure()
plt.plot(lens)
plt.grid()
fig.savefig('media/figs/linkage.png')
plt.close(fig)

# Final assignment: cut the dendrogram at the BIC-estimated cluster count.
fig = plt.figure(figsize=(5, 5))
clusters = fcluster(Z, num_clusters, criterion='maxclust')
plt.scatter(vis_all.T[0], vis_all.T[1], marker='.', s=10, c=clusters)
if vis_labelled is not None:
    for i, vis in enumerate(vis_labelled):
        plt.text(vis[0], vis[1], person_labels[i])
# plt.xlim([-0.5,0.5])
# plt.ylim([-0.2,0.5])
plt.title('Face Clusters')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.yticks([])
plt.xticks([])
plt.tight_layout()
fig.savefig('media/figs/linkage_scatter.png')
plt.close(fig)

# for face,cluster in zip(faces_all, clusters):
#     person_cluster = Person.objects.get_or_create(name="cluster_%d"%cluster,kind="CLUSTER",cluster_id=cluster)
#     face.person = person_cluster[0]
#     face.save()

# calculat average face embedding for each person model object
# NOTE(review): the decoded encodings are collected but never averaged or
# saved -- this final step looks unfinished.
persons = Person.objects.all()
for person in persons:
    encodings = []
    faces = person.faces.all()
    for face in faces:
        r = base64.b64decode(face.encoding)
        encoding = np.frombuffer(r, dtype=np.float64)
        encodings.append(encoding)
| mit |
yanchen036/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 24 | 13826 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
# Use ResourceVariables to avoid race conditions.
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
    """Advance the RNN one step from a prior observation or prediction."""
    _, last_scaled_value, exogenous, lstm_state = state
    # The LSTM consumes the concatenation of the endogenous series value
    # (observed or previously predicted) and the latest exogenous features.
    combined_inputs = tf.concat([last_scaled_value, exogenous], axis=-1)
    lstm_output, advanced_lstm_state = self._lstm_cell_run(
        inputs=combined_inputs, state=lstm_state)
    prediction = self._predict_from_lstm_output(lstm_output)
    updated_state = (current_times, prediction,
                     exogenous, advanced_lstm_state)
    # Report predictions in the original (un-normalized) scale.
    return updated_state, {"mean": self._scale_back_data(prediction)}
def _imputation_step(self, current_times, state):
    """Carry model state across a time gap unchanged.

    This deterministic model does nothing special when jumping a gap. A
    probabilistic model would typically widen its uncertainty here as a
    function of the gap size.
    """
    return state
def _exogenous_input_step(
        self, current_times, current_exogenous_regressors, state):
    """Store the latest exogenous regressors for use in _prediction_step."""
    times_in_state, prediction, _, lstm_state = state
    # Only the exogenous slot of the state tuple changes; every other
    # element is passed through untouched.
    return (times_in_state, prediction,
            current_exogenous_regressors, lstm_state)
def train_and_predict(
        csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
        export_directory=None):
    """Train and predict using a custom time series model.

    Args:
        csv_file_name: Path to the training CSV (a time column, five value
            columns, a 2-d numeric exogenous feature, and a categorical
            exogenous feature).
        training_steps: Number of Estimator training steps to run.
        estimator_config: Optional RunConfig forwarded to the Estimator.
        export_directory: Where to write the SavedModel; a temporary
            directory is created when None.

    Returns:
        A (times, observed, all_times, predicted_mean) tuple suitable for
        plotting the in-sample fit plus the 100-step continuation.
    """
    # Construct an Estimator from our LSTM model.
    categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
        key="categorical_exogenous_feature", hash_bucket_size=16)
    exogenous_feature_columns = [
        # Exogenous features are not part of the loss, but can inform
        # predictions. In this example the features have no extra information,
        # but are included as an API example.
        tf.feature_column.numeric_column(
            "2d_exogenous_feature", shape=(2,)),
        tf.feature_column.embedding_column(
            categorical_column=categorical_column, dimension=10)]
    estimator = ts_estimators.TimeSeriesRegressor(
        model=_LSTMModel(num_features=5, num_units=128,
                         exogenous_feature_columns=exogenous_feature_columns),
        optimizer=tf.train.AdamOptimizer(0.001), config=estimator_config,
        # Set state to be saved across windows.
        state_manager=state_management.ChainingStateManager())
    reader = tf.contrib.timeseries.CSVReader(
        csv_file_name,
        column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                      + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
                      + ("2d_exogenous_feature",) * 2
                      + ("categorical_exogenous_feature",)),
        # Data types other than for `times` need to be specified if they
        # aren't float32. In this case one of our exogenous features has
        # string dtype.
        column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
    train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
        reader, batch_size=4, window_size=32)
    estimator.train(input_fn=train_input_fn, steps=training_steps)
    evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
    evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
    # Predict starting after the evaluation
    predict_exogenous_features = {
        "2d_exogenous_feature": numpy.concatenate(
            [numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
            axis=-1),
        "categorical_exogenous_feature": numpy.array(
            ["strkey"] * 100)[None, :, None]}
    (predictions,) = tuple(estimator.predict(
        input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
            evaluation, steps=100,
            exogenous_features=predict_exogenous_features)))
    times = evaluation["times"][0]
    observed = evaluation["observed"][0, :, :]
    # Stitch the in-sample one-step-ahead means to the 100-step forecast.
    predicted_mean = numpy.squeeze(numpy.concatenate(
        [evaluation["mean"][0], predictions["mean"]], axis=0))
    all_times = numpy.concatenate([times, predictions["times"]], axis=0)
    # Export the model in SavedModel format. We include a bit of extra
    # boilerplate for "cold starting" as if we didn't have any state from the
    # Estimator, which is the case when serving from a SavedModel. If
    # Estimator output is available, the result of "Estimator.evaluate" can be
    # passed directly to
    # `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
    # `continue_from` argument.
    with tf.Graph().as_default():
        filter_feature_tensors, _ = evaluation_input_fn()
        with tf.train.MonitoredSession() as session:
            # Fetch the series to "warm up" our state, which will allow us to
            # make predictions for its future values. This is just a
            # dictionary of times, values, and exogenous features mapping to
            # numpy arrays. The use of an input_fn is just a convenience for
            # the example; they can also be specified manually.
            filter_features = session.run(filter_feature_tensors)
    if export_directory is None:
        export_directory = tempfile.mkdtemp()
    input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
    export_location = estimator.export_savedmodel(
        export_directory, input_receiver_fn)
    # Warm up and predict using the SavedModel
    with tf.Graph().as_default():
        with tf.Session() as session:
            signatures = tf.saved_model.loader.load(
                session, [tf.saved_model.tag_constants.SERVING],
                export_location)
            state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
                signatures=signatures, session=session,
                features=filter_features)
            saved_model_output = (
                tf.contrib.timeseries.saved_model_utils.predict_continuation(
                    continue_from=state, signatures=signatures,
                    session=session, steps=100,
                    exogenous_features=predict_exogenous_features))
            # The exported model gives the same results as the
            # Estimator.predict() call above.
            numpy.testing.assert_allclose(
                predictions["mean"],
                numpy.squeeze(saved_model_output["mean"], axis=0))
    return times, observed, all_times, predicted_mean
def main(unused_argv):
    """Entry point: train the model and plot observed vs. predicted values."""
    if not HAS_MATPLOTLIB:
        raise ImportError(
            "Please install matplotlib to generate a plot from this example.")
    obs_times, obs_values, full_times, forecast = train_and_predict()
    # Dotted line marks where training data ends and forecasting begins.
    pyplot.axvline(99, linestyle="dotted")
    obs_handle = pyplot.plot(
        obs_times, obs_values, label="Observed", color="k")
    pred_handle = pyplot.plot(
        full_times, forecast, label="Predicted", color="b")
    pyplot.legend(handles=[obs_handle[0], pred_handle[0]], loc="upper left")
    pyplot.show()


if __name__ == "__main__":
    tf.app.run(main=main)
| apache-2.0 |
terna/SLAPP3 | 6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/basic2D/display2D.py | 1 | 1838 | #display2d.py for the basic2D project
import matplotlib.pyplot as plt
def checkRunningInIPython():
    """Return True when executing under IPython, False otherwise."""
    try:
        __IPYTHON__  # noqa: F821 -- IPython injects this into builtins
    except NameError:
        return False
    return True
def display2D(agentList, cycle, nCycles, sleep):
    """Draw the positions of all agents for one cycle of the animation.

    Parameters
    ----------
    agentList : list
        Agents exposing reportPos() and world bounds lX, rX, bY, tY
        (bounds are read from agentList[0] only).
    cycle : int
        Current cycle number (1-based); cycle 1 initializes the figure.
    nCycles : int
        Total number of cycles; used only to label the final frame.
    sleep : float
        Pause (seconds) between frames; 0 is bumped slightly to avoid
        locking the figure.
    """
    global IPy, ax, dots, fig, myDisplay  # to avoid missing assignment errors
    # preparing the frame space
    if cycle == 1:
        IPy = checkRunningInIPython()
        # Create a named display, if in iPython
        # NOTE(review): `display` is not imported in this module; it appears
        # to rely on IPython injecting it into the namespace -- confirm.
        if IPy: myDisplay = display(None, display_id=True)
        # for i in range(len(agentList)):
        #     print("agent", agentList[i].number)
        # prepare graphic space
        fig, ax = plt.subplots(figsize=(7, 7))
        # asking the dimension of the world to one of the agents (the 0 one)
        ax.set_xlim(agentList[0].lX, agentList[0].rX)
        ax.set_ylim(agentList[0].bY, agentList[0].tY)
        if IPy: dots = ax.plot([], [], 'ro')
    if not IPy:
        # Outside IPython the axes are cleared and rebuilt on every frame.
        plt.gca().cla()
        # asking the dimension of the world to one of the agents (the 0 one)
        ax.set_xlim(agentList[0].lX, agentList[0].rX)
        ax.set_ylim(agentList[0].bY, agentList[0].tY)
    # update data from the agents' world
    xList = []
    yList = []
    for i in range(len(agentList)):
        x, y = agentList[i].reportPos()
        xList.append(x)
        yList.append(y)
    if IPy:
        dots[0].set_data(xList, yList)
        # dots[1] etc. for a second series
    else:
        ax.plot(xList, yList, 'ro')
    # display: label the first and last frames specially
    if cycle == 1: ax.set_title(str(cycle) + " initial frame")
    if cycle > 1 and cycle < nCycles: ax.set_title(str(cycle))
    if cycle == nCycles: ax.set_title(str(cycle) + " final frame")
    if IPy: myDisplay.update(fig)
    else: fig.canvas.draw()
    plt.pause(sleep + 0.001)  # if sleep is 0, it locks the figure
    print("end cycle", cycle, "of the animation")
| cc0-1.0 |
njordsir/Movie-Script-Analysis | Doc2Vec and Classification/doc2vec_model.py | 1 | 9067 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 17 04:26:02 2016
@author: naman
"""
import multiprocessing
import json
from operator import itemgetter
import os
import gensim, logging
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import LabeledSentence
import re
import numpy as np
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#def get_text(path_to_json_file):
# with open(path_to_json_file, 'r') as fp:
# script=json.load(fp)
#
# script_text = list()
# for i in range(1, len(script)+1):
# scene = script[str(i)]
# list_dialogues = scene['char_dialogues']
# list_desc = scene['scene_descriptons_list']
# list_scene = list_dialogues + list_desc
# list_scene = sorted(list_scene)
# list_scene = [l[1:] for l in list_scene ]
# list_scene = [' '.join(l) for l in list_scene]
# text = ' . '.join(list_scene).encode('utf-8')
# if len(text.split()) > 10:
# script_text.append(text)
# return script_text
def get_text(directory, filename):
    """Read a movie script and split it into 200 roughly equal partitions.

    HTML tags and blank lines are stripped first. A partition boundary is
    extended to start from its last line when that line is all-uppercase
    (a scene heading), so scenes are less likely to be split. Any trailing
    lines left over when the script length is not divisible by 200 are
    appended to the last partition.

    Returns
    -------
    list of (index, partition_text) tuples, or the sentinel [-1, ''] when
    the script has fewer than 200 usable lines.
    """
    # Use a context manager so the file handle is always released
    # (the original implementation leaked it).
    with open(directory + '/' + filename, 'r') as fp:
        script = fp.readlines()
    script = [re.sub('<.*?>', '', s).rstrip('\n').rstrip('\r').strip()
              for s in script]
    script = [s for s in script if s != '']
    # Integer division: identical result under Python 2, and fixes Python 3
    # where `/` yields a float and breaks the range() calls below.
    partition_size = len(script) // 200
    if partition_size == 0:
        # Sentinel consumed by callers as movie[1][0] == -1.
        return list((-1, ''))
    partitions = list()
    partition = ''
    for i in range(0, 200):
        for j in range(i * partition_size, (i + 1) * partition_size):
            partition += ' ' + script[j]
        index = partition_size * (i + 1) - 1
        partitions.append(partition)
        # If the partition's last line is a (mostly uppercase) heading,
        # seed the next partition with it instead of dropping it mid-scene.
        tmp_line = ''.join(e for e in script[index] if e.isalpha())
        if (tmp_line.isupper()):
            partition = script[index]
        else:
            partition = ''
    if len(script) % 200 != 0:
        partition = ''
        for s in script[index:]:
            partition += s + ' . '
        partitions[-1] += partition
    return list(enumerate(partitions))
# Load every script in the corpus and partition it via get_text().
# NOTE(review): hard-coded local path -- parameterize before reuse.
directory = '/home/naman/SNLP/imsdb'
text_files = os.listdir(directory)
scripts_text = list()    # (movie title, partition list) pairs
scripts_text1 = list()   # movie titles only, same order
for f in text_files:
    # f[:-4] drops the file extension; commas are stripped from titles.
    scripts_text.append((f[:-4].replace(',', ''), get_text(directory, f)))
    scripts_text1.append(f[:-4])
all_scripts = list()
for movie in scripts_text:
    # get_text returns [-1, ''] for scripts too short to partition; skip.
    if movie[1][0] == -1:
        continue
    # Tag each partition as "<title>_<scene_num>" for Doc2Vec labeling.
    all_scripts += ([("%s_%d" % (movie[0], scene_num), scene)
                     for scene_num, scene in movie[1]])
#==============================================================================
# docLabels = [t[0] for t in all_scripts]
# docs = [t[1] for t in all_scripts]
#
# def remove_punc(text):
# punc=['.',',','!','?']
# new_text=''.join(e for e in text if e not in punc)
# return new_text
#
# class DocIterator(object):
#
# #SPLIT_SENTENCES = re.compile(u"[.!?:]\s+") # split sentences on these characters
#
# def __init__(self, doc_list, labels_list):
# self.labels_list = labels_list
# self.doc_list = doc_list
# def __iter__(self):
# for idx, doc in enumerate(self.doc_list):
# yield TaggedDocument(words=remove_punc(doc),tags=[self.labels_list[idx]])
#
# it=DocIterator(docs, docLabels)
#
# model = gensim.models.Doc2Vec(size=300,
# window=10,
# min_count=1,
# workers=3,
# alpha=0.025,
# min_alpha=0.025) # use fixed learning rate
#
# model.build_vocab(it)
#
# for epoch in range(10):
# model.train(it)
# model.alpha -= 0.002 # decrease the learning rate
# model.min_alpha = model.alpha # fix the learning rate, no deca
# model.train(it)
# print "Epoch %d Completed" % epoch
#
# model.save('/home/naman/SNLP/imsdb_doc2vec_scriptpartitions_nopunc.model')
#
#==============================================================================
# Load the pre-trained Doc2Vec model (training code is commented out above).
model = gensim.models.Doc2Vec.load(
    '/home/naman/SNLP/imsdb_doc2vec_scriptpartitions.model')
centroid_vectors = list()   # per-movie mean of its 200 partition vectors
variance_vectors = list()   # per-movie per-dimension variance
ii = -1
scripts_text2 = list()      # titles actually present in the model vocabulary
for movie, _ in scripts_text:
    ii += 1
    movie_labels = [movie + '_%d' % i for i in range(0, 200)]
    try:
        vectors = model.docvecs[movie_labels]
        scripts_text2.append(scripts_text1[ii])
    except KeyError:
        # Movie was skipped during training (too short); ignore it here too.
        continue
    centroid = np.mean(vectors, axis=0)
    centroid_vectors.append((movie, centroid))
    # Diagonal of the covariance matrix = per-dimension variance.
    variance = np.diag(np.cov(vectors.T))
    variance_vectors.append((movie, variance))
#central_vectors=np.array([c[1] for c in central_vectors])
import pandas as pd
# NOTE(review): this script is Python-2 era -- `['Movie'] + cols` with
# cols = range(...) and DataFrame.ix both fail on Python 3 / modern pandas.
cols = range(300)
var_vectors = [[v[0]] + list(v[1]) for v in variance_vectors]
mean_vectors = [[m[0]] + list(m[1]) for m in centroid_vectors]
df_m = pd.DataFrame(mean_vectors, columns=['Movie'] + cols)
df_v = pd.DataFrame(var_vectors, columns=['Movie'] + cols)
from sklearn.decomposition import PCA
mean_components = 5
var_components = 5
# Reduce the 300-d centroid vectors to 5 principal components.
pca = PCA(n_components=mean_components)
pca.fit(df_m.ix[:, 1:])
X = pca.transform(df_m.ix[:, 1:])
dim_reduced = zip([v[0] for v in mean_vectors], X)
dim_reduced = [[v[0]] + list(v[1]) for v in dim_reduced]
df_mean = pd.DataFrame(dim_reduced, columns=['Movie'] + range(mean_components))
# Same reduction for the variance vectors.
pca = PCA(n_components=var_components)
pca.fit(df_v.ix[:, 1:])
X = pca.transform(df_v.ix[:, 1:])
dim_reduced = zip([v[0] for v in var_vectors], X)
dim_reduced = [[v[0]] + list(v[1]) for v in dim_reduced]
df_var = pd.DataFrame(dim_reduced, columns=['Movie'] + range(var_components))
df = pd.merge(df_mean, df_var, on='Movie')          # reduced features
df_full = pd.merge(df_m, df_v, on='Movie')          # full 600-d features
ratings = pd.read_pickle('/home/naman/SNLP/ratings.pkl')['ratings']
ratings = list(ratings.items())
r_mean = np.mean([t[1] for t in ratings])
r_var = np.var([t[1] for t in ratings])
def label(a, m, v):
    """Bucket a rating: 1 above m+v, -1 below m-v, 0 inside the band."""
    if a > m + v:
        return 1
    return -1 if a < m - v else 0
# Convert raw ratings into {-1, 0, 1} class labels around the mean.
ratings_labels = [(r[0], label(r[1], r_mean, r_var)) for r in ratings]
df_ratings = pd.DataFrame(ratings_labels, columns=['Movie', 'Rating'])
df['Movie'] = scripts_text2
df_full['Movie'] = scripts_text2
df_final_full = pd.merge(df_full, df_ratings, on='Movie')
# Feature sets produced by the other team members, keyed by movie title.
df_Shankar = pd.read_pickle('/home/naman/SNLP/char_net_final_II.pkl')
df_Shankar.columns = ['Movie'] + range(8)
df_Jar = pd.read_pickle('/home/naman/SNLP/emotion_binwise2.pkl')
df_Jar.columns = ['Movie'] + range(100, 110)
df_Shankar2 = pd.read_pickle('/home/naman/SNLP/topic_overlap.pkl')
df_Shankar2.columns = ['Movie'] + range(200, 210)
df_Shankar = pd.merge(df_Shankar, df_ratings, on='Movie')
df_Shankar2 = pd.merge(df_Shankar2, df_ratings, on='Movie')
df_Jar = pd.merge(df_Jar, df_ratings, on='Movie')
df_Naman = pd.merge(df, df_ratings, on='Movie')
# Combined feature matrix: drop each frame's trailing Rating column
# (ix[:, :-1]) before merging, then re-attach the labels once at the end.
df_final = pd.merge(df_Naman.ix[:, :-1], df_Shankar.ix[:, :-1], on='Movie')
df_final = pd.merge(df_final, df_Shankar2.ix[:, :-1], on='Movie')
df_final = pd.merge(df_final, df_Jar.ix[:, :-1], on='Movie')
df_final = pd.merge(df_final, df_ratings, on='Movie')
X_Naman = df_Naman.ix[:, 1:-1]
Y_Naman = df_Naman.ix[:, -1]
X_Shankar = df_Shankar.ix[:, 1:-1]
Y_Shankar = df_Shankar.ix[:, -1]
X_Shankar2 = df_Shankar2.ix[:, 1:-1]
Y_Shankar2 = df_Shankar2.ix[:, -1]
X_Jar = df_Jar.ix[:, 1:-1]
Y_Jar = df_Jar.ix[:, -1]
pd.to_pickle(df_Naman, '/home/naman/SNLP/FinalVectors_D2V.pkl')
pd.to_pickle(df_Naman.ix[:, :-1], '/home/naman/SNLP/FinalVectors_Naman.pkl')
pd.to_pickle(df_final_full, '/home/naman/SNLP/FinalVectors_Full_D2V.pkl')
X_final = df_final.ix[:, 1:-1]
Y_final = df_final.ix[:, -1]
from sklearn.svm import SVC
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code uses sklearn.model_selection.
from sklearn.cross_validation import cross_val_score
clf = SVC()
scores_Naman = cross_val_score(clf, X_Naman, Y_Naman, cv=10)
scores_Shankar = cross_val_score(clf, X_Shankar, Y_Shankar, cv=10)
# NOTE(review): this reuses X_Shankar/Y_Shankar; presumably it was meant
# to use X_Shankar2/Y_Shankar2 -- confirm before relying on these scores.
scores_Shankar2 = cross_val_score(clf, X_Shankar, Y_Shankar, cv=10)
scores_Jar = cross_val_score(clf, X_Jar, Y_Jar, cv=10)
scores_final = cross_val_score(clf, X_final, Y_final, cv=10)
scores = np.vstack((scores_Shankar, scores_Shankar2, scores_Jar,
                    scores_Naman)).T
scores = pd.DataFrame(scores, columns=['CharacterNetworks', 'TopicOverlap',
                                       'EmotionAnalysis', 'Doc2Vec'])
from sklearn.cross_validation import train_test_split
# X_train, X_test, Y_train, Y_test = train_test_split(X_final, Y_final,
#                                                     stratify=Y_final)
# NOTE(review): the classifier is fit and evaluated on the same data, so
# this confusion matrix measures training fit, not generalization.
clf1 = SVC()
clf1.fit(X_final, Y_final)
Y_pred = clf1.predict(X_final)
from sklearn.metrics import confusion_matrix
result = confusion_matrix(Y_final, Y_pred)
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# 2-D t-SNE embedding of the full (mean + variance) movie vectors.
tsne_model = TSNE(n_components=2, random_state=0)
# tsne_op = tsne_model.fit_transform(
#     np.array([c[1] for c in variance_vectors]))
tsne_op = tsne_model.fit_transform(np.array(df_final_full.ix[:, 1:-1]))
plt.figure(figsize=(10, 10))
plt.scatter(tsne_op[:, 0], tsne_op[:, 1])
plt.show()
# plotting some movies document vectors for all scenes
movie = 'Terminator Salvation'
movie_labels = [movie + '_%d' % i for i in range(0, 200)]
vectors = model.docvecs[movie_labels]
tsne_model = TSNE(n_components=2, random_state=0)
tsne_op = tsne_model.fit_transform(vectors)
plt.figure(figsize=(10, 10))
plt.scatter(tsne_op[:, 0], tsne_op[:, 1])
plt.show()
| mit |
gregcaporaso/short-read-tax-assignment | tax_credit/eval_framework.py | 4 | 44170 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, tax-credit development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from sys import exit
from glob import glob
from os.path import abspath, join, exists, split, dirname
from collections import defaultdict
from functools import partial
from random import shuffle
from shutil import copy
from biom.exception import TableException
from biom import load_table
from biom.cli.util import write_biom_table
import pandas as pd
from tax_credit import framework_functions, taxa_manipulator
def get_sample_to_top_params(df, metric, sample_col='SampleID',
                             method_col='Method', dataset_col='Dataset',
                             ascending=False):
    """ Identify the top-performing methods for a given metric

    For each (dataset, sample, method) group, every parameter combination
    whose score lies within one mean absolute deviation (MAD) of the best
    score is reported as "top-performing".

    Parameters
    ----------
    df: pd.DataFrame
    metric: Column header defining the metric to compare parameter
        combinations with
    sample_col: Column header defining the SampleID
    method_col: Column header defining the method name
    dataset_col: Column header defining the dataset name
    ascending: bool
        False (default) means higher metric values are better; True means
        lower values are better.

    Returns
    -------
    pd.DataFrame
        Rows: Multi-index of (Dataset, SampleID)
        Cols: methods
        Values: list of Parameters that achieve the highest performance for
        each method
    """
    sorted_df = df.sort_values(by=metric, ascending=False)
    result = {}
    for dataset in sorted_df[dataset_col].unique():
        dataset_df = sorted_df[sorted_df[dataset_col] == dataset]
        for sid in dataset_df[sample_col].unique():
            dataset_sid_res = dataset_df[dataset_df[sample_col] == sid]
            current_results = {}
            # Bug fix: previously the 'Method' column was hardcoded here,
            # breaking any non-default method_col argument.
            for method in sorted_df[method_col].unique():
                m_res = dataset_sid_res[dataset_sid_res[method_col] == method]
                metric_vals = m_res[metric]
                # Series.mad() was removed in pandas 2.0; compute the mean
                # absolute deviation explicitly (same definition).
                mad_metric_value = (
                    metric_vals - metric_vals.mean()).abs().mean()
                # if higher values are better, find params within MAD of max
                if ascending is False:
                    max_val = metric_vals.max()
                    tp = m_res[metric_vals >= (max_val - mad_metric_value)]
                # if lower values are better, find params within MAD of min
                else:
                    min_val = metric_vals.min()
                    tp = m_res[metric_vals <= (min_val + mad_metric_value)]
                current_results[method] = list(tp.Parameters)
            result[(dataset, sid)] = current_results
    result = pd.DataFrame(result).T
    return result
def parameter_comparisons(
        df, method, metrics=['Precision', 'Recall', 'F-measure',
                             'Taxon Accuracy Rate', 'Taxon Detection Rate'],
        sample_col='SampleID', method_col='Method',
        dataset_col='Dataset', ascending=None):
    """ Count how often each parameter combination achieves a top score

    Parameters
    ----------
    df: pd.DataFrame
    method: method of interest
    metrics: metrics to include as headers in the resulting DataFrame

    Returns
    -------
    pd.DataFrame
        Rows: Parameter combination
        Cols: metrics
        Values: count of times a parameter combination achieved the best
        score for the given metric
    """
    # Default: treat larger values as better for every metric.
    if ascending is None:
        ascending = dict.fromkeys(metrics, False)
    tallies = {}
    for metric in metrics:
        top_params = get_sample_to_top_params(
            df, metric, sample_col=sample_col, method_col=method_col,
            dataset_col=dataset_col, ascending=ascending[metric])
        counts = defaultdict(int)
        # Each cell holds the list of tied-best parameter strings for one
        # (dataset, sample); tally every appearance.
        for winning_params in top_params[method]:
            for params in winning_params:
                counts[params] += 1
        tallies[metric] = counts
    summary = pd.DataFrame.from_dict(tallies)
    summary.fillna(0, inplace=True)
    return summary.sort_values(by=metrics[-1], ascending=False)
def find_and_process_result_tables(start_dir,
                                   biom_processor=abspath,
                                   filename_pattern='table*biom'):
    """ Locate result tables under start_dir and process each one.

    Expects the directory layout
    start_dir/<dataset>/<reference>/<method>/<parameters>/ and returns

    results = [(data-set-id, reference-id, method-id, parameters-id,
                biom_processor(table_fp)),
               ...
              ]

    start_dir: top-level directory to use when starting the walk
    biom_processor: callable applied to each matched table path; defaults
        to abspath, but could also be load_table, for example.
    filename_pattern: glob pattern for matching table filenames
    """
    found = []
    for table_fp in glob(join(start_dir, '*', '*', '*', '*',
                              filename_pattern)):
        # Peel off one path component per metadata field, innermost first.
        param_dir = split(table_fp)[0]
        method_dir, param_id = split(param_dir)
        reference_dir, method_id = split(method_dir)
        dataset_dir, reference_id = split(reference_dir)
        dataset_id = split(dataset_dir)[1]
        found.append((dataset_id, reference_id, method_id, param_id,
                      biom_processor(table_fp)))
    return found
def find_and_process_expected_tables(start_dir,
                                     biom_processor=abspath,
                                     filename_pattern='table.L{0}-taxa.biom',
                                     level=6):
    """ Locate expected-composition tables and process each one.

    Expects start_dir/<dataset>/<reference>/expected/<filename>, where
    filename is filename_pattern with the taxonomic level substituted, and
    returns

    results = [(data-set-id, reference-id, biom_processor(table_fp)),
               ...
              ]

    start_dir: top-level directory to use when starting the walk
    biom_processor: callable applied to each matched table path; defaults
        to abspath, but could also be load_table, for example.
    filename_pattern: glob-able pattern with a {0} slot for the level
    level: taxonomic level substituted into filename_pattern
    """
    target = filename_pattern.format(level)
    processed = []
    for table_fp in glob(join(start_dir, '*', '*', 'expected', target)):
        # Walk back up the path: expected dir -> reference dir -> dataset.
        expected_dir = split(table_fp)[0]
        reference_dir = split(expected_dir)[0]
        dataset_dir, reference_id = split(reference_dir)
        dataset_id = split(dataset_dir)[1]
        processed.append((dataset_id, reference_id,
                          biom_processor(table_fp)))
    return processed
def get_expected_tables_lookup(start_dir,
                               biom_processor=abspath,
                               filename_pattern='table.L{0}-taxa.biom',
                               level=6):
    """ Build a {dataset-id: {reference-id: processed-table}} lookup.

    start_dir: top-level directory to use when starting the walk
    biom_processor: callable applied to each matched table path; defaults
        to abspath, but could also be load_table, for example.
    filename_pattern: glob-able pattern with a {0} slot for the level
    level: taxonomic level substituted into filename_pattern
    """
    lookup = defaultdict(dict)
    for dataset_id, reference_id, table in find_and_process_expected_tables(
            start_dir, biom_processor, filename_pattern, level):
        lookup[dataset_id][reference_id] = table
    return lookup
def get_observed_observation_ids(table, sample_id=None, ws_strip=False):
    """ Return the set of observation ids with count > 0 in sample_id

    table: the biom table object to analyze
    sample_id: the sample_id to test (default is first sample id in
        table.ids(axis="sample"))
    ws_strip: when True, strip all whitespace from each observation id
    """
    if sample_id is None:
        sample_id = table.ids(axis="sample")[0]
    observed = set()
    for obs_id in table.ids(axis="observation"):
        if table.get_value_by_ids(obs_id, sample_id) > 0.0:
            # remove all whitespace from observation_id
            if ws_strip is True:
                obs_id = "".join(obs_id.split())
            observed.add(obs_id)
    return observed
def compute_taxon_accuracy(actual_table, expected_table, actual_sample_id=None,
                           expected_sample_id=None):
    """ Compute taxon accuracy and detection rates from presence/absence.

    actual_table: table containing results achieved for query
    expected_table: table containing expected results
    actual_sample_id: sample_id to test (default is first sample id in
        actual_table)
    expected_sample_id: sample_id to test (default is first sample id in
        expected_table)

    Returns a (precision, recall) pair; both are 0 when there are no true
    positives.
    """
    observed = get_observed_observation_ids(actual_table,
                                            actual_sample_id,
                                            ws_strip=True)
    expected = get_observed_observation_ids(expected_table,
                                            expected_sample_id,
                                            ws_strip=True)
    tp = len(observed & expected)
    fp = len(observed - expected)
    fn = len(expected - observed)
    if tp == 0:
        return 0, 0
    return tp / (tp + fp), tp / (tp + fn)
def get_taxonomy_collapser(level, md_key='taxonomy',
                           unassignable_label='unassigned'):
    """ Returns fn to pass to table.collapse

    level: taxonomic depth to collapse at; levels 0..level are retained
    md_key: observation metadata key holding the taxonomy (a ';'-delimited
        string or a list of level strings)
    unassignable_label: label used when no taxonomy metadata is available
    """
    def collapse(id_, md):
        try:
            raw = md[md_key]
        except TypeError:
            # md itself is None -- happens when the table is empty.
            raw = None
        if raw is None:
            parts = [unassignable_label]
        else:
            try:
                # ';'-delimited taxonomy string.
                parts = [p.strip() for p in raw.split(';')]
            except AttributeError:
                try:
                    # Already a list/tuple of level strings.
                    parts = [p.strip() for p in raw]
                except TypeError:
                    # Not iterable: treat as unassigned.
                    parts = [unassignable_label]
        return ';'.join(parts[:level + 1])
    return collapse
def filter_table(table, min_count=0, taxonomy_level=None,
                 taxa_to_keep=None, md_key='taxonomy'):
    """Filter a biom table's observations by abundance, depth, and taxonomy.

    table: biom table to filter; a new filtered table is returned
    min_count: minimum total count an observation needs to be retained
    taxonomy_level: if set, require metadata with at least
        taxonomy_level+1 taxonomic levels
    taxa_to_keep: list of taxonomy strings joined into one lineage prefix;
        only matching observations are kept
    md_key: observation metadata key holding the taxonomy
    """
    try:
        _taxa_to_keep = ';'.join(taxa_to_keep)
    except TypeError:
        # taxa_to_keep is None: disable taxonomy-based filtering.
        _taxa_to_keep = None

    def f(data_vector, id_, metadata):
        # if filtering based on number of taxonomy levels, and this
        # observation has taxonomic information, and
        # there are a sufficient number of taxonomic levels
        # Table filtering here removed taxa that have insufficient levels
        enough_levels = taxonomy_level is None or \
            (metadata[md_key] is not None and
             len(metadata[md_key]) >= taxonomy_level+1)
        # if filtering to specific taxa, this OTU is assigned to that taxonomy
        # NOTE(review): startswith receives the single ';'-joined string, so
        # taxa_to_keep acts as one lineage prefix rather than a set of
        # alternatives -- confirm this is the intended behavior.
        allowed_taxa = _taxa_to_keep is None or \
            id_.startswith(_taxa_to_keep) or \
            (metadata is not None and md_key in metadata and
             ';'.join(metadata[md_key]).startswith(_taxa_to_keep))
        # the count of this observation is at least min_count
        sufficient_count = data_vector.sum() >= min_count
        return sufficient_count and allowed_taxa and enough_levels

    return table.filter(f, axis='observation', inplace=False)
def seek_results(results_dirs, dataset_ids=None, reference_ids=None,
                 method_ids=None, parameter_ids=None):
    '''Collect and optionally filter precomputed result tables.

    Walks each directory in results_dirs via find_and_process_result_tables
    and then restricts the collected tuples to the requested datasets,
    references, methods, and parameters (None means no filtering).

    dataset_ids: list
        dataset ids (mock community study ID) to process. Defaults to None
        (process all).
    reference_ids: list
        reference database data to process. Defaults to None (process all).
    method_ids: list
        methods to process. Defaults to None (process all).
    parameter_ids: list
        parameters to process. Defaults to None (process all).
    '''
    results = []
    for results_dir in results_dirs:
        assert exists(results_dir), '''Mock community result directory
        does not exist: {0}'''.format(results_dir)
        results += find_and_process_result_tables(results_dir)
    # Tuple layout: (dataset, reference, method, parameters, table).
    selectors = ((0, dataset_ids), (1, reference_ids),
                 (2, method_ids), (3, parameter_ids))
    for position, wanted in selectors:
        if wanted:
            results = [r for r in results if r[position] in wanted]
    return results
def evaluate_results(results_dirs, expected_results_dir, results_fp, mock_dir,
                     taxonomy_level_range=range(2, 7), min_count=0,
                     taxa_to_keep=None, md_key='taxonomy',
                     dataset_ids=None, reference_ids=None,
                     method_ids=None, parameter_ids=None, subsample=False,
                     filename_pattern='table.L{0}-taxa.biom', size=10,
                     per_seq_precision=False, exclude=['other'], backup=True,
                     force=False, append=False):
    '''Load observed and expected observations from tax-credit, compute
    precision, recall, F-measure, and correlations, and return results
    as dataframe.

    results_dirs: list of directories containing precomputed taxonomy
        assignment results to evaluate. Must be in format:
            results_dirs/<dataset name>/
                <reference name>/<method>/<parameters>/
    expected_results_dir: directory containing expected composition data in
        the structure:
            expected_results_dir/<dataset name>/<reference name>/expected/
    results_fp: path to output file containing evaluation results summary.
    mock_dir: path
        Directory of mock community directories containing mock feature
        tables without taxonomy.
    taxonomy_level_range: RANGE of taxonomic levels to evaluate.
    min_count: int
        Minimum abundance threshold for filtering taxonomic features.
    taxa_to_keep: list of taxonomies to retain, others are removed before
        evaluation.
    md_key: metadata key containing taxonomy metadata in observed taxonomy
        biom tables.
    dataset_ids: list
        dataset ids (mock community study ID) to process. Defaults to None
        (process all).
    reference_ids: list
        reference database data to process. Defaults to None (process all).
    method_ids: list
        methods to process. Defaults to None (process all).
    parameter_ids: list
        parameters to process. Defaults to None (process all).
    subsample: bool
        Randomly subsample results for test runs.
    size: int
        Size of subsample to take.
    exclude: list
        taxonomies to explicitly exclude from precision scoring.
    backup: bool
        Backup pre-existing results before overwriting? Will overwrite
        previous backups, and will only backup if force or append ==True.
    force: bool
        Overwrite pre-existing results_fp?
    append: bool
        Append new data to results_fp? Behavior of force and append will
        depend on whether the data in results_dirs have already been
        calculated in results_fp, and have interacting effects:
        if force= append= Action
           True   True    Append new to results_fp; pre-existing results
                          are overwritten if they are requested by the
                          "results params": dataset_ids, reference_ids,
                          method_ids, parameter_ids. If these should be
                          excluded and results_fp should only include
                          results specifically requested, use force==True
                          and append==False.
           True   False   Overwrite results_fp with results requested by
                          "results params".
           False  True    Load results_fp and append new to results_fp;
                          pre-existing results are not overwritten even
                          if requested by "results params".
           False  False   Load results_fp. If "results params" are set,
                          the dataframe returned by this function is
                          automatically filtered to include only those
                          results.
    '''
    # Define the subdirectories where the query mock community data should be
    results = seek_results(
        results_dirs, dataset_ids, reference_ids, method_ids, parameter_ids)
    if subsample is True:
        shuffle(results)
        results = results[:size]
    # Process tables of expected taxonomy composition
    expected_tables = get_expected_tables_lookup(
        expected_results_dir, filename_pattern=filename_pattern)
    # Compute accuracy results OR read in pre-existing mock_results_fp
    if not exists(results_fp) or force:
        # if append is True, load pre-existing results prior to overwriting
        if exists(results_fp) and append and (
                dataset_ids or reference_ids or method_ids or parameter_ids):
            # NOTE(review): DataFrame.from_csv was removed in pandas 1.0;
            # pd.read_csv(results_fp, sep='\t', index_col=0) is the
            # documented replacement (here and below).
            old_results = pd.DataFrame.from_csv(results_fp, sep='\t')
            # overwrite results that are explicitly requested by results
            # params
            # NOTE(review): _filter_mock_results *selects* the requested
            # rows rather than removing them, so the concat below may
            # duplicate the recomputed rows -- confirm intent.
            old_results = _filter_mock_results(
                old_results, dataset_ids, reference_ids, method_ids,
                parameter_ids)
        # compute accuracy results
        mock_results = compute_mock_results(
            results, expected_tables, results_fp, mock_dir,
            taxonomy_level_range, min_count=min_count,
            taxa_to_keep=taxa_to_keep, md_key=md_key,
            per_seq_precision=per_seq_precision, exclude=exclude)
        # if append is True, add new results to old
        if exists(results_fp) and append and (
                dataset_ids or reference_ids or method_ids or parameter_ids):
            mock_results = pd.concat([mock_results, old_results])
        # write
        _write_mock_results(mock_results, results_fp, backup)
    # if force is False, load precomputed results and append/filter as
    # required
    else:
        print("{0} already exists.".format(results_fp))
        print("Reading in pre-computed evaluation results.")
        print("To overwrite, set force=True")
        mock_results = pd.DataFrame.from_csv(results_fp, sep='\t')
        # if append is True, add results explicitly requested in results
        # params if those data are absent from mock_results.
        if append:
            # remove results (to compute) if they already exist in
            # mock_results
            # *** one potential bug with my approach here is that this does
            # *** not check whether results have been computed at all
            # *** taxonomic levels, on all samples, etc. Hence, if results
            # *** are missing for any reason but are present at other levels
            # *** or samples in that mock community, they will be skipped.
            # *** This is probably a negligible problem right now — users
            # *** should make sure they know they are being consistent and
            # *** can always overwrite if something has gone wrong and need
            # *** to bypass this behavior.
            results = [r for r in results if not
                       ((mock_results['Dataset'] == r[0]) &
                        (mock_results['Reference'] == r[1]) &
                        (mock_results['Method'] == r[2]) &
                        (mock_results['Parameters'] == r[3])).any()]
            print("append==True and force==False")
            print(len(results), "new results have been appended to results.")
            # merge any new results with pre-computed results, write out
            if len(results) >= 1:
                new_mock_results = compute_mock_results(
                    results, expected_tables, results_fp, mock_dir,
                    taxonomy_level_range, min_count=min_count,
                    taxa_to_keep=taxa_to_keep, md_key=md_key,
                    per_seq_precision=per_seq_precision, exclude=exclude)
                mock_results = pd.concat([mock_results, new_mock_results])
                # write. note we only do this if we actually append results!
                _write_mock_results(mock_results, results_fp, backup)
        # if append is false and results params are set, filter loaded data
        elif dataset_ids or reference_ids or method_ids or parameter_ids:
            print("Results have been filtered to only include datasets or "
                  "reference databases or methods or parameters that are "
                  "explicitly set by results params. To disable this "
                  "function and load all results, set dataset_ids and "
                  "reference_ids and method_ids and parameter_ids to None.")
            mock_results = _filter_mock_results(
                mock_results, dataset_ids, reference_ids, method_ids,
                parameter_ids)
    return mock_results
def _write_mock_results(mock_results, results_fp, backup=True):
if backup:
copy(results_fp, ''.join([results_fp, '.bk']))
mock_results.to_csv(results_fp, sep='\t')
def _filter_mock_results(mock_results, dataset_ids, reference_ids, method_ids,
                         parameter_ids):
    '''Subset a mock results dataframe by dataset, reference, method, and
    parameter ids. Any id list left empty/None is not filtered on.'''
    selections = (('Dataset', dataset_ids),
                  ('Reference', reference_ids),
                  ('Method', method_ids),
                  ('Parameters', parameter_ids))
    for column, ids in selections:
        if ids:
            mock_results = filter_df(mock_results, column, ids)
    return mock_results
def filter_df(df_in, column_name=None, values=None, exclude=False):
    '''Select (or drop) rows of a dataframe whose ``column_name`` value
    appears in ``values``.

    df_in: pd.DataFrame
    column_name: str
        Column to filter on; if falsy, ``df_in`` is returned unchanged.
    values: list
        Values to select for (or, with ``exclude=True``, against) in
        ``column_name``.
    exclude: bool
        If True, drop matching rows instead of keeping them.
    '''
    if not column_name:
        return df_in
    mask = df_in[column_name].isin(values)
    if exclude:
        mask = ~mask
    return df_in[mask]
def mount_observations(table_fp, min_count=0, taxonomy_level=6,
                       taxa_to_keep=None, md_key='taxonomy', normalize=True,
                       clean_obs_ids=True, filter_obs=True):
    '''load biom table, filter by abundance, collapse taxonomy, return biom.
    table_fp: path
        Input biom table.
    min_count: int
        Minimum abundance threshold; features detected at lower abundance are
        removed from table.
    taxonomy_level: int
        Taxonomic level at which to collapse table.
    taxa_to_keep: list of taxonomies to retain, others are removed before
        evaluation.
    md_key: str
        biom observation metadata key on which to collapse and filter.
    normalize: bool
        Normalize table to relative abundance across sample rows?
    clean_obs_ids: bool
        Remove '[]()' characters from observation ids? (these are removed from
        the ref db during filtering/cleaning steps, and should be removed from
        expected taxonomy files to avoid mismatches).
    filter_obs: bool
        Filter observations? filter_table will remove observations if taxonomy
        strings are shorter than taxonomic_level, count is less than min_count,
        or observation is not included in taxa_to_keep.
    '''
    try:
        table = load_table(table_fp)
    except ValueError:
        raise ValueError("Couldn't parse BIOM table: {0}".format(table_fp))
    # NOTE(review): filtering is skipped unless *all three* of filter_obs,
    # min_count > 0, and taxa_to_keep hold, so filter_obs=True with
    # min_count=0 performs no filtering — confirm this is intended.
    # NOTE(review): the documented ``clean_obs_ids`` parameter is never used
    # in this function body — confirm whether id cleaning was meant here.
    if filter_obs is True and min_count > 0 and taxa_to_keep is not None:
        try:
            table = filter_table(table, min_count, taxonomy_level,
                                 taxa_to_keep, md_key=md_key)
        except TableException:
            # if all data is filtered out, move on to the next table
            pass
        except TypeError:
            print("Missing taxonomic information in table " + table_fp)
    if table.is_empty():
        raise ValueError("Table is empty after filtering at"
                         " {0}".format(table_fp))
    # obtain the function used to group observations at the requested level
    collapse_taxonomy = get_taxonomy_collapser(taxonomy_level, md_key=md_key)
    try:
        table = table.collapse(collapse_taxonomy, axis='observation',
                               min_group_size=1)
    except TableException:
        raise TableException("Failure to collapse taxonomy for table at:"
                             " {0}".format(table_fp))
    except TypeError:
        raise TypeError("Failure to collapse taxonomy: {0}".format(table_fp))
    if normalize is True:
        # convert raw counts to relative abundance within each sample
        table.norm(axis='sample')
    return table
def compute_mock_results(result_tables, expected_table_lookup, results_fp,
                         mock_dir, taxonomy_level_range=range(2, 7),
                         min_count=0,
                         taxa_to_keep=None, md_key='taxonomy',
                         per_seq_precision=False, exclude=None):
    """ Compute precision, recall, and f-measure for result_tables at
    taxonomy_level
    result_tables: 2d list of tables to be compared to expected tables,
        where the data in the inner list is:
        [dataset_id, reference_database_id, method_id,
        parameter_combination_id, table_fp]
    expected_table_lookup: 2d dict of dataset_id, reference_db_id to BIOM
        table filepath, for the expected result tables
    taxonomy_level_range: range of levels to compute results
    results_fp: path to output file containing evaluation results summary
    mock_dir: path
        Directory of mock community directories that contain feature tables
        without taxonomy.
    per_seq_precision: bool
        Compute per-sequence precision/recall scores from expected
        taxonomy assignments?
    exclude: list
        taxonomies to explicitly exclude from precision scoring.

    Returns
    -------
    pd.DataFrame with one row per (dataset, level, sample) combination.
    """
    results = []
    for dataset_id, ref_id, method, params, actual_table_fp in result_tables:
        # Find expected results
        try:
            expected_table_fp = expected_table_lookup[dataset_id][ref_id]
        except KeyError:
            raise KeyError("Can't find expected table for \
                            ({0}, {1}).".format(dataset_id, ref_id))
        for taxonomy_level in taxonomy_level_range:
            # parse the expected table (unless taxonomy_level is specified,
            # this should be collapsed on level 6 taxonomy)
            expected_table = mount_observations(expected_table_fp,
                                                min_count=0,
                                                taxonomy_level=taxonomy_level,
                                                taxa_to_keep=taxa_to_keep,
                                                filter_obs=False)
            # parse the actual table and collapse it at the specified
            # taxonomic level
            actual_table = mount_observations(actual_table_fp,
                                              min_count=min_count,
                                              taxonomy_level=taxonomy_level,
                                              taxa_to_keep=taxa_to_keep,
                                              md_key=md_key)
            # load the feature table without taxonomy assignment
            # we use this for per-sequence precision
            feature_table_fp = join(mock_dir, dataset_id, 'feature_table.biom')
            try:
                feature_table = load_table(feature_table_fp)
            except ValueError:
                raise ValueError(
                    "Couldn't parse BIOM table: {0}".format(feature_table_fp))
            for sample_id in actual_table.ids(axis="sample"):
                # compute taxon accuracy/detection rates
                try:
                    accuracy, detection = compute_taxon_accuracy(
                        actual_table, expected_table,
                        actual_sample_id=sample_id,
                        expected_sample_id=sample_id)
                except ZeroDivisionError:
                    # BUGFIX: this previously tried to unpack three values
                    # into two names (``accuracy, detection = -1., -1., -1.``)
                    # which raised ValueError instead of recording sentinels.
                    accuracy, detection = -1., -1.
                # compute per-sequence precision / recall
                if per_seq_precision and exists(join(
                        dirname(expected_table_fp), 'trueish-taxonomies.tsv')):
                    p, r, f = per_sequence_precision(
                        expected_table_fp, actual_table_fp, feature_table,
                        sample_id, taxonomy_level, exclude=exclude)
                else:
                    # sentinel: per-sequence scoring disabled or unavailable
                    p, r, f = -1., -1., -1.
                # log results
                results.append((dataset_id, taxonomy_level, sample_id,
                                ref_id, method, params, p, r, f, accuracy,
                                detection))
    result = pd.DataFrame(results, columns=["Dataset", "Level", "SampleID",
                                            "Reference", "Method",
                                            "Parameters", "Precision",
                                            "Recall", "F-measure",
                                            "Taxon Accuracy Rate",
                                            "Taxon Detection Rate"])
    return result
def _multiple_match_kludge(exp, obs, fill_empty_observations=True):
'''Sort expected and observed lists and kludge to deal with cases where we
were unable to unambiguously select an expected taxonomy'''
obs = {i: t for i, t in [r.split('\t', 1) for r in obs]}
exp_grouped = defaultdict(list)
for exp_id, exp_taxon in [r.split('\t') for r in exp]:
exp_grouped[exp_id].append(exp_taxon)
if fill_empty_observations:
for k in exp_grouped.keys():
if k not in obs.keys():
obs[k] = 'Unassigned'
else:
assert obs.keys() == exp_grouped.keys(),\
'observed and expected read labels differ:\n' + \
str(list(obs.keys())) + '\n' + str(list(exp_grouped.keys()))
new_exp = []
new_obs = []
for exp_id, exp_taxons in exp_grouped.items():
obs_row = '\t'.join([exp_id, obs[exp_id]])
for exp_taxon in exp_taxons:
if obs[exp_id].startswith(exp_id):
row = '\t'.join([exp_id, exp_taxon])
if len(exp_taxons) > 1:
print('exp')
print(row)
print('obs')
print(obs_row)
print('candidates')
for e in exp_taxons:
print(e)
break
else:
row = '\t'.join([exp_id, exp_taxons[0]])
new_exp.append(row)
new_obs.append(obs_row)
return new_exp, new_obs
def per_sequence_precision(expected_table_fp, actual_table_fp, feature_table,
                           sample_id, taxonomy_level, exclude=None):
    '''Precision/recall/F-measure on individual representative sequences in a
    mock community sample.

    Returns ``(-1., -1., -1.)`` when no 'trueish-taxonomies.tsv' file exists
    alongside the expected table.
    '''
    # locate the expected taxonomy assignments
    exp_fp = join(dirname(expected_table_fp), 'trueish-taxonomies.tsv')
    if not exists(exp_fp):
        return -1., -1., -1.

    # locate the observed taxonomy assignments
    obs_dir = dirname(actual_table_fp)
    for candidate in ('rep_seqs_tax_assignments.txt', 'taxonomy.tsv'):
        candidate_fp = join(obs_dir, candidate)
        if exists(candidate_fp):
            obs_fp = candidate_fp
            break
    else:
        raise RuntimeError('taxonomy assignments do not exist '
                           'for dataset {0}'.format(obs_dir))

    # compile lists of taxa only if observed in current sample
    exp = observations_to_list(exp_fp, feature_table, sample_id)
    obs = observations_to_list(obs_fp, feature_table, sample_id)
    try:
        exp, obs = _multiple_match_kludge(exp, obs)
    except AssertionError:
        print('AssertionError in:')
        print(obs_dir)
        raise
    # truncate taxonomies to the desired level
    exp_taxa, obs_taxa = framework_functions.load_prf(
        obs, exp, level=slice(0, taxonomy_level + 1), sort=False)
    # compile sample weights (observations per sequence in sample)
    weights = [feature_table.get_value_by_ids(line.split('\t')[0], sample_id)
               for line in exp]
    # run precision/recall
    ps, rs, fs = framework_functions.compute_prf(
        exp_taxa, obs_taxa, test_type='mock',
        sample_weight=weights, exclude=exclude)
    return ps, rs, fs
def observations_to_list(obs_fp, actual_table, sample_id):
    '''Return the lines of ``obs_fp`` (tab-delimited id-to-taxonomy mappings)
    whose observation id — the first field — exists in biom table
    ``actual_table`` with a non-zero count in sample ``sample_id``.
    '''
    lines = taxa_manipulator.import_to_list(obs_fp)
    selected = []
    for line in lines:
        obs_id = line.split('\t')[0]
        if (actual_table.exists(obs_id, "observation") and
                actual_table.get_value_by_ids(obs_id, sample_id) != 0):
            selected.append(line)
    return selected
def add_sample_metadata_to_table(table_fp, dataset_id, reference_id,
                                 min_count=0, taxonomy_level=6,
                                 taxa_to_keep=None, md_key='taxonomy',
                                 method='expected', params='expected'):
    '''Load a biom table, attach provenance metadata to each sample, and
    rename samples to '{method}_{params}_{sample_id}'.
    '''
    table = mount_observations(table_fp, min_count=min_count,
                               taxonomy_level=taxonomy_level,
                               taxa_to_keep=taxa_to_keep, md_key=md_key)
    metadata = {}
    for s_id in table.ids(axis='sample'):
        metadata[s_id] = {'sample_id': s_id,
                          'dataset': dataset_id,
                          'reference': reference_id,
                          'method': method,
                          'params': params}
    table.add_metadata(metadata, 'sample')
    renamed = {s_id: '_'.join([method, params, s_id])
               for s_id in table.ids(axis='sample')}
    return table.update_ids(renamed, axis='sample')
def merge_expected_and_observed_tables(expected_results_dir, results_dirs,
                                       md_key='taxonomy', min_count=0,
                                       taxonomy_level=6, taxa_to_keep=None,
                                       biom_fp='merged_table.biom',
                                       filename_pattern='table.L{0}-taxa.biom',
                                       dataset_ids=None, reference_ids=None,
                                       method_ids=None, parameter_ids=None,
                                       force=False):
    '''For each dataset in expected_results_dir, merge expected and observed
    taxonomy compositions.
    dataset_ids: list
        dataset ids (mock community study ID) to process. Defaults to None
        (process all).
    reference_ids: list
        reference database data to process. Defaults to None (process all).
    method_ids: list
        methods to process. Defaults to None (process all).
    parameter_ids: list
        parameters to process. Defaults to None (process all).
    force: bool
        Must be set True for the merge to run at all, and controls whether
        existing merged biom files are overwritten (see comment below).
    '''
    # Quick and dirty way to keep merge from running automatically in notebooks
    # when users "run all" cells. This is really just a convenience function
    # that is meant to be called from the tax-credit notebooks and causing
    # force=False to kill the function is the best simple control. The
    # alternative is to work out a way to weed out expected_tables that have a
    # merged biom, and just load that biom instead of overwriting if
    # force=False. Then do the same for result_tables. If any new result_tables
    # exist, perform merge if force=True. The only time force=False should
    # result in a new table is when a new mock community/reference dataset
    # combo is added — so just let users set force=True if that's the case.
    if force is False:
        # NOTE(review): ``exit`` appears to be the interactive/site builtin —
        # library code would more conventionally return or raise; confirm.
        exit('Skipping merge. Set force=True if you intend to generate new '
             'merged tables.')
    # Find expected tables, add sample metadata
    expected_table_lookup = get_expected_tables_lookup(
        expected_results_dir, filename_pattern=filename_pattern)
    expected_tables = {}
    for dataset_id, expected_dict in expected_table_lookup.items():
        expected_tables[dataset_id] = {}
        for reference_id, expected_table_fp in expected_dict.items():
            # only (re)build when no merged biom exists yet, or force=True
            if not exists(join(expected_results_dir, dataset_id,
                               reference_id, biom_fp)) or force is True:
                expected_tables[dataset_id][reference_id] = \
                    add_sample_metadata_to_table(expected_table_fp,
                                                 dataset_id=dataset_id,
                                                 reference_id=reference_id,
                                                 min_count=min_count,
                                                 taxonomy_level=taxonomy_level,
                                                 taxa_to_keep=taxa_to_keep,
                                                 md_key='taxonomy',
                                                 method='expected',
                                                 params='expected')
    # Find observed results tables, add sample metadata
    result_tables = seek_results(
        results_dirs, dataset_ids, reference_ids, method_ids, parameter_ids)
    for dataset_id, ref_id, method, params, actual_table_fp in result_tables:
        biom_destination = join(expected_results_dir, dataset_id, ref_id,
                                biom_fp)
        if not exists(biom_destination) or force is True:
            try:
                expected_table_fp = \
                    expected_table_lookup[dataset_id][ref_id]
            except KeyError:
                raise KeyError("Can't find expected table for \
                               ({0}, {1}).".format(dataset_id, ref_id))
            # import expected table, amend sample ids
            actual_table = \
                add_sample_metadata_to_table(actual_table_fp,
                                             dataset_id=dataset_id,
                                             reference_id=ref_id,
                                             min_count=min_count,
                                             taxonomy_level=taxonomy_level,
                                             taxa_to_keep=taxa_to_keep,
                                             md_key='taxonomy',
                                             method=method,
                                             params=params)
            # merge expected and results tables
            expected_tables[dataset_id][ref_id] = \
                expected_tables[dataset_id][ref_id].merge(actual_table)
            # write biom table to destination
            write_biom_table(expected_tables[dataset_id][ref_id],
                             'hdf5', biom_destination)
def _is_first(df, test_field='Method'):
"""used to filter df to contain only one row per method"""
observed = set()
result = []
for e in df[test_field]:
result.append(e not in observed)
observed.add(e)
return result
def method_by_dataset(df, dataset, sort_field, display_fields,
                      group_by='Dataset', test_field='Method'):
    """ Generate summary of best parameter set for each method for single df

    df: pd.DataFrame
        Full results table.
    dataset: str
        Value of the ``group_by`` column to summarize.
    sort_field: str
        Column to rank rows by (descending), e.g. 'F-measure'.
    display_fields: sequence of str
        Columns to include in the returned summary.
    group_by: str
        Column used to select this dataset's rows.
    test_field: str
        Column defining method identity; only the top-ranked row per unique
        value is retained.
    """
    dataset_df = df.loc[df[group_by] == dataset]
    sorted_dataset_df = dataset_df.sort_values(by=sort_field, ascending=False)
    filtered_dataset_df = sorted_dataset_df[_is_first(sorted_dataset_df,
                                                      test_field)]
    # BUGFIX: ``.ix`` was deprecated in pandas 0.20 and removed in 1.0;
    # ``.loc`` with an explicit list of column labels is the supported
    # equivalent (a bare tuple would be misread as a multi-axis key).
    return filtered_dataset_df.loc[:, list(display_fields)]
# Convenience summarizer: rank each method's parameter sets by F-measure and
# report the standard evaluation metrics (see ``method_by_dataset``).
method_by_dataset_a1 = partial(method_by_dataset,
                               sort_field="F-measure",
                               display_fields=("Method", "Parameters",
                                               "Precision", "Recall",
                                               "F-measure",
                                               "Taxon Accuracy Rate",
                                               "Taxon Detection Rate"))
def method_by_reference_comparison(df, group_by='Reference', dataset='Dataset',
                                   level_range=range(4, 7), lv="Level",
                                   sort_field="F-measure",
                                   display_fields=("Reference", "Level",
                                                   "Method", "Parameters",
                                                   "Precision", "Recall",
                                                   "F-measure",
                                                   "Taxon Accuracy Rate",
                                                   "Taxon Detection Rate")):
    '''Compute mean performance for a given reference/method/parameter
    combination across multiple taxonomic levels.
    df: pandas df
    group_by: str
        Category in df. Means will be averaged across these groups.
    dataset: str
        Category in df. df will be separated by datasets prior to computing
        means.
    level_range: range
        Taxonomy levels to iterate.
    lv: str
        Category in df that contains taxonomic level information.
    sort_field: str
        Category in df. Results within each group/level combination will be
        sorted by this field.
    display_fields: tuple
        Categories in df that should be printed to results table.

    NOTE(review): despite the wording above, no averaging happens here — the
    best row per method for each dataset/level/group combination is simply
    concatenated into one dataframe; confirm intended behavior.
    '''
    rank = pd.DataFrame()
    for ds in df[dataset].unique():
        df1 = df[df[dataset] == ds]
        for level in level_range:
            for group in df1[group_by].unique():
                # best row per method for this group at this taxonomic level;
                # note the roles: filter on ``group_by`` == ``group``
                a = method_by_dataset(df1[df1[lv] == level],
                                      group_by=group_by,
                                      dataset=group,
                                      sort_field=sort_field,
                                      display_fields=display_fields)
                rank = pd.concat([rank, a])
    return rank
| bsd-3-clause |
jcmgray/xyzpy | xyzpy/gen/batch.py | 1 | 52861 | import os
import re
import copy
import math
import time
import glob
import shutil
import pickle
import pathlib
import warnings
import functools
import importlib
import itertools
import numpy as np
import xarray as xr
from ..utils import _get_fn_name, prod, progbar
from .combo_runner import (
_combo_runner,
combo_runner_to_ds,
)
from .case_runner import (
_case_runner,
case_runner_to_ds,
)
from .prepare import (
_parse_combos,
_parse_constants,
_parse_attrs,
_parse_fn_args,
_parse_cases,
)
from .farming import Runner, Harvester, Sampler
# Filename templates for the objects a Crop persists to disk: individual
# sown batches, their grown results, the cloudpickled target function, and
# the crop-level settings file.
BTCH_NM = "xyz-batch-{}.jbdmp"
RSLT_NM = "xyz-result-{}.jbdmp"
FNCT_NM = "xyz-function.clpkl"
INFO_NM = "xyz-settings.jbdmp"
class XYZError(Exception):
    """xyzpy-specific runtime error, raised e.g. for missing crop settings
    or attempts to reap an unfinished crop."""
    pass
def write_to_disk(obj, fname):
    """Serialize ``obj`` to the file at path ``fname`` using pickle."""
    with open(fname, 'wb') as fh:
        pickle.dump(obj, fh)
def read_from_disk(fname):
    """Load and return the pickled object stored at path ``fname``."""
    with open(fname, 'rb') as fh:
        return pickle.load(fh)
@functools.lru_cache(8)
def get_picklelib(picklelib='joblib.externals.cloudpickle'):
    """Import and cache the pickling module named by ``picklelib``."""
    return importlib.import_module(picklelib)
def to_pickle(obj, picklelib='joblib.externals.cloudpickle'):
    """Serialize ``obj`` to a bytestring using the given pickle library."""
    return get_picklelib(picklelib).dumps(obj)
def from_pickle(s, picklelib='joblib.externals.cloudpickle'):
    """Deserialize an object from bytestring ``s`` using the given pickle
    library."""
    return get_picklelib(picklelib).loads(s)
# --------------------------------- parsing --------------------------------- #
def parse_crop_details(fn, crop_name, crop_parent):
    """Work out how to structure the sowed data.

    Parameters
    ----------
    fn : callable, optional
        Function to infer name crop_name from, if not given.
    crop_name : str, optional
        Specific name to give this set of runs.
    crop_parent : str, optional
        Specific directory to put the ".xyz-{crop_name}/" folder in
        with all the cases and results.

    Returns
    -------
    crop_location : str
        Full path to the crop-folder.
    crop_name : str
        Name of the crop.
    crop_parent : str
        Parent folder of the crop.

    Raises
    ------
    ValueError
        If neither ``fn`` nor ``crop_name`` is supplied.
    """
    if crop_name is None:
        if fn is None:
            # BUGFIX: message previously read "must be give."
            raise ValueError("Either `fn` or `crop_name` must be given.")
        crop_name = _get_fn_name(fn)

    # default the parent folder to the current working directory
    crop_parent = crop_parent if crop_parent is not None else os.getcwd()
    crop_location = os.path.join(crop_parent, ".xyz-{}".format(crop_name))

    return crop_location, crop_name, crop_parent
def _parse_fn_farmer(fn, farmer):
if farmer is not None:
if fn is not None:
warnings.warn("'fn' is ignored if a 'Runner', 'Harvester', or "
"'Sampler' is supplied as the 'farmer' kwarg.")
fn = farmer.fn
return fn, farmer
def infer_shape(x):
    """Take a nested sequence and find its shape as if it were an array.

    Parameters
    ----------
    x : object
        Possibly nested sequence; non-sequences contribute no dimensions.
        The shape is probed along the first element of each level.

    Returns
    -------
    tuple of int

    Examples
    --------
    >>> x = [[10, 20, 30], [40, 50, 60]]
    >>> infer_shape(x)
    (2, 3)
    """
    try:
        n = len(x)
    except TypeError:
        # not sized - a scalar, which contributes no dimensions
        return ()
    if n == 0:
        # BUGFIX/generalization: an empty sequence previously raised
        # IndexError via ``x[0]``; report it as a length-0 axis instead.
        return (0,)
    try:
        return (n,) + infer_shape(x[0])
    except TypeError:
        # sized but not indexable (e.g. a set) - treat as a flat dimension
        return (n,)
def nan_like_result(res):
    """Take a single result of a function evaluation and calculate the same
    sequence of scalars or arrays but filled entirely with ``nan``.

    Examples
    --------
    >>> res = (True, [[10, 20, 30], [40, 50, 60]], -42.0)
    >>> nan_like_result(res)
    (array(nan), array([[nan, nan, nan],
           [nan, nan, nan]]), array(nan))
    """
    # labelled xarray objects keep their structure, values become nan
    if isinstance(res, (xr.Dataset, xr.DataArray)):
        return xr.full_like(res, np.nan, dtype=float)
    try:
        return tuple(
            np.broadcast_to(np.nan, infer_shape(field)) for field in res
        )
    except TypeError:
        # a bare scalar result
        return np.nan
def calc_clean_up_default_res(crop, clean_up, allow_incomplete):
    """Logic for choosing whether to automatically clean up a crop, and what,
    if any, the default all-nan result should be.

    With ``clean_up=None``, cleaning defaults to the opposite of
    ``allow_incomplete``; the nan-filled default result is only needed when
    incomplete reaps are allowed.
    """
    if clean_up is None:
        clean_up = not allow_incomplete
    default_result = crop.all_nan_result if allow_incomplete else None
    return clean_up, default_result
def check_ready_to_reap(crop, allow_incomplete, wait):
    """Raise ``XYZError`` if ``crop`` is not finished growing and neither
    ``allow_incomplete`` nor ``wait`` permit reaping anyway."""
    if allow_incomplete or wait:
        return
    if not crop.is_ready_to_reap():
        raise XYZError("This crop is not ready to reap yet - results are "
                       "missing. You can reap only finished batches by setting"
                       " ``allow_incomplete=True``, but be aware this will "
                       "represent all missing batches with ``np.nan`` and thus"
                       " might effect data-types.")
class Crop(object):
"""Encapsulates all the details describing a single 'crop', that is,
its location, name, and batch size/number. Also allows tracking of
crop's progress, and experimentally, automatic submission of
workers to grid engine to complete un-grown cases. Can also be instantiated
directly from a :class:`~xyzpy.Runner` or :class:`~xyzpy.Harvester` or
:class:`~Sampler.Crop` instance.
Parameters
----------
fn : callable, optional
Target function - Crop `name` will be inferred from this if
not given explicitly. If given, `Sower` will also default
to saving a version of `fn` to disk for `batch.grow` to use.
name : str, optional
Custom name for this set of runs - must be given if `fn`
is not.
parent_dir : str, optional
If given, alternative directory to put the ".xyz-{name}/"
folder in with all the cases and results.
save_fn : bool, optional
Whether to save the function to disk for `batch.grow` to use.
Will default to True if `fn` is given.
batchsize : int, optional
How many cases to group into a single batch per worker.
By default, batchsize=1. Cannot be specified if `num_batches`
is.
num_batches : int, optional
How many total batches to aim for, cannot be specified if
`batchsize` is.
farmer : {xyzpy.Runner, xyzpy.Harvester, xyzpy.Sampler}, optional
A Runner, Harvester or Sampler, instance, from which the `fn` can be
inferred and which can also allow the Crop to reap itself straight to a
dataset or dataframe.
autoload : bool, optional
If True, check for the existence of a Crop written to disk
with the same location, and if found, load it.
See Also
--------
Runner.Crop, Harvester.Crop, Sampler.Crop
"""
    def __init__(self, *,
                 fn=None,
                 name=None,
                 parent_dir=None,
                 save_fn=None,
                 batchsize=None,
                 num_batches=None,
                 farmer=None,
                 autoload=True):
        # resolve the target function, preferring the farmer's if supplied
        self._fn, self.farmer = _parse_fn_farmer(fn, farmer)
        self.name = name
        self.parent_dir = parent_dir
        self.save_fn = save_fn
        self.batchsize = batchsize
        self.num_batches = num_batches
        # remainder when cases don't divide evenly into batches (set on sow)
        self._batch_remainder = None
        # lazily computed nan-filled template result (see ``all_nan_result``)
        self._all_nan_result = None
        # Work out the full directory for the crop
        self.location, self.name, self.parent_dir = \
            parse_crop_details(self._fn, self.name, self.parent_dir)
        # try loading crop information if it exists
        if autoload and self.is_prepared():
            self._sync_info_from_disk()
        # Save function so it can be automatically loaded with all deps?
        if (fn is None) and (save_fn is True):
            raise ValueError("Must specify a function for it to be saved!")
        # NOTE(review): this overwrites the ``self.save_fn = save_fn``
        # assignment above — anything other than an explicit False (including
        # None) enables saving; confirm intended.
        self.save_fn = save_fn is not False
@property
def runner(self):
if isinstance(self.farmer, Runner):
return self.farmer
elif isinstance(self.farmer, (Harvester, Sampler)):
return self.farmer.runner
else:
return None
# ------------------------------- methods ------------------------------- #
def choose_batch_settings(self, *, combos=None, cases=None):
"""Work out how to divide all cases into batches, i.e. ensure
that ``batchsize * num_batches >= num_cases``.
"""
if int(combos is not None) + int(cases is not None) != 1:
raise ValueError("Can only supply one of 'combos' or 'cases'.")
if combos is not None:
n = prod(len(x) for _, x in combos)
else:
n = len(cases)
if (self.batchsize is not None) and (self.num_batches is not None):
# Check that they are set correctly
pos_tot = self.batchsize * self.num_batches
if not (n <= pos_tot < n + self.batchsize):
raise ValueError("`batchsize` and `num_batches` cannot both"
"be specified if they do not not multiply"
"to the correct number of total cases.")
# Decide based on batchsize
elif self.num_batches is None:
if self.batchsize is None:
self.batchsize = 1
if not isinstance(self.batchsize, int):
raise TypeError("`batchsize` must be an integer.")
if self.batchsize < 1:
raise ValueError("`batchsize` must be >= 1.")
self.num_batches = math.ceil(n / self.batchsize)
self._batch_remainder = 0
# Decide based on num_batches:
else:
# cap at the total number of cases
self.num_batches = min(n, self.num_batches)
if not isinstance(self.num_batches, int):
raise TypeError("`num_batches` must be an integer.")
if self.num_batches < 1:
raise ValueError("`num_batches` must be >= 1.")
self.batchsize, self._batch_remainder = divmod(n, self.num_batches)
def ensure_dirs_exists(self):
"""Make sure the directory structure for this crop exists.
"""
os.makedirs(os.path.join(self.location, "batches"), exist_ok=True)
os.makedirs(os.path.join(self.location, "results"), exist_ok=True)
def save_info(self, combos=None, cases=None, fn_args=None):
"""Save information about the sowed cases.
"""
# If saving Harvester or Runner, strip out function information so
# as just to use pickle.
if self.farmer is not None:
farmer_copy = copy.deepcopy(self.farmer)
farmer_copy.fn = None
farmer_pkl = to_pickle(farmer_copy)
else:
farmer_pkl = None
write_to_disk({
'combos': combos,
'cases': cases,
'fn_args': fn_args,
'batchsize': self.batchsize,
'num_batches': self.num_batches,
'_batch_remainder': self._batch_remainder,
'farmer': farmer_pkl,
}, os.path.join(self.location, INFO_NM))
def load_info(self):
"""Load the full settings from disk.
"""
sfile = os.path.join(self.location, INFO_NM)
if not os.path.isfile(sfile):
raise XYZError("Settings can't be found at {}.".format(sfile))
else:
return read_from_disk(sfile)
    def _sync_info_from_disk(self, only_missing=True):
        """Load information about the saved cases.

        Parameters
        ----------
        only_missing : bool, optional
            If True (default), keep any farmer this Crop already has rather
            than overwriting it with the one loaded from disk.
        """
        settings = self.load_info()
        self.batchsize = settings['batchsize']
        self.num_batches = settings['num_batches']
        self._batch_remainder = settings['_batch_remainder']
        # the farmer was pickled with its function stripped (see save_info)
        farmer_pkl = settings['farmer']
        farmer = (
            None if farmer_pkl is None else
            from_pickle(farmer_pkl)
        )
        fn, farmer = _parse_fn_farmer(None, farmer)
        # if crop already has a harvester/runner. (e.g. was instantiated from
        # one) by default don't overwrite from disk
        if (self.farmer) is None or (not only_missing):
            self.farmer = farmer
        if self.fn is None:
            # reload the cloudpickled function (re-attaching it to the
            # farmer if possible)
            self.load_function()
def save_function_to_disk(self):
"""Save the base function to disk using cloudpickle
"""
write_to_disk(to_pickle(self._fn),
os.path.join(self.location, FNCT_NM))
def load_function(self):
"""Load the saved function from disk, and try to re-insert it back into
Harvester or Runner if present.
"""
self._fn = from_pickle(read_from_disk(
os.path.join(self.location, FNCT_NM)))
if self.farmer is not None:
if self.farmer.fn is None:
self.farmer.fn = self._fn
else:
# TODO: check equality?
raise XYZError("Trying to load this Crop's function, {}, from "
"disk but its farmer already has a function "
"set: {}.".format(self._fn, self.farmer.fn))
    def prepare(self, combos=None, cases=None, fn_args=None):
        """Write information about this crop and the supplied combos to disk.
        Typically done at start of sow, not when Crop instantiated.

        Parameters
        ----------
        combos : sequence of (name, values) pairs, optional
        cases : sequence, optional
        fn_args : sequence of str, optional
            All simply forwarded to ``save_info`` for later reaping.
        """
        self.ensure_dirs_exists()
        # optionally persist the target function for workers to load
        if self.save_fn:
            self.save_function_to_disk()
        self.save_info(combos=combos, cases=cases, fn_args=fn_args)
def is_prepared(self):
"""Check whether this crop has been written to disk.
"""
return os.path.exists(os.path.join(self.location, INFO_NM))
def calc_progress(self):
"""Calculate how much progressed has been made in growing the cases.
"""
if self.is_prepared():
self._sync_info_from_disk()
self._num_sown_batches = len(glob.glob(
os.path.join(self.location, "batches", BTCH_NM.format("*"))))
self._num_results = len(glob.glob(
os.path.join(self.location, "results", RSLT_NM.format("*"))))
else:
self._num_sown_batches = -1
self._num_results = -1
def is_ready_to_reap(self):
self.calc_progress()
return (
self._num_results > 0 and
(self._num_results == self.num_sown_batches)
)
    def missing_results(self):
        """Return the (1-based) batch numbers that have been sown but do not
        yet have a result file on disk.
        """
        self.calc_progress()
        def no_result_exists(x):
            # True when result file for batch number ``x`` is absent
            return not os.path.isfile(
                os.path.join(self.location, "results", RSLT_NM.format(x)))
        return tuple(filter(no_result_exists, range(1, self.num_batches + 1)))
    def delete_all(self):
        """Remove this crop's directory and all of its contents from disk."""
        # delete everything
        shutil.rmtree(self.location)
@property
def all_nan_result(self):
if self._all_nan_result is None:
result_files = glob.glob(
os.path.join(self.location, "results", RSLT_NM.format("*"))
)
if not result_files:
raise XYZError("To infer an all-nan result requires at least "
"one finished result.")
reference_result = read_from_disk(result_files[0])[0]
self._all_nan_result = nan_like_result(reference_result)
return self._all_nan_result
    def __str__(self):
        """Human readable multi-line progress summary with a text bar."""
        # Location and name, underlined
        if not os.path.exists(self.location):
            return self.location + "\n * Not yet sown, or already reaped * \n"
        loc_len = len(self.location)
        name_len = len(self.name)
        self.calc_progress()
        # fraction of batches with results, as a percentage
        percentage = 100 * self._num_results / self.num_batches
        # Progress bar
        total_bars = 20
        bars = int(percentage * total_bars / 100)
        return ("\n"
                "{location}\n"
                "{under_crop_dir}{under_crop_name}\n"
                "{num_results} / {total} batches of size {bsz} completed\n"
                "[{done_bars}{not_done_spaces}] : {percentage:.1f}%"
                "\n").format(
                    location=self.location,
                    under_crop_dir="-" * (loc_len - name_len),
                    under_crop_name="=" * name_len,
                    num_results=self._num_results,
                    total=self.num_batches,
                    bsz=self.batchsize,
                    done_bars="#" * bars,
                    not_done_spaces=" " * (total_bars - bars),
                    percentage=percentage,
                )
def __repr__(self):
if not os.path.exists(self.location):
progress = "*reaped or unsown*"
else:
self.calc_progress()
progress = "{}/{}".format(self._num_results, self.num_batches)
msg = "<Crop(name='{}', progress={}, batchsize={})>"
return msg.format(self.name, progress, self.batchsize)
def parse_constants(self, constants=None):
constants = _parse_constants(constants)
if self.runner is not None:
constants = {**self.runner._constants, **constants}
constants = {**self.runner._resources, **constants}
return constants
def sow_combos(self, combos, constants=None, verbosity=1):
"""Sow to disk.
"""
combos = _parse_combos(combos)
constants = self.parse_constants(constants)
# Sort to ensure order remains same for reaping results
# (don't want to hash kwargs)
combos = sorted(combos, key=lambda x: x[0])
self.choose_batch_settings(combos=combos)
self.prepare(combos=combos)
with Sower(self) as sow_fn:
_combo_runner(fn=sow_fn, combos=combos, constants=constants,
verbosity=verbosity)
def sow_cases(self, fn_args, cases, constants=None, verbosity=1):
fn_args = _parse_fn_args(self._fn, fn_args)
cases = _parse_cases(cases)
constants = self.parse_constants(constants)
self.choose_batch_settings(cases=cases)
self.prepare(fn_args=fn_args, cases=cases)
with Sower(self) as sow_fn:
_case_runner(fn=sow_fn, fn_args=fn_args, cases=cases,
constants=constants, verbosity=verbosity)
def sow_samples(self, n, combos=None, constants=None, verbosity=1):
fn_args, cases = self.farmer.gen_cases_fnargs(n, combos)
self.sow_cases(fn_args, cases,
constants=constants, verbosity=verbosity)
def grow(self, batch_ids, **combo_runner_opts):
"""Grow specific batch numbers using this process.
"""
if isinstance(batch_ids, int):
batch_ids = (batch_ids,)
_combo_runner(grow, combos=(('batch_number', batch_ids),),
constants={'verbosity': 0, 'crop': self},
**combo_runner_opts)
    def grow_missing(self, **combo_runner_opts):
        """Grow any missing results using this process.

        Parameters
        ----------
        combo_runner_opts
            Forwarded to ``grow`` / the underlying combo runner.
        """
        self.grow(batch_ids=self.missing_results(), **combo_runner_opts)
def reap_combos(self, wait=False, clean_up=None, allow_incomplete=False):
"""Reap already sown and grown results from this crop.
Parameters
----------
wait : bool, optional
Whether to wait for results to appear. If false (default) all
results need to be in place before the reap.
clean_up : bool, optional
Whether to delete all the batch files once the results have been
gathered. If left as ``None`` this will be automatically set to
``not allow_incomplete``.
allow_incomplete : bool, optional
Allow only partially completed crop results to be reaped,
incomplete results will all be filled-in as nan.
Returns
-------
results : nested tuple
'N-dimensional' tuple containing the results.
"""
check_ready_to_reap(self, allow_incomplete, wait)
clean_up, default_result = calc_clean_up_default_res(
self, clean_up, allow_incomplete
)
# load same combinations as cases saved with
settings = read_from_disk(os.path.join(self.location, INFO_NM))
with Reaper(self, num_batches=settings['num_batches'],
wait=wait, default_result=default_result) as reap_fn:
results = _combo_runner(fn=reap_fn, constants={},
combos=settings['combos'])
if clean_up:
self.delete_all()
return results
    def reap_combos_to_ds(self,
                          var_names=None,
                          var_dims=None,
                          var_coords=None,
                          constants=None,
                          attrs=None,
                          parse=True,
                          wait=False,
                          clean_up=None,
                          allow_incomplete=False,
                          to_df=False):
        """Reap a function over sowed combinations and output to a Dataset.

        Parameters
        ----------
        var_names : str, sequence of strings, or None
            Variable name(s) of the output(s) of `fn`, set to None if
            fn outputs data already labelled in a Dataset or DataArray.
        var_dims : sequence of either strings or string sequences, optional
            'Internal' names of dimensions for each variable, the values for
            each dimension should be contained as a mapping in either
            `var_coords` (not needed by `fn`) or `constants` (needed by `fn`).
        var_coords : mapping, optional
            Mapping of extra coords the output variables may depend on.
        constants : mapping, optional
            Arguments to `fn` which are not iterated over, these will be
            recorded either as attributes or coordinates if they are named
            in `var_dims`.
        attrs : mapping, optional
            Any extra attributes to store.
        parse : bool, optional
            Whether to normalise ``constants`` and ``attrs`` here. If False,
            both must already be mappings (e.g. pre-parsed by a Runner).
        wait : bool, optional
            Whether to wait for results to appear. If false (default) all
            results need to be in place before the reap.
        clean_up : bool, optional
            Whether to delete all the batch files once the results have been
            gathered. If left as ``None`` this will be automatically set to
            ``not allow_incomplete``.
        allow_incomplete : bool, optional
            Allow only partially completed crop results to be reaped,
            incomplete results will all be filled-in as nan.
        to_df : bool, optional
            Whether to reap to a ``xarray.Dataset`` or a ``pandas.DataFrame``.

        Returns
        -------
        xarray.Dataset or pandas.Dataframe
            Multidimensional labelled dataset contatining all the results.
        """
        check_ready_to_reap(self, allow_incomplete, wait)

        clean_up, default_result = calc_clean_up_default_res(
            self, clean_up, allow_incomplete
        )

        # Load exact same combinations as cases saved with
        settings = read_from_disk(os.path.join(self.location, INFO_NM))

        if parse:
            constants = _parse_constants(constants)
            attrs = _parse_attrs(attrs)

        # NOTE(review): when parse=False, ``attrs`` and ``constants`` must
        # already be mappings or the ``{**attrs, **constants}`` merge below
        # raises -- confirm against callers (reap_runner passes Runner attrs).
        with Reaper(self, num_batches=settings['num_batches'],
                    wait=wait, default_result=default_result) as reap_fn:

            # Move constants into attrs, so as not to pass them to the Reaper
            # when in fact they were meant for the original function.
            opts = dict(
                fn=reap_fn,
                var_names=var_names,
                var_dims=var_dims,
                var_coords=var_coords,
                constants={},
                resources={},
                parse=parse
            )

            # combos were sown -> rebuild the full grid as a Dataset
            if settings['combos'] is not None:
                opts['combos'] = settings['combos']
                opts['attrs'] = {**attrs, **constants}
                data = combo_runner_to_ds(**opts)
            # individual cases were sown -> rebuild case-by-case
            else:
                opts['fn_args'] = settings['fn_args']
                opts['cases'] = settings['cases']
                opts['to_df'] = to_df
                data = case_runner_to_ds(**opts)

        if clean_up:
            self.delete_all()

        return data
    def reap_runner(self, runner, wait=False, clean_up=None,
                    allow_incomplete=False, to_df=False):
        """Reap a Crop over sowed combos and save to a dataset defined by a
        Runner.

        Parameters
        ----------
        runner : xyzpy Runner
            Supplies the variable names/dims/coords, constants and attrs
            describing the output.
        wait, clean_up, allow_incomplete, to_df
            See :meth:`reap_combos_to_ds`.

        Returns
        -------
        xarray.Dataset or pandas.DataFrame
            Also stored on the runner as ``_last_df`` / ``_last_ds``.
        """
        # Can ignore `Runner.resources` as they play no part in desecribing the
        # output, though they should be supplied to sow and thus grow.
        data = self.reap_combos_to_ds(
            var_names=runner._var_names,
            var_dims=runner._var_dims,
            var_coords=runner._var_coords,
            constants=runner._constants,
            attrs=runner._attrs,
            parse=False,  # runner attributes are already parsed mappings
            wait=wait,
            clean_up=clean_up,
            allow_incomplete=allow_incomplete,
            to_df=to_df)

        # cache the result on the runner for later inspection
        if to_df:
            runner._last_df = data
        else:
            runner._last_ds = data

        return data
    def reap_harvest(self, harvester, wait=False, sync=True, overwrite=None,
                     clean_up=None, allow_incomplete=False):
        """Reap a Crop over sowed combos and merge with the dataset defined by
        a Harvester.

        Parameters
        ----------
        harvester : xyzpy Harvester
            Supplies the runner describing the output and the on-disk dataset
            to merge into.
        wait, clean_up, allow_incomplete
            See :meth:`reap_combos_to_ds`.
        sync : bool, optional
            Immediately merge the reaped dataset into the harvester's.
        overwrite : bool, optional
            Conflict policy forwarded to ``harvester.add_ds``.

        Returns
        -------
        xarray.Dataset
        """
        if harvester is None:
            raise ValueError("Cannot reap and harvest if no Harvester is set.")

        # reap with clean_up=False: batch files must survive until the sync
        # below has succeeded
        ds = self.reap_runner(harvester.runner, wait=wait, clean_up=False,
                              allow_incomplete=allow_incomplete, to_df=False)

        if sync:
            harvester.add_ds(ds, sync=sync, overwrite=overwrite)

        # defer cleaning up until we have successfully synced new dataset
        if clean_up is None:
            clean_up = not allow_incomplete

        if clean_up:
            self.delete_all()

        return ds
    def reap_samples(self, sampler, wait=False, sync=True,
                     clean_up=None, allow_incomplete=False):
        """Reap a Crop over sowed cases and merge the resulting dataframe
        into the one defined by a Sampler.

        Parameters
        ----------
        sampler : xyzpy Sampler
            Supplies the runner describing the output and receives the
            reaped dataframe.
        wait, clean_up, allow_incomplete
            See :meth:`reap_combos_to_ds`.
        sync : bool, optional
            Immediately add the new dataframe to the sampler.

        Returns
        -------
        pandas.DataFrame
        """
        if sampler is None:
            raise ValueError("Cannot reap samples without a 'Sampler'.")

        df = self.reap_runner(sampler.runner, wait=wait, clean_up=clean_up,
                              allow_incomplete=allow_incomplete, to_df=True)

        if sync:
            sampler._last_df = df
            sampler.add_df(df, sync=sync)

        return df
    def reap(self, wait=False, sync=True, overwrite=None,
             clean_up=None, allow_incomplete=False):
        """Reap sown and grown combos from disk. Return a dataset if a runner
        or harvester is set, otherwise, the raw nested tuple.

        Parameters
        ----------
        wait : bool, optional
            Whether to wait for results to appear. If false (default) all
            results need to be in place before the reap.
        sync : bool, optional
            Immediately sync the new dataset with the on-disk full dataset or
            dataframe if a harvester or sampler is used.
        overwrite : bool, optional
            How to compare data when syncing to on-disk dataset.
            If ``None``, (default) merge as long as no conflicts.
            ``True``: overwrite with the new data. ``False``, discard any
            new conflicting data.
        clean_up : bool, optional
            Whether to delete all the batch files once the results have been
            gathered. If left as ``None`` this will be automatically set to
            ``not allow_incomplete``.
        allow_incomplete : bool, optional
            Allow only partially completed crop results to be reaped,
            incomplete results will all be filled-in as nan.

        Returns
        -------
        nested tuple or xarray.Dataset
        """
        opts = dict(clean_up=clean_up, wait=wait,
                    allow_incomplete=allow_incomplete)

        # dispatch on the type of farmer attached to this crop;
        # `sync`/`overwrite` only apply to the harvester/sampler variants
        if isinstance(self.farmer, Runner):
            return self.reap_runner(self.farmer, **opts)

        if isinstance(self.farmer, Harvester):
            opts['overwrite'] = overwrite
            return self.reap_harvest(self.farmer, **opts)

        if isinstance(self.farmer, Sampler):
            return self.reap_samples(self.farmer, **opts)

        # no farmer: return the raw nested tuple of results
        return self.reap_combos(**opts)
def check_bad(self, delete_bad=True):
"""Check that the result dumps are not bad -> sometimes length does not
match the batch. Optionally delete these so that they can be re-grown.
Parameters
----------
delete_bad : bool
Delete bad results as they are come across.
Returns
-------
bad_ids : tuple
The bad batch numbers.
"""
# XXX: work out why this is needed sometimes on network filesystems.
result_files = glob.glob(
os.path.join(self.location, "results", RSLT_NM.format("*")))
bad_ids = []
for result_file in result_files:
# load corresponding batch file to check length.
result_num = os.path.split(
result_file)[-1].strip("xyz-result-").strip(".jbdmp")
batch_file = os.path.join(
self.location, "batches", BTCH_NM.format(result_num))
batch = read_from_disk(batch_file)
try:
result = read_from_disk(result_file)
unloadable = False
except Exception as e:
unloadable = True
err = e
if unloadable or (len(result) != len(batch)):
msg = "result {} is bad".format(result_file)
msg += "." if not delete_bad else " - deleting it."
msg += " Error was: {}".format(err) if unloadable else ""
print(msg)
if delete_bad:
os.remove(result_file)
bad_ids.append(result_num)
return tuple(bad_ids)
# ----------------------------- properties ----------------------------- #
def _get_fn(self):
return self._fn
def _set_fn(self, fn):
if self.save_fn is None and fn is not None:
self.save_fn = True
self._fn = fn
def _del_fn(self):
self._fn = None
self.save_fn = False
fn = property(_get_fn, _set_fn, _del_fn,
"Function to save with the Crop for automatic loading and "
"running. Default crop name will be inferred from this if"
"not given explicitly as well.")
    @property
    def num_sown_batches(self):
        """Total number of batches to be run/grown.
        """
        # progress counters are refreshed lazily on each access
        self.calc_progress()
        return self._num_sown_batches
    @property
    def num_results(self):
        """Number of results grown so far (refreshed via ``calc_progress``).
        """
        self.calc_progress()
        return self._num_results
def load_crops(directory='.'):
    """Automatically load all the crops found in the current directory.

    Parameters
    ----------
    directory : str, optional
        Which directory to load the crops from, defaults to '.' - the current.

    Returns
    -------
    dict[str, Crop]
        Mapping of the crop name to the Crop.
    """
    import os
    import re

    # only the immediate sub-folders of ``directory`` can be crop folders
    folders = next(os.walk(directory))[1]
    crop_rgx = re.compile(r'^\.xyz-(.+)')

    # ``match.group(1)`` fetches the captured crop name -- the previous
    # ``match.groups(1)[0]`` only worked by accident: the ``1`` there is the
    # *default* substituted for non-participating groups, not an index.
    names = [m.group(1)
             for m in map(crop_rgx.match, folders)
             if m is not None]

    return {name: Crop(name=name) for name in names}
class Sower(object):
    """Class for sowing a 'crop' of batched combos to then 'grow' (on any
    number of workers sharing the filesystem) and then reap.
    """

    def __init__(self, crop):
        """
        Parameters
        ----------
        crop : xyzpy.batch.Crop instance
            Description of where and how to store the cases and results.
        """
        self.crop = crop
        # Internal:
        self._batch_cases = []  # collects cases to be written in single batch
        self._counter = 0  # counts how many cases are in batch so far
        self._batch_counter = 0  # counts how many batches have been written

    def save_batch(self):
        """Save the current batch of cases to disk and start the next batch.
        """
        # batch numbers are 1-based on disk
        self._batch_counter += 1
        write_to_disk(self._batch_cases, os.path.join(
            self.crop.location, "batches", BTCH_NM.format(self._batch_counter))
        )
        self._batch_cases = []
        self._counter = 0

    # Context manager #

    def __enter__(self):
        return self

    def __call__(self, **kwargs):
        # each call queues one case (a kwargs mapping) into the current batch
        self._batch_cases.append(kwargs)
        self._counter += 1

        # when the number of cases doesn't divide the number of batches we
        # distribute the remainder among the first crops.
        extra_batch = self._batch_counter < self.crop._batch_remainder

        if self._counter == self.crop.batchsize + int(extra_batch):
            self.save_batch()

    def __exit__(self, exception_type, exception_value, traceback):
        # Make sure any overfill also saved
        if self._batch_cases:
            self.save_batch()
def grow(batch_number, crop=None, fn=None, check_mpi=True,
         verbosity=2, debugging=False):
    """Automatically process a batch of cases into results. Should be run in an
    ".xyz-{fn_name}" folder.

    Parameters
    ----------
    batch_number : int
        Which batch to 'grow' into a set of results.
    crop : xyzpy.batch.Crop instance
        Description of where and how to store the cases and results.
    fn : callable, optional
        If specified, the function used to generate the results, otherwise
        the function will be loaded from disk.
    check_mpi : bool, optional
        Whether to check if the process is rank 0 and only save results if
        so - allows mpi functions to be simply used. Defaults to true,
        this should only be turned off if e.g. a pool of workers is being
        used to run different ``grow`` instances.
    verbosity : {0, 1, 2}, optional
        How much information to show.
    debugging : bool, optional
        Set logging level to DEBUG.
    """
    if debugging:
        import logging
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)

    # infer the crop name/location from the working directory if not given
    if crop is None:
        current_folder = os.path.relpath('.', '..')
        if current_folder[:5] != ".xyz-":
            raise XYZError("`grow` should be run in a "
                           "\"{crop_parent}/.xyz-{crop_name}\" folder, else "
                           "`crop_parent` and `crop_name` (or `fn`) should be "
                           "specified.")
        crop_name = current_folder[5:]
        crop_location = os.getcwd()
    else:
        crop_name = crop.name
        crop_location = crop.location

    # load function
    if fn is None:
        fn = from_pickle(read_from_disk(os.path.join(crop_location, FNCT_NM)))

    # load cases to evaluate
    cases = read_from_disk(
        os.path.join(crop_location, "batches", BTCH_NM.format(batch_number)))

    if len(cases) == 0:
        # BUGFIX: use ``crop_location`` here -- ``crop`` may be None on the
        # infer-from-folder path above, and ``crop.location`` would raise an
        # AttributeError that masks the real problem.
        raise ValueError("Something has gone wrong with the loading of "
                         "batch {} ".format(BTCH_NM.format(batch_number)) +
                         "for the crop at {}.".format(crop_location))

    # maybe want to run grow as mpiexec (i.e. `fn` itself in parallel),
    # so only save and delete on rank 0
    if check_mpi and 'OMPI_COMM_WORLD_RANK' in os.environ:  # pragma: no cover
        rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    elif check_mpi and 'PMI_RANK' in os.environ:  # pragma: no cover
        rank = int(os.environ['PMI_RANK'])
    else:
        rank = 0

    if rank == 0:
        if verbosity >= 1:
            print(f"xyzpy: loaded batch {batch_number} of {crop_name}.")

        results = []
        pbar = progbar(range(len(cases)), disable=verbosity <= 0)
        for i in pbar:
            if verbosity >= 2:
                pbar.set_description(f"{cases[i]}")

            # compute and store result!
            results.append(fn(**cases[i]))

        if len(results) != len(cases):
            # BUGFIX: same ``crop_location`` substitution as above.
            raise ValueError("Something has gone wrong with processing "
                             "batch {} ".format(BTCH_NM.format(batch_number)) +
                             "for the crop at {}.".format(crop_location))

        # save to results
        write_to_disk(tuple(results), os.path.join(
            crop_location, "results", RSLT_NM.format(batch_number)))

        if verbosity >= 1:
            print(f"xyzpy: success - batch {batch_number} completed.")

    else:
        for case in cases:
            # worker: just help compute the result!
            fn(**case)
# --------------------------------------------------------------------------- #
# Gathering results #
# --------------------------------------------------------------------------- #
class Reaper(object):
    """Class that acts as a stateful function to retrieve already sown and
    grow results.
    """

    def __init__(self, crop, num_batches, wait=False, default_result=None):
        """Class for retrieving the batched, flat, 'grown' results.

        Parameters
        ----------
        crop : xyzpy.batch.Crop instance
            Description of where and how to store the cases and results.
        num_batches : int
            How many result files to chain together.
        wait : bool, optional
            Poll the filesystem until each result file appears.
        default_result : optional
            Value used to fill in for batches whose result file is missing
            (only when ``wait`` is False).
        """
        self.crop = crop

        # result file names are 1-based: xyz-result-1 ... xyz-result-N
        files = (
            os.path.join(self.crop.location, "results", RSLT_NM.format(i + 1))
            for i in range(num_batches)
        )

        def _load(x):
            use_default = (
                (default_result is not None) and
                (not wait) and
                (not os.path.isfile(x))
            )

            # actual result doesn't exist yet - use the default if specified
            if use_default:
                # recover the batch number from the filename to compute how
                # many cases this batch held (first batches may hold one
                # extra case when the division has a remainder)
                i = int(re.findall(RSLT_NM.format(r'(\d+)'), x)[0])
                size = crop.batchsize + int(i < crop._batch_remainder)
                res = (default_result,) * size
            else:
                res = read_from_disk(x)

            if (res is None) or len(res) == 0:
                raise ValueError("Something not right: result {} contains "
                                 "no data upon read from disk.".format(x))
            return res

        def wait_to_load(x):
            # block (polling) until the path exists, then load it
            while not os.path.exists(x):
                time.sleep(0.2)

            if os.path.isfile(x):
                return _load(x)
            else:
                raise ValueError("{} is not a file.".format(x))

        # lazily chain all batch results into one flat iterator; each call
        # to the Reaper pops the next single result
        self.results = itertools.chain.from_iterable(map(
            wait_to_load if wait else _load, files))

    def __enter__(self):
        return self

    def __call__(self, **kwargs):
        return next(self.results)

    def __exit__(self, exception_type, exception_value, traceback):
        # Check everything gone acccording to plan: the chained iterator
        # must be exhausted, else some results were never consumed.
        if tuple(self.results):
            raise XYZError("Not all results reaped!")
# --------------------------------------------------------------------------- #
# Automatic Batch Submission Scripts #
# --------------------------------------------------------------------------- #
_SGE_HEADER = (
"#!/bin/bash -l\n"
"#$ -S /bin/bash\n"
"#$ -l h_rt={hours}:{minutes}:{seconds},mem={gigabytes}G\n"
"#$ -l tmpfs={temp_gigabytes}G\n"
"{extra_resources}\n"
"#$ -N {name}\n"
"mkdir -p {output_directory}\n"
"#$ -wd {output_directory}\n"
"#$ -pe {pe} {num_procs}\n"
"#$ -t {run_start}-{run_stop}\n")
_PBS_HEADER = (
"#!/bin/bash -l\n"
"#PBS -lselect={num_nodes}:ncpus={num_procs}:mem={gigabytes}gb\n"
"#PBS -lwalltime={hours:02}:{minutes:02}:{seconds:02}\n"
"{extra_resources}\n"
"#PBS -N {name}\n"
"#PBS -J {run_start}-{run_stop}\n")
_SLURM_HEADER = (
"#!/bin/bash -l\n"
"#SBATCH --nodes={num_nodes}\n"
"#SBATCH --mem={gigabytes}gb\n"
"#SBATCH --cpus-per-task={num_procs}\n"
"#SBATCH --time={hours:02}:{minutes:02}:{seconds:02}\n"
"{extra_resources}\n"
"#SBATCH --job-name={name}\n"
"#SBATCH --array={run_start}-{run_stop}\n")
_BASE = (
"cd {working_directory}\n"
"export OMP_NUM_THREADS={num_threads}\n"
"export MKL_NUM_THREADS={num_threads}\n"
"export OPENBLAS_NUM_THREADS={num_threads}\n"
"{shell_setup}\n"
"tmpfile=$(mktemp .xyzpy-qsub.XXXXXXXX)\n"
"cat <<EOF > $tmpfile\n"
"{setup}\n"
"from xyzpy.gen.batch import grow, Crop\n"
"if __name__ == '__main__':\n"
" crop = Crop(name='{name}', parent_dir='{parent_dir}')\n")
_CLUSTER_SGE_GROW_ALL_SCRIPT = (
" grow($SGE_TASK_ID, crop=crop, debugging={debugging})\n")
_CLUSTER_PBS_GROW_ALL_SCRIPT = (
" grow($PBS_ARRAY_INDEX, crop=crop, debugging={debugging})\n")
_CLUSTER_SLURM_GROW_ALL_SCRIPT = (
" grow($SLURM_ARRAY_TASK_ID, crop=crop, debugging={debugging})\n")
_CLUSTER_SGE_GROW_PARTIAL_SCRIPT = (
" batch_ids = {batch_ids}]\n"
" grow(batch_ids[$SGE_TASK_ID - 1], crop=crop, "
"debugging={debugging})\n")
_CLUSTER_PBS_GROW_PARTIAL_SCRIPT = (
" batch_ids = {batch_ids}\n"
" grow(batch_ids[$PBS_ARRAY_INDEX - 1], crop=crop, "
"debugging={debugging})\n")
_CLUSTER_SLURM_GROW_PARTIAL_SCRIPT = (
" batch_ids = {batch_ids}\n"
" grow(batch_ids[$SLURM_ARRAY_TASK_ID - 1], crop=crop, "
"debugging={debugging})\n")
_BASE_CLUSTER_SCRIPT_END = (
"EOF\n"
"{launcher} $tmpfile\n"
"rm $tmpfile\n")
def gen_cluster_script(
    crop, scheduler, batch_ids=None, *,
    hours=None,
    minutes=None,
    seconds=None,
    gigabytes=2,
    num_procs=1,
    num_threads=None,
    num_nodes=1,
    launcher='python',
    setup="#",
    shell_setup="",
    mpi=False,
    temp_gigabytes=1,
    output_directory=None,
    extra_resources=None,
    debugging=False,
):
    """Generate a cluster script to grow a Crop.

    Parameters
    ----------
    crop : Crop
        The crop to grow.
    scheduler : {'sge', 'pbs', 'slurm'}
        Whether to use a SGE, PBS or slurm submission script template.
    batch_ids : int or tuple[int]
        Which batch numbers to grow, defaults to all missing batches.
    hours : int
        How many hours to request, default=0.
    minutes : int, optional
        How many minutes to request, default=20.
    seconds : int, optional
        How many seconds to request, default=0.
    gigabytes : int, optional
        How much memory to request, default: 2.
    num_procs : int, optional
        How many processes to request (threaded cores or MPI), default: 1.
    num_threads : int, optional
        Value exported as OMP/MKL/OPENBLAS_NUM_THREADS; defaults to 1 for
        MPI jobs, else to ``num_procs``.
    num_nodes : int, optional
        How many nodes to request (PBS/slurm headers), default: 1.
    launcher : str, optional
        How to launch the script, default: ``'python'``. But could for example
        be ``'mpiexec python'`` for a MPI program.
    setup : str, optional
        Python script to run before growing, for things that shouldnt't be put
        in the crop function itself, e.g. one-time imports with side-effects
        like: ``"import tensorflow as tf; tf.enable_eager_execution()``".
    shell_setup : str, optional
        Commands to be run by the shell before the python script is executed.
        E.g. ``conda activate my_env``.
    mpi : bool, optional
        Request MPI processes not threaded processes.
    temp_gigabytes : int, optional
        How much temporary on-disk memory.
    output_directory : str, optional
        What directory to write output to. Defaults to "$HOME/Scratch/output".
    extra_resources : str, optional
        Extra "#$ -l" resources, e.g. 'gpu=1'
    debugging : bool, optional
        Set the python log level to debugging.

    Returns
    -------
    str
    """
    scheduler = scheduler.lower()  # be case-insensitive for scheduler

    if scheduler not in {'sge', 'pbs', 'slurm'}:
        raise ValueError("scheduler must be one of 'sge', 'pbs', or 'slurm'")

    # default walltime is 1 hour; if any component is given, missing
    # components default to zero
    if hours is minutes is seconds is None:
        hours, minutes, seconds = 1, 0, 0
    else:
        hours = 0 if hours is None else int(hours)
        minutes = 0 if minutes is None else int(minutes)
        seconds = 0 if seconds is None else int(seconds)

    if output_directory is None:
        from os.path import expanduser
        home = expanduser("~")
        output_directory = os.path.join(home, 'Scratch', 'output')

    # refresh result counts so the missing-batches branch below is accurate
    crop.calc_progress()

    # comma-separated extras become one scheduler directive per item
    if extra_resources is None:
        extra_resources = ""
    elif scheduler == 'slurm':
        extra_resources = '#SBATCH --' + \
            '\n#SBATCH --'.join(extra_resources.split(','))
    else:
        extra_resources = "#$ -l {}".format(extra_resources)

    if num_threads is None:
        if mpi:
            num_threads = 1
        else:
            num_threads = num_procs

    # get absolute path
    full_parent_dir = str(pathlib.Path(crop.parent_dir).expanduser().resolve())

    opts = {
        'hours': hours,
        'minutes': minutes,
        'seconds': seconds,
        'gigabytes': gigabytes,
        'name': crop.name,
        'parent_dir': full_parent_dir,
        'num_procs': num_procs,
        'num_threads': num_threads,
        'num_nodes': num_nodes,
        'run_start': 1,
        'launcher': launcher,
        'setup': setup,
        'shell_setup': shell_setup,
        'pe': 'mpi' if mpi else 'smp',
        'temp_gigabytes': temp_gigabytes,
        'output_directory': output_directory,
        'working_directory': full_parent_dir,
        'extra_resources': extra_resources,
        'debugging': debugging,
    }

    if scheduler == 'sge':
        script = _SGE_HEADER
    elif scheduler == 'pbs':
        script = _PBS_HEADER
    elif scheduler == 'slurm':
        script = _SLURM_HEADER

    script += _BASE

    # grow specific ids
    if batch_ids is not None:
        if scheduler == 'sge':
            script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT
        elif scheduler == 'pbs':
            script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT
        elif scheduler == 'slurm':
            script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT
        batch_ids = tuple(batch_ids)
        opts['run_stop'] = len(batch_ids)
        opts['batch_ids'] = batch_ids

    # grow all ids
    elif crop.num_results == 0:
        batch_ids = tuple(range(crop.num_batches))
        if scheduler == 'sge':
            script += _CLUSTER_SGE_GROW_ALL_SCRIPT
        elif scheduler == 'pbs':
            script += _CLUSTER_PBS_GROW_ALL_SCRIPT
        elif scheduler == 'slurm':
            script += _CLUSTER_SLURM_GROW_ALL_SCRIPT
        opts['run_stop'] = crop.num_batches

    # grow missing ids only
    else:
        if scheduler == 'sge':
            script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT
        elif scheduler == 'pbs':
            script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT
        elif scheduler == 'slurm':
            script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT
        batch_ids = crop.missing_results()
        opts['run_stop'] = len(batch_ids)
        opts['batch_ids'] = batch_ids

    script += _BASE_CLUSTER_SCRIPT_END
    script = script.format(**opts)

    if (scheduler == 'pbs') and len(batch_ids) == 1:
        # PBS can't handle arrays jobs of size 1...
        script = (script.replace('#PBS -J 1-1\n', "")
                        .replace("$PBS_ARRAY_INDEX", '1'))

    return script
def grow_cluster(
    crop, scheduler, batch_ids=None, *,
    hours=None,
    minutes=None,
    seconds=None,
    gigabytes=2,
    num_procs=1,
    num_threads=None,
    num_nodes=1,
    launcher='python',
    setup="#",
    shell_setup="",
    mpi=False,
    temp_gigabytes=1,
    output_directory=None,
    extra_resources=None,
    debugging=False,
):  # pragma: no cover
    """Automagically submit SGE, PBS, or slurm jobs to grow all missing
    results.

    Parameters
    ----------
    crop : Crop
        The crop to grow.
    scheduler : {'sge', 'pbs', 'slurm'}
        Whether to use a SGE, PBS or slurm submission script template.
    batch_ids : int or tuple[int]
        Which batch numbers to grow, defaults to all missing batches.
    hours : int
        How many hours to request, default=0.
    minutes : int, optional
        How many minutes to request, default=20.
    seconds : int, optional
        How many seconds to request, default=0.
    gigabytes : int, optional
        How much memory to request, default: 2.
    num_procs : int, optional
        How many processes to request (threaded cores or MPI), default: 1.
    num_threads : int, optional
        Threads per process (see ``gen_cluster_script``).
    num_nodes : int, optional
        How many nodes to request, default: 1.
    launcher : str, optional
        How to launch the script, default: ``'python'``. But could for example
        be ``'mpiexec python'`` for a MPI program.
    setup : str, optional
        Python script to run before growing, for things that shouldnt't be put
        in the crop function itself, e.g. one-time imports with side-effects
        like: ``"import tensorflow as tf; tf.enable_eager_execution()``".
    shell_setup : str, optional
        Commands to be run by the shell before the python script is executed.
        E.g. ``conda activate my_env``.
    mpi : bool, optional
        Request MPI processes not threaded processes.
    temp_gigabytes : int, optional
        How much temporary on-disk memory.
    output_directory : str, optional
        What directory to write output to. Defaults to "$HOME/Scratch/output".
    extra_resources : str, optional
        Extra "#$ -l" resources, e.g. 'gpu=1'
    debugging : bool, optional
        Set the python log level to debugging.
    """
    # nothing left to grow -> nothing to submit
    if crop.is_ready_to_reap():
        print("Crop ready to reap: nothing to submit.")
        return

    import subprocess

    # gen_cluster_script also validates ``scheduler``, so the submit branch
    # below always matches one of the two cases
    script = gen_cluster_script(
        crop, scheduler, batch_ids=batch_ids,
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        gigabytes=gigabytes,
        temp_gigabytes=temp_gigabytes,
        output_directory=output_directory,
        num_procs=num_procs,
        num_threads=num_threads,
        num_nodes=num_nodes,
        launcher=launcher,
        setup=setup,
        shell_setup=shell_setup,
        mpi=mpi,
        extra_resources=extra_resources,
        debugging=debugging,
    )

    # write the script to a temporary file inside the crop folder, submit
    # it, echo the scheduler's response, then remove it
    script_file = os.path.join(crop.location, "__qsub_script__.sh")

    with open(script_file, mode='w') as f:
        f.write(script)

    if scheduler in {'sge', 'pbs'}:
        result = subprocess.run(['qsub', script_file], capture_output=True)
    elif scheduler == 'slurm':
        result = subprocess.run(['sbatch', script_file], capture_output=True)

    print(result.stderr.decode())
    print(result.stdout.decode())

    os.remove(script_file)
def gen_qsub_script(
    crop, batch_ids=None, *, scheduler='sge',
    **kwargs
):  # pragma: no cover
    """Generate a qsub script to grow a Crop. Deprecated in favour of
    `gen_cluster_script` and will be removed in the future.

    Parameters
    ----------
    crop : Crop
        The crop to grow.
    batch_ids : int or tuple[int]
        Which batch numbers to grow, defaults to all missing batches.
    scheduler : {'sge', 'pbs'}, optional
        Whether to use a SGE or PBS submission script template.
    kwargs
        See `gen_cluster_script` for all other parameters.
    """
    deprecation_msg = ("'gen_qsub_script' is deprecated in favour of "
                       "`gen_cluster_script` and will be removed in the future")
    warnings.warn(deprecation_msg, FutureWarning)

    # thin shim: forward everything to the new entry point
    return gen_cluster_script(crop, scheduler, batch_ids=batch_ids, **kwargs)
def qsub_grow(
    crop, batch_ids=None, *, scheduler='sge',
    **kwargs
):  # pragma: no cover
    """Automagically submit SGE or PBS jobs to grow all missing results.
    Deprecated in favour of `grow_cluster` and will be removed in the future.

    Parameters
    ----------
    crop : Crop
        The crop to grow.
    batch_ids : int or tuple[int]
        Which batch numbers to grow, defaults to all missing batches.
    scheduler : {'sge', 'pbs'}, optional
        Whether to use a SGE or PBS submission script template.
    kwargs
        See `grow_cluster` for all other parameters.
    """
    deprecation_msg = ("'qsub_grow' is deprecated in favour of "
                       "`grow_cluster` and will be removed in the future")
    warnings.warn(deprecation_msg, FutureWarning)

    # thin shim: forward everything to the new entry point
    grow_cluster(crop, scheduler, batch_ids=batch_ids, **kwargs)
# Attach the cluster-submission helpers to ``Crop`` as bound methods, plus
# scheduler-specific conveniences via ``functools.partialmethod``.
Crop.gen_qsub_script = gen_qsub_script
Crop.qsub_grow = qsub_grow
Crop.gen_cluster_script = gen_cluster_script
Crop.grow_cluster = grow_cluster
Crop.gen_sge_script = functools.partialmethod(Crop.gen_cluster_script,
                                              scheduler='sge')
Crop.grow_sge = functools.partialmethod(Crop.grow_cluster, scheduler='sge')
Crop.gen_pbs_script = functools.partialmethod(Crop.gen_cluster_script,
                                              scheduler='pbs')
Crop.grow_pbs = functools.partialmethod(Crop.grow_cluster, scheduler='pbs')
Crop.gen_slurm_script = functools.partialmethod(Crop.gen_cluster_script,
                                                scheduler='slurm')
Crop.grow_slurm = functools.partialmethod(Crop.grow_cluster, scheduler='slurm')
| mit |
yufeldman/arrow | python/pyarrow/tests/test_feather.py | 3 | 15177 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
import pyarrow as pa
from pyarrow.compat import guid
from pyarrow.feather import (read_feather, write_feather,
FeatherReader)
from pyarrow.lib import FeatherWriter
def random_path():
    """Return a fresh, unique on-disk name for a feather test file."""
    return 'feather_{}'.format(guid())
class TestFeatherReader(unittest.TestCase):
    def setUp(self):
        # paths appended here are removed in tearDown
        self.test_files = []
    def tearDown(self):
        # best-effort removal of every file a test registered
        for path in self.test_files:
            try:
                os.remove(path)
            except os.error:
                pass
    def test_file_not_exist(self):
        # opening a missing path must raise ArrowIOError, not crash
        with pytest.raises(pa.ArrowIOError):
            FeatherReader('test_invalid_file')
    def _get_null_counts(self, path, columns=None):
        """Return per-column null counts from the feather file at *path*,
        optionally restricted to the given column names."""
        reader = FeatherReader(path)
        counts = []
        for i in range(reader.num_columns):
            col = reader.get_column(i)
            if columns is None or col.name in columns:
                counts.append(col.null_count)

        return counts
    def _check_pandas_roundtrip(self, df, expected=None, path=None,
                                columns=None, null_counts=None,
                                nthreads=1):
        """Write *df* to feather, read it back and assert equality with
        *expected* (defaults to *df* itself) and with *null_counts*
        (defaults to all-zero)."""
        if path is None:
            path = random_path()

        self.test_files.append(path)
        write_feather(df, path)
        if not os.path.exists(path):
            raise Exception('file not written')

        result = read_feather(path, columns, nthreads=nthreads)
        if expected is None:
            expected = df

        assert_frame_equal(result, expected)

        if null_counts is None:
            null_counts = np.zeros(len(expected.columns))

        np.testing.assert_array_equal(self._get_null_counts(path, columns),
                                      null_counts)
    def _assert_error_on_write(self, df, exc, path=None):
        # check that we are raising the exception
        # on writing
        if path is None:
            path = random_path()

        self.test_files.append(path)

        def f():
            write_feather(df, path)

        pytest.raises(exc, f)
    def test_num_rows_attr(self):
        # num_rows reflects the written frame length, including zero
        df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
        path = random_path()
        self.test_files.append(path)
        write_feather(df, path)

        reader = FeatherReader(path)
        assert reader.num_rows == len(df)

        # empty frame -> zero rows
        df = pd.DataFrame({})
        path = random_path()
        self.test_files.append(path)
        write_feather(df, path)

        reader = FeatherReader(path)
        assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
    def test_float_nulls(self):
        # write float columns with an explicit null mask via the low-level
        # writer; nulls must read back as NaN and be counted
        num_values = 100

        path = random_path()
        self.test_files.append(path)
        writer = FeatherWriter()
        writer.open(path)

        null_mask = np.random.randint(0, 10, size=num_values) < 3
        dtypes = ['f4', 'f8']
        expected_cols = []
        null_counts = []
        for name in dtypes:
            values = np.random.randn(num_values).astype(name)
            writer.write_array(name, values, null_mask)

            # the expected pandas frame has NaN where the mask was set
            values[null_mask] = np.nan

            expected_cols.append(values)
            null_counts.append(null_mask.sum())

        writer.close()

        ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
                                columns=dtypes)

        result = read_feather(path)
        assert_frame_equal(result, ex_frame)
        assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_platform_numpy_integers(self):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
    def test_integer_with_nulls(self):
        # pandas requires upcast to float dtype
        path = random_path()
        self.test_files.append(path)

        int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
        num_values = 100

        writer = FeatherWriter()
        writer.open(path)

        null_mask = np.random.randint(0, 10, size=num_values) < 3
        expected_cols = []
        for name in int_dtypes:
            values = np.random.randint(0, 100, size=num_values)
            writer.write_array(name, values, null_mask)

            # integers with nulls come back as float64 with NaN holes
            expected = values.astype('f8')
            expected[null_mask] = np.nan

            expected_cols.append(expected)

        ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
                                columns=int_dtypes)

        writer.close()

        result = read_feather(path)
        assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
    def test_boolean_nulls(self):
        # pandas requires upcast to object dtype
        path = random_path()
        self.test_files.append(path)

        num_values = 100
        np.random.seed(0)

        writer = FeatherWriter()
        writer.open(path)

        # NOTE: the two randint calls must stay in this order -- with the
        # fixed seed they define the exact mask/values pair being tested
        mask = np.random.randint(0, 10, size=num_values) < 3
        values = np.random.randint(0, 10, size=num_values) < 5

        writer.write_array('bools', values, mask)

        # booleans with nulls come back as object dtype with None holes
        expected = values.astype(object)
        expected[mask] = None

        writer.close()

        ex_frame = pd.DataFrame({'bools': expected})

        result = read_feather(path)
        assert_frame_equal(result, ex_frame)
    def test_buffer_bounds_error(self):
        # ARROW-1676: sweep many array lengths to catch buffer-boundary
        # off-by-one errors when a leading null is present
        path = random_path()
        self.test_files.append(path)

        for i in range(16, 256):
            values = pa.array([None] + list(range(i)), type=pa.float64())

            writer = FeatherWriter()
            writer.open(path)

            writer.write_array('arr', values)
            writer.close()

            result = read_feather(path)
            expected = pd.DataFrame({'arr': values.to_pandas()})
            assert_frame_equal(result, expected)

            self._check_pandas_roundtrip(expected, null_counts=[1])
def test_boolean_object_nulls(self):
repeats = 100
arr = np.array([False, None, True] * repeats, dtype=object)
df = pd.DataFrame({'bools': arr})
self._check_pandas_roundtrip(df, null_counts=[1 * repeats])
    def test_delete_partial_file_on_error(self):
        if sys.platform == 'win32':
            pytest.skip('Windows hangs on to file handle for some reason')

        class CustomClass(object):
            pass

        # strings will fail
        df = pd.DataFrame(
            {
                'numbers': range(5),
                'strings': [b'foo', None, u'bar', CustomClass(), np.nan]},
            columns=['numbers', 'strings'])

        path = random_path()
        try:
            write_feather(df, path)
        except Exception:
            pass

        # a failed write must not leave a partial file behind
        assert not os.path.exists(path)
def test_strings(self):
    """Mixed bytes/unicode coerces to binary; text columns keep their nulls."""
    repeats = 1000
    cases = [
        # mixed bytes and unicode values are coerced to binary on write
        ([b'foo', None, u'bar', 'qux', np.nan],
         [b'foo', None, b'bar', b'qux', np.nan]),
        # embedded None nulls are ok
        (['foo', None, 'bar', 'qux', None],
         ['foo', None, 'bar', 'qux', None]),
        # NaN counts as a null too
        (['foo', None, 'bar', 'qux', np.nan],
         ['foo', None, 'bar', 'qux', np.nan]),
    ]
    for in_values, out_values in cases:
        frame = pd.DataFrame({'strings': in_values * repeats})
        expected = pd.DataFrame({'strings': out_values * repeats})
        self._check_pandas_roundtrip(frame, expected,
                                     null_counts=[2 * repeats])
def test_empty_strings(self):
    """A column made entirely of empty strings round-trips."""
    self._check_pandas_roundtrip(pd.DataFrame({'strings': [''] * 10}))
def test_all_none(self):
    """An all-None object column round-trips with a full null count."""
    frame = pd.DataFrame({'all_none': [None] * 10})
    self._check_pandas_roundtrip(frame, null_counts=[10])
def test_all_null_category(self):
    """Regression test for ARROW-1188: an all-null categorical column."""
    frame = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
    frame = frame.assign(B=frame.B.astype("category"))
    self._check_pandas_roundtrip(frame, null_counts=[0, 3])
def test_multithreaded_read(self):
    """Reading a wide frame with several threads matches single-threaded."""
    data = {'c{0}'.format(i): [''] * 10 for i in range(100)}
    self._check_pandas_roundtrip(pd.DataFrame(data), nthreads=4)
def test_nan_as_null(self):
    """Any NaN float is treated as null, not only the np.nan singleton."""
    # np.nan * 2 produces a distinct NaN object from np.nan itself
    values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
    self._check_pandas_roundtrip(pd.DataFrame({'strings': values}))
def test_category(self):
    """Categorical strings round-trip, with NaN/None unified to None."""
    repeats = 1000
    raw = ['foo', None, u'bar', 'qux', np.nan]
    frame = pd.DataFrame({'strings': raw * repeats})
    frame['strings'] = frame['strings'].astype('category')

    # both null flavours come back as None in the categorical result
    out = ['foo', None, 'bar', 'qux', None]
    expected = pd.DataFrame({'strings': pd.Categorical(out * repeats)})
    self._check_pandas_roundtrip(frame, expected,
                                 null_counts=[2 * repeats])
def test_timestamp(self):
    """Both naive and tz-aware datetime columns round-trip."""
    frame = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
    localized = frame.naive.dt.tz_localize('utc')
    frame['with_tz'] = localized.dt.tz_convert('America/Los_Angeles')
    self._check_pandas_roundtrip(frame)
def test_timestamp_with_nulls(self):
    """Datetime columns containing nulls keep their nulls on round-trip.

    Fix: use ``datetime.datetime`` directly — the ``pd.datetime`` alias
    was deprecated in pandas 1.0 and removed in pandas 2.0, so the old
    spelling raises AttributeError on modern pandas.
    """
    from datetime import datetime

    df = pd.DataFrame({'test': [datetime(2016, 1, 1),
                                None,
                                datetime(2016, 1, 3)]})
    df['with_tz'] = df.test.dt.tz_localize('utc')
    self._check_pandas_roundtrip(df, null_counts=[1, 1])
@pytest.mark.xfail(reason="not supported ATM",
                   raises=NotImplementedError)
def test_timedelta_with_nulls(self):
    """Timedelta round-trip is expected to fail: not implemented yet."""
    frame = pd.DataFrame({'test': [pd.Timedelta('1 day'),
                                   None,
                                   pd.Timedelta('3 day')]})
    self._check_pandas_roundtrip(frame, null_counts=[1, 1])
def test_out_of_float64_timestamp_with_nulls(self):
    """Nanosecond timestamps too precise for float64 still round-trip."""
    index = pd.DatetimeIndex([1451606400000000001, None, 14516064000030405])
    frame = pd.DataFrame({'test': index})
    frame['with_tz'] = frame.test.dt.tz_localize('utc')
    self._check_pandas_roundtrip(frame, null_counts=[1, 1])
def test_non_string_columns(self):
    """Integer column labels are stringified on write."""
    frame = pd.DataFrame({0: [1, 2, 3, 4],
                          1: [True, False, True, False]})
    self._check_pandas_roundtrip(frame, frame.rename(columns=str))
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
                    reason='unicode filenames not supported')
def test_unicode_filename(self):
    """Writing to a non-ASCII path works (GH #209)."""
    name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
    frame = pd.DataFrame({'foo': [1, 2, 3, 4]})
    self._check_pandas_roundtrip(frame, path=name)
def test_read_columns(self):
    """Reading a subset of columns returns only those columns."""
    data = {'foo': [1, 2, 3, 4],
            'boo': [5, 6, 7, 8],
            'woo': [1, 3, 5, 7]}
    subset = list(data.keys())[1:3]
    expected = pd.DataFrame({name: data[name] for name in subset})
    self._check_pandas_roundtrip(pd.DataFrame(data), expected,
                                 columns=subset)
def test_overwritten_file(self):
    """Rewriting an existing file fully replaces its contents."""
    path = random_path()
    self.test_files.append(path)

    n = 100
    np.random.seed(0)
    values = np.random.randint(0, 10, size=n)
    write_feather(pd.DataFrame({'ints': values}), path)

    # a shorter frame written to the same path must replace the long one
    frame = pd.DataFrame({'ints': values[0: n // 2]})
    self._check_pandas_roundtrip(frame, path=path)
def test_filelike_objects(self):
    """Feather can write to and read from an in-memory buffer."""
    from io import BytesIO

    buf = BytesIO()
    # the copy makes it non-strided
    frame = pd.DataFrame(np.arange(12).reshape(4, 3),
                         columns=['a', 'b', 'c']).copy()
    write_feather(frame, buf)

    buf.seek(0)
    assert_frame_equal(read_feather(buf), frame)
def test_sparse_dataframe(self):
    """Sparse frames are densified on round-trip (GH #221)."""
    data = {'A': [0, 1, 2],
            'B': [1, 0, 1]}
    sparse = pd.DataFrame(data).to_sparse(fill_value=1)
    self._check_pandas_roundtrip(sparse, sparse.to_dense())
def test_duplicate_columns(self):
    """Duplicate column names are rejected (feather issue #53)."""
    frame = pd.DataFrame(np.arange(12).reshape(4, 3),
                         columns=list('aaa')).copy()
    self._assert_error_on_write(frame, ValueError)
def test_unsupported(self):
    """Unsupported value types raise ValueError (feather issue #240)."""
    # period dtype is not serializable
    frame = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
    self._assert_error_on_write(frame, ValueError)

    # an object column mixing strings with non-strings is rejected too
    frame = pd.DataFrame({'a': ['a', 1, 2.0]})
    self._assert_error_on_write(frame, ValueError)
@pytest.mark.slow
def test_large_dataframe(self):
    """A very large (400M row) frame round-trips; marked slow."""
    self._check_pandas_roundtrip(pd.DataFrame({'A': np.arange(400000000)}))
| apache-2.0 |
cybernet14/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
    # Metric SMACOF against the worked example in "Modern Multidimensional
    # Scaling", Borg & Groenen, p. 154.
    dissim = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    init = np.array([[-.266, -.539],
                     [.451, .252],
                     [.016, -.238],
                     [-.200, .524]])
    expected = np.array([[-1.415, -2.471],
                         [1.633, 1.107],
                         [.249, -.067],
                         [-.468, 1.431]])
    embedding, _ = mds.smacof(dissim, init=init, n_components=2,
                              max_iter=1, n_init=1)
    assert_array_almost_equal(embedding, expected, decimal=3)
def test_smacof_error():
    # a non-symmetric similarity matrix is rejected
    asymmetric = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [3, 2, 0, 1],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, asymmetric)

    # a non-square similarity matrix is rejected
    non_square = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, non_square)

    # an init array whose shape does not match the similarities is rejected
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    bad_init = np.array([[-.266, -.539],
                         [.016, -.238],
                         [-.200, .524]])
    assert_raises(ValueError, mds.smacof, sim, init=bad_init, n_init=1)
def test_MDS():
    # non-metric MDS accepts a precomputed dissimilarity matrix
    dissim = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    model = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    model.fit(dissim)
| bsd-3-clause |
kaylanb/SkinApp | machine_learn/blob_hog_predict_common_url_set/predict_with_hog.py | 1 | 4118 | '''outputs features for HOG machine learning'''
from matplotlib import image as mpimg
from scipy import sqrt, pi, arctan2, cos, sin, ndimage, fftpack, stats
from skimage import exposure, measure, feature
from PIL import Image
import cStringIO
import urllib2
from numpy.random import rand
from numpy import ones, zeros, concatenate, array
from pandas import read_csv, DataFrame
from pandas import concat as pd_concat
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
import pickle
def get_indices_urls_that_exist(urls):
    """Return an array of indices (into ``urls``) whose URLs are fetchable.

    Bug fixes:
    * previously iterated over the global ``df.URL.values`` — ``df`` is not
      defined in this scope; iterate over the ``urls`` argument instead.
    * previously returned ``np.array(...)`` although this module only does
      ``from numpy import ... array`` (no ``np`` alias); use ``array``.
    """
    inds_exist = []
    cnt = -1
    for url in urls:
        cnt += 1
        print("cnt= %d" % cnt)
        try:
            # we only care whether the fetch succeeds
            urllib2.urlopen(url).read()
            inds_exist.append(cnt)
        except urllib2.URLError:
            continue
    return array(inds_exist)
def update_dataframes_with_urls_exist():
    """Filter the pickled URL/answer and blob-feature frames down to rows
    whose URLs are still reachable, then re-pickle both frames."""
    root = "machine_learn/blob_hog_predict_common_url_set/"

    with open(root + "NoLims_shuffled_url_answer.pickle", "r") as fin:
        url_df = pickle.load(fin)
    keep = get_indices_urls_that_exist(url_df.URL.values)

    with open(root + "NoLims_shuffled_blob_features.pickle", "r") as fin:
        blob_feat_df = pickle.load(fin)

    # write the filtered frames next to the current working directory
    with open("NoLims_shuffled_url_answer.pickle", 'w') as fout:
        pickle.dump(url_df.ix[keep, :], fout)
    with open("NoLims_shuffled_blob_features.pickle", 'w') as fout:
        pickle.dump(blob_feat_df.ix[keep, :], fout)
def hog_features(ans_url_df, output_pickle_name):
    """Compute a 900-value HOG descriptor for every reachable image URL and
    pickle a frame of (URL, answer, HOG features) rows."""
    urls = ans_url_df.URL.values
    answers = ans_url_df.answer.values

    # first pass: keep only URLs that can actually be downloaded
    urls_exist = []
    ans_exist = []
    cnt = -1
    for url, ans in zip(urls, answers):
        cnt += 1
        print("cnt= %d , checking urls" % cnt)
        try:
            urllib2.urlopen(url).read()
            urls_exist.append(url)
            ans_exist.append(ans)
        except urllib2.URLError:
            continue
    urls_exist = array(urls_exist)
    ans_exist = array(ans_exist)

    # second pass: greyscale each image and extract its HOG descriptor
    feat = zeros((len(urls_exist), 900))
    count = 0
    for url in urls_exist:
        print("count= %d -- calc features" % count)
        read = urllib2.urlopen(url).read()
        obj = Image.open(cStringIO.StringIO(read))
        img = array(obj.convert('L'))
        blocks = feature.hog(img, orientations=9,
                             pixels_per_cell=(100, 100),
                             cells_per_block=(5, 5),
                             visualise=False, normalise=True)
        # images too small for a full 5x5 block grid keep an all-zero row
        if len(blocks) == 900:
            feat[count] = blocks
        count += 1

    final_df = pd_concat([DataFrame(urls_exist, columns=["URL"]),
                          DataFrame(ans_exist, columns=["answer"]),
                          DataFrame(feat)], axis=1)
    with open(output_pickle_name, 'w') as fout:
        pickle.dump(final_df.dropna(), fout)
def train_and_predict(feat_df, predict_save_name):
    """Train ExtraTrees and LinearSVC on the first 300 rows of the feature
    frame, predict the remaining rows, and pickle the predictions."""
    # columns 0 and 1 are URL/answer; the rest are features
    train_X = feat_df.values[0:300, 2:]
    train_y = feat_df.answer.values[0:300]
    test_X = feat_df.values[300:, 2:]
    test_y = feat_df.answer.values[300:]
    test_urls = feat_df.URL.values[300:]

    et = ExtraTreesClassifier(n_estimators=50, max_depth=None,
                              min_samples_split=1, random_state=0)
    et.fit(train_X, train_y)
    et_predict = et.predict(test_X)

    svc = svm.LinearSVC()
    svc.fit(train_X, train_y)
    svc_predict = svc.predict(test_X)

    out = DataFrame()
    out["url"] = test_urls
    out["answer"] = test_y
    out["ET_predict"] = et_predict
    out["LinSVC_predict"] = svc_predict

    with open(predict_save_name, 'w') as fout:
        pickle.dump(out, fout)
### hog features (one-off; uncomment to regenerate the feature pickle)
# fin = open('FacesAndLimbs_shuffled_url_answer.pickle', "r")
# ans_url_df = pickle.load(fin)
# fin.close()
# hog_features(ans_url_df, "FacesAndLimbs_shuffled_hog_features.pickle")

### hog predict
with open('NoLims_shuffled_hog_features.pickle', "r") as fin:
    feat_df = pickle.load(fin)
train_and_predict(feat_df, "NoLims_shuffled_hog_predict.pickle")
| bsd-3-clause |
mojoboss/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
# Bernoulli target whose log-odds depend non-linearly on x1, x2, x3
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
# (subsample < 1.0 is required for OOB improvements to be recorded)
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
# boosting-iteration numbers (1-based) for the x axis of the plots below
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
    """Return the per-stage deviance of ``clf`` on ``X_test``/``y_test``."""
    score = np.zeros((n_estimators,), dtype=np.float64)
    for stage, y_pred in enumerate(clf.staged_decision_function(X_test)):
        score[stage] = clf.loss_(y_test, y_pred)
    return score
def cv_estimate(n_folds=3):
    """Average the held-out deviance curve over ``n_folds`` CV folds."""
    folds = KFold(n=X_train.shape[0], n_folds=n_folds)
    cv_clf = ensemble.GradientBoostingClassifier(**params)
    val_scores = np.zeros((n_estimators,), dtype=np.float64)
    for train, test in folds:
        cv_clf.fit(X_train[train], y_train[train])
        val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
    return val_scores / n_folds
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
# (approximates the OOB loss curve over boosting iterations)
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves (RGB tuples scaled to [0, 1])
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
# re-sort tick positions so labels stay aligned after the insertions
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
rrohan/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets: 16 hand-placed 2D points, two classes of 8
X = np.c_[(.4, -.7),
          (-1.5, -1),
          (-1.4, -.9),
          (-1.3, -1.2),
          (-1.1, -.2),
          (-1.2, -.4),
          (-.5, 1.2),
          (-1.5, 2.1),
          (1, 1),
          # --
          (1.3, .8),
          (1.2, .5),
          (.2, -2),
          (.5, -2.4),
          (.2, -2.3),
          (0, -2.7),
          (1.3, 2.1)].T
Y = [0] * 8 + [1] * 8

# figure number
fignum = 1

# fit one SVC per kernel and plot its decision surface
for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
    plt.axis('tight')

    x_min, x_max = -3, 3
    y_min, y_max = -3, 3

    # evaluate the decision function on a 200x200 grid
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
                linestyles=['--', '-', '--'], levels=[-.5, 0, .5])

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1

plt.show()
| bsd-3-clause |
cl4rke/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenge
is to position the labels minimizing overlap. For this we use an
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet

# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)

# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
    'TOT': 'Total',
    'XOM': 'Exxon',
    'CVX': 'Chevron',
    'COP': 'ConocoPhillips',
    'VLO': 'Valero Energy',
    'MSFT': 'Microsoft',
    'IBM': 'IBM',
    'TWX': 'Time Warner',
    'CMCSA': 'Comcast',
    'CVC': 'Cablevision',
    'YHOO': 'Yahoo',
    'DELL': 'Dell',
    'HPQ': 'HP',
    'AMZN': 'Amazon',
    'TM': 'Toyota',
    'CAJ': 'Canon',
    'MTU': 'Mitsubishi',
    'SNE': 'Sony',
    'F': 'Ford',
    'HMC': 'Honda',
    'NAV': 'Navistar',
    'NOC': 'Northrop Grumman',
    'BA': 'Boeing',
    'KO': 'Coca Cola',
    'MMM': '3M',
    'MCD': 'Mc Donalds',
    'PEP': 'Pepsi',
    'MDLZ': 'Kraft Foods',
    'K': 'Kellogg',
    'UN': 'Unilever',
    'MAR': 'Marriott',
    'PG': 'Procter Gamble',
    'CL': 'Colgate-Palmolive',
    'GE': 'General Electrics',
    'WFC': 'Wells Fargo',
    'JPM': 'JPMorgan Chase',
    'AIG': 'AIG',
    'AXP': 'American express',
    'BAC': 'Bank of America',
    'GS': 'Goldman Sachs',
    'AAPL': 'Apple',
    'SAP': 'SAP',
    'CSCO': 'Cisco',
    'TXN': 'Texas instruments',
    'XRX': 'Xerox',
    'LMT': 'Lookheed Martin',
    'WMT': 'Wal-Mart',
    'WBA': 'Walgreen',
    'HD': 'Home Depot',
    'GSK': 'GlaxoSmithKline',
    'PFE': 'Pfizer',
    'SNY': 'Sanofi-Aventis',
    'NVS': 'Novartis',
    'KMB': 'Kimberly-Clark',
    'R': 'Ryder',
    'GD': 'General Dynamics',
    'RTN': 'Raytheon',
    'CVS': 'CVS',
    'CAT': 'Caterpillar',
    'DD': 'DuPont de Nemours'}

symbols, names = np.array(list(symbol_dict.items())).T

quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
          for symbol in symbols]

# Fix: the original bound these arrays to ``open`` (and ``close``), which
# shadowed the builtin open() for the rest of the module.
open_prices = np.array([q.open for q in quotes]).astype(np.float)
close_prices = np.array([q.close for q in quotes]).astype(np.float)

# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()

# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)

###############################################################################
# Cluster using affinity propagation and report the members of each cluster
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
# embedding has shape (2, n_stocks): x/y coordinates per stock
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')

# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
            cmap=plt.cm.spectral)

# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#     linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
            for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)

# Add a label to each node, nudged away from its nearest neighbour along
# each axis so labels overlap as little as possible.
for index, (name, label, (x, y)) in enumerate(
        zip(names, labels, embedding.T)):
    dx = x - embedding[0]
    dx[index] = 1
    dy = y - embedding[1]
    dy[index] = 1
    this_dx = dx[np.argmin(np.abs(dy))]
    this_dy = dy[np.argmin(np.abs(dx))]
    if this_dx > 0:
        horizontalalignment = 'left'
        x = x + .002
    else:
        horizontalalignment = 'right'
        x = x - .002
    if this_dy > 0:
        verticalalignment = 'bottom'
        y = y + .002
    else:
        verticalalignment = 'top'
        y = y - .002
    plt.text(x, y, name, size=10,
             horizontalalignment=horizontalalignment,
             verticalalignment=verticalalignment,
             bbox=dict(facecolor='w',
                       edgecolor=plt.cm.spectral(label / float(n_labels)),
                       alpha=.6))

plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
         embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
         embedding[1].max() + .03 * embedding[1].ptp())

plt.show()
| bsd-3-clause |
QuLogic/cartopy | lib/cartopy/tests/mpl/__init__.py | 2 | 12599 | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
import base64
import distutils
import os
import glob
import shutil
import warnings
import flufl.lock
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.testing import setup as mpl_setup
import matplotlib.testing.compare as mcompare
MPL_VERSION = distutils.version.LooseVersion(mpl.__version__)
class ImageTesting:
    """
    Decorator class for running visual Matplotlib tests.

    Decorate a test that draws one figure per expected image name::

        @ImageTesting(['simple_test'])
        def test_simple():
            import matplotlib.pyplot as plt
            plt.plot(range(10))

    The locations of the result and expected images can be obtained from
    the :meth:`result_path` and :meth:`expected_path` methods of an
    (empty) instance; subclasses may relocate the images by overriding
    those methods, and the same technique for finding the locations will
    still hold true.
    """

    #: The path where the standard ``baseline_images`` exist.
    root_image_results = os.path.dirname(__file__)

    #: The path where the images generated by the tests should go.
    image_output_directory = os.path.join(root_image_results, 'output')

    # Fall back to the current working directory when the package tree
    # itself is not writable (e.g. an installed package).
    if not os.access(image_output_directory, os.W_OK):
        if not os.access(os.getcwd(), os.W_OK):
            raise OSError('Write access to a local disk is required to run '
                          'image tests. Run the tests from a current working '
                          'directory you have write access to to avoid this '
                          'issue.')
        else:
            image_output_directory = os.path.join(os.getcwd(),
                                                  'cartopy_test_output')

    def __init__(self, img_names, tolerance=0.5, style='classic'):
        # ``tolerance`` is an RMS of the pixel differences, as computed by
        # matplotlib.testing.compare.calculate_rms (matplotlib >= 1.3).
        self.img_names = img_names
        self.style = style
        self.tolerance = tolerance

    def expected_path(self, test_name, img_name, ext='.png'):
        """
        Return the full path of the expected image for the given test
        name and image name.
        """
        base = os.path.join(self.root_image_results, 'baseline_images',
                            'mpl', test_name, img_name)
        return base + ext

    def result_path(self, test_name, img_name, ext='.png'):
        """
        Return the full path of the result image for the given test name
        and image name.
        """
        base = os.path.join(self.image_output_directory, test_name,
                            'result-' + img_name)
        return base + ext

    def run_figure_comparisons(self, figures, test_name):
        """
        Compare ``figures`` against the images named in ``self.img_names``.

        The number of figures passed must be equal to the number of image
        names.

        .. note::

            The figures are not closed by this method. If using the
            decorator version of ImageTesting, they will be closed for
            you.

        """
        n_figures_msg = ('Expected %s figures (based on the number of '
                         'image result filenames), but there are %s figures '
                         'available. The most likely reason for this is that '
                         'this test is producing too many figures, '
                         '(alternatively if not using ImageCompare as a '
                         'decorator, it is possible that a test run prior to '
                         'this one has not closed its figures).'
                         '' % (len(self.img_names), len(figures)))
        assert len(figures) == len(self.img_names), n_figures_msg

        for img_name, figure in zip(self.img_names, figures):
            expected_path = self.expected_path(test_name, img_name, '.png')
            result_path = self.result_path(test_name, img_name, '.png')

            for target in (expected_path, result_path):
                if not os.path.isdir(os.path.dirname(target)):
                    os.makedirs(os.path.dirname(target))

            # serialise concurrent writers of the same result image
            with flufl.lock.Lock(result_path + '.lock'):
                self.save_figure(figure, result_path)
                self.do_compare(result_path, expected_path, self.tolerance)

    def save_figure(self, figure, result_fname):
        """
        The actual call which saves the figure; returns nothing.

        May be overridden to do figure based pre-processing (such as
        removing text objects etc.)
        """
        figure.savefig(result_fname)

    def do_compare(self, result_fname, expected_fname, tol):
        """
        Compare the result file with the expected file.

        If an RMS difference greater than ``tol`` is found, an
        AssertionError is raised with an appropriate message naming the
        files concerned.
        """
        if not os.path.exists(expected_fname):
            # first run for this image: adopt the result as the baseline
            warnings.warn('Created image in %s' % expected_fname)
            shutil.copy2(result_fname, expected_fname)

        err = mcompare.compare_images(expected_fname, result_fname,
                                      tol=tol, in_decorator=True)
        if err:
            msg = ('Images were different (RMS: %s).\n%s %s %s\nConsider '
                   'running idiff to inspect these differences.'
                   '' % (err['rms'], err['actual'],
                         err['expected'], err['diff']))
            assert False, msg

    def __call__(self, test_func):
        """Called when the decorator is applied to a function."""
        test_name = test_func.__name__
        mod_name = test_func.__module__
        if mod_name == '__main__':
            import sys
            fname = sys.modules[mod_name].__file__
            mod_name = os.path.basename(os.path.splitext(fname)[0])
        mod_name = mod_name.rsplit('.', 1)[-1]

        def wrapped(*args, **kwargs):
            orig_backend = plt.get_backend()
            plt.switch_backend('agg')
            mpl_setup()

            if plt.get_fignums():
                warnings.warn('Figures existed before running the %s %s test.'
                              ' All figures should be closed after they run. '
                              'They will be closed automatically now.' %
                              (mod_name, test_name))
                plt.close('all')

            with mpl.style.context(self.style):
                if MPL_VERSION >= '3.2.0':
                    mpl.rcParams['text.kerning_factor'] = 6

                r = test_func(*args, **kwargs)

                figures = [plt.figure(num) for num in plt.get_fignums()]

                try:
                    self.run_figure_comparisons(figures, test_name=mod_name)
                finally:
                    for figure in figures:
                        plt.close(figure)
                    plt.switch_backend(orig_backend)
            return r

        # nose needs the function's name to be in the form "test_*" to
        # pick it up
        wrapped.__name__ = test_name
        return wrapped
def failed_images_iter():
    """
    Return a generator of [expected, actual, diff] filenames for all failed
    image tests since the test output directory was created.
    """
    baseline_img_dir = os.path.join(ImageTesting.root_image_results,
                                    'baseline_images', 'mpl')
    diff_dir = os.path.join(ImageTesting.image_output_directory)
    baselines = sorted(glob.glob(os.path.join(baseline_img_dir,
                                              '*', '*.png')))
    for expected_fname in baselines:
        # Get the relative path of the expected image 2 folders up.
        expected_rel = os.path.relpath(
            expected_fname, os.path.dirname(os.path.dirname(expected_fname)))
        result_fname = os.path.join(
            diff_dir, os.path.dirname(expected_rel),
            'result-' + os.path.basename(expected_rel))
        # The image comparison names its diff '<result>-failed-diff.png';
        # its presence marks a failed test (strip the '.png' first).
        diff_fname = result_fname[:-4] + '-failed-diff.png'
        if os.path.exists(diff_fname):
            yield expected_fname, result_fname, diff_fname
def failed_images_html():
    """
    Generates HTML which shows the image failures side-by-side
    when viewed in a web browser.
    """
    img_template = '<img alt="{alt}" src="data:image/png;base64,{img}">'

    def encode(fname):
        # Inline the PNG bytes as a base64 data URI payload.
        with open(fname, "rb") as fh:
            return base64.b64encode(fh.read()).decode("ascii")

    parts = ['<!DOCTYPE html>', '<html>', '<body>']
    for expected, actual, diff in failed_images_iter():
        parts.append(expected)
        parts.append('<br>')
        parts.append(img_template.format(alt='expected', img=encode(expected)))
        parts.append(img_template.format(alt='actual', img=encode(actual)))
        parts.append(img_template.format(alt='diff', img=encode(diff)))
        parts.append('<br><hr>')
    parts.extend(['</body>', '</html>'])
    return '\n'.join(parts)
def show(projection, geometry):
    """Debugging helper: plot *geometry* and the projection boundary.

    The literal ``if 1:`` / ``elif 0:`` chains below are hand-toggled
    zoom presets left in place for interactive debugging.
    """
    orig_backend = mpl.get_backend()
    # Use an interactive backend for plt.show(); restored at the end.
    plt.switch_backend('tkagg')
    # NOTE(review): the ``and 1`` makes this branch always win for
    # MultiPolygon input, so the following MultiPolygon elif is dead
    # code -- confirm which rendering is intended.
    if geometry.type == 'MultiPolygon' and 1:
        multi_polygon = geometry
        for polygon in multi_polygon:
            import cartopy.mpl.patch as patch
            paths = patch.geos_to_path(polygon)
            for pth in paths:
                # NOTE(review): this rebinds the name ``patch`` (the module)
                # to a PathPatch on the first iteration -- works only because
                # the module is re-imported each outer iteration.
                patch = mpatches.PathPatch(pth, edgecolor='none',
                                           lw=0, alpha=0.2)
                plt.gca().add_patch(patch)
            line_string = polygon.exterior
            plt.plot(*zip(*line_string.coords),
                     marker='+', linestyle='-')
    elif geometry.type == 'MultiPolygon':
        multi_polygon = geometry
        for polygon in multi_polygon:
            line_string = polygon.exterior
            plt.plot(*zip(*line_string.coords),
                     marker='+', linestyle='-')
    elif geometry.type == 'MultiLineString':
        multi_line_string = geometry
        for line_string in multi_line_string:
            plt.plot(*zip(*line_string.coords),
                     marker='+', linestyle='-')
    elif geometry.type == 'LinearRing':
        plt.plot(*zip(*geometry.coords), marker='+', linestyle='-')
    # Hand-toggled zoom presets (only the first truthy branch runs).
    if 1:
        # Whole map domain
        plt.autoscale()
    elif 0:
        # The left-hand triangle
        plt.xlim(-1.65e7, -1.2e7)
        plt.ylim(0.3e7, 0.65e7)
    elif 0:
        # The tip of the left-hand triangle
        plt.xlim(-1.65e7, -1.55e7)
        plt.ylim(0.3e7, 0.4e7)
    elif 1:
        # The very tip of the left-hand triangle
        plt.xlim(-1.632e7, -1.622e7)
        plt.ylim(0.327e7, 0.337e7)
    elif 1:
        # The tip of the right-hand triangle
        plt.xlim(1.55e7, 1.65e7)
        plt.ylim(0.3e7, 0.4e7)
    # Draw the projection boundary behind everything (zorder=-1) without
    # letting it rescale the axes.
    plt.plot(*zip(*projection.boundary.coords), marker='o',
             scalex=False, scaley=False, zorder=-1)
    plt.show()
    plt.switch_backend(orig_backend)
| lgpl-3.0 |
ningchi/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
# NOTE(review): sp.misc.lena was removed from newer SciPy releases --
# confirm the pinned SciPy version still provides it.
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)
    plt.figure(figsize=(5, 5))
    plt.imshow(lena, cmap=plt.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): 'contours' is not a documented Axes.contour keyword
        # (levels are passed positionally in matplotlib) -- confirm intent.
        plt.contour(labels == l, contours=1,
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
    plt.xticks(())
    plt.yticks(())
    plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
all-umass/graphs | graphs/construction/incremental.py | 1 | 1809 | from __future__ import absolute_import
import numpy as np
from sklearn.metrics import pairwise_distances
from graphs import Graph
__all__ = ['incremental_neighbor_graph']
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None,
                               weighting='none'):
    '''See neighbor_graph.

    Generator: yields one Graph per element of whichever of ``k`` or
    ``epsilon`` is a sequence, growing a single shared adjacency matrix
    ``W`` incrementally between yields.
    '''
    assert ((k is not None) or (epsilon is not None)
            ), "Must provide `k` or `epsilon`"
    assert (_issequence(k) ^ _issequence(epsilon)
            ), "Exactly one of `k` or `epsilon` must be a sequence."
    assert weighting in ('binary','none'), "Invalid weighting param: " + weighting
    # 'none' keeps raw distances as edge weights; 'binary' writes 1s.
    is_weighted = weighting == 'none'
    if precomputed:
        # X is already a pairwise distance matrix.
        D = X
    else:
        D = pairwise_distances(X, metric='euclidean')
    # pre-sort for efficiency; drop column 0 (each point's self-distance)
    order = np.argsort(D)[:,1:]
    if k is None:
        k = D.shape[0]
    # generate the sequence of graphs
    # TODO: convert the core of these loops to Cython for speed
    W = np.zeros_like(D)
    I = np.arange(D.shape[0])
    if _issequence(k):
        # varied k, fixed epsilon
        if epsilon is not None:
            D[D > epsilon] = 0
        old_k = 0
        for new_k in k:
            # add neighbor columns [old_k, new_k) from the presorted order
            idx = order[:, old_k:new_k]
            dist = D[I, idx.T]
            W[I, idx.T] = dist if is_weighted else 1
            yield Graph.from_adj_matrix(W)
            old_k = new_k
    else:
        # varied epsilon, fixed k
        idx = order[:,:k]
        dist = D[I, idx.T].T
        # old_i[i]: number of neighbors of point i already written to W
        old_i = np.zeros(D.shape[0], dtype=int)
        for eps in epsilon:
            for i, row in enumerate(dist):
                oi = old_i[i]
                # rows of dist are sorted ascending, so searchsorted gives
                # how many further neighbors fall within this eps
                ni = oi + np.searchsorted(row[oi:], eps)
                rr = row[oi:ni]
                W[i, idx[i,oi:ni]] = rr if is_weighted else 1
                old_i[i] = ni
            yield Graph.from_adj_matrix(W)
def _issequence(x):
# Note: isinstance(x, collections.Sequence) fails for numpy arrays
return hasattr(x, '__len__')
| mit |
ilyes14/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when
    x is a matrix (2-d array). More precise than sqrt(squared_norm(x))
    because it delegates to the BLAS ``nrm2`` routine.
    """
    arr = np.asarray(x)
    # Pick the BLAS nrm2 implementation matching arr's dtype.
    nrm2_func, = linalg.get_blas_funcs(['nrm2'], [arr])
    return nrm2_func(arr)
# Newer NumPy has a ravel that needs less copying: order='K' keeps the
# memory layout instead of forcing C order (only available from 1.7.1).
_ravel = (np.ravel if np_version < (1, 7, 1)
          else partial(np.ravel, order='K'))
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Returns the squared Euclidean norm when x is a vector, the squared
    Frobenius norm when x is a matrix (2-d array). Faster than
    norm(x) ** 2 since it is a single dot product on the flattened data.
    """
    flat = _ravel(x)
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR
    sparse matrices and does not create an X.shape-sized temporary.
    Performs no input validation.
    """
    if issparse(X):
        sq_norms = csr_row_norms(X)
    else:
        # einsum computes the per-row dot products without a temporary.
        sq_norms = np.einsum('ij,ij->i', X, X)
    return sq_norms if squared else np.sqrt(sq_norms)
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(nl.det(A)) but more robust: slogdet works in the
    log domain. Returns -Inf if det(A) is non positive or is not defined.
    """
    sign, logdet = np.linalg.slogdet(A)
    return logdet if sign > 0 else -np.inf
def _impose_f_order(X):
    """Return (Fortran-ordered view of X, transposed flag) for BLAS calls.

    Important to access flags instead of calling np.isfortran: the flags
    check catches corner cases (e.g. 1-d / degenerate arrays).
    """
    if not X.flags.c_contiguous:
        return check_array(X, copy=False, order='F'), False
    # C-contiguous: hand BLAS the transpose, which IS Fortran-ordered.
    return check_array(X.T, copy=False, order='F'), True
def _fast_dot(A, B):
    """Matrix product of A and B via a direct BLAS gemm call.

    Raises ValueError whenever the inputs are unsuitable so the caller
    (``fast_dot``) can fall back to np.dot.
    """
    if B.shape[0] != A.shape[A.ndim - 1]:  # check adopted from '_dotblas.c'
        raise ValueError
    # gemm only handles matching 32/64-bit float operands.
    if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
                                 for x in [A, B]):
        warnings.warn('Data must be of same type. Supported types '
                      'are 32 and 64 bit float. '
                      'Falling back to np.dot.', NonBLASDotWarning)
        raise ValueError
    # Only genuine 2-d x 2-d products; vectors/degenerate shapes go to np.dot.
    if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
        raise ValueError
    # scipy 0.9 compliant API
    dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
# (Selected once at import time based on np_version and BLAS availability.)
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips
        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:
            >> import warnings
            >> from sklearn.utils.validation import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data.
            return np.dot(A, B)
else:
    fast_dot = np.dot
def density(w, **kwargs):
    """Compute density of a sparse vector.

    Return a value between 0 and 1 (the fraction of non-zero entries).
    """
    if hasattr(w, "toarray"):
        # Sparse matrix: stored entries over total entries.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    if w is None:
        return 0
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.
    """
    if not (issparse(a) or issparse(b)):
        return fast_dot(a, b)
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter, random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A: 2D array
        The input data matrix
    size: integer
        Number of columns of the returned basis
    n_iter: integer
        Number of power iterations used to stabilize the result
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    Returns
    -------
    Q: 2D array
        Matrix with orthonormal columns whose range approximates well the
        range of the input matrix A.

    Notes
    -----
    Follows Algorithm 4.3 of Halko, et al., 2009
    (http://arxiv.org/pdf/0909.4061).
    """
    rng = check_random_state(random_state)
    # Sample the range of A by projecting random Gaussian vectors through it.
    Y = safe_sparse_dot(A, rng.normal(size=(A.shape[1], size)))
    # Power iterations further 'imprint' the top singular vectors of A in Y.
    for _ in xrange(n_iter):
        Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
    # Orthonormalize the sampled range.
    Q, _unused_R = linalg.qr(Y, mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
                   transpose='auto', flip_sign=True, random_state=0):
    """Computes a truncated randomized SVD
    Parameters
    ----------
    M: ndarray or sparse matrix
        Matrix to decompose
    n_components: int
        Number of singular values and vectors to extract.
    n_oversamples: int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples.
    n_iter: int (default is 0)
        Number of power iterations (can be used to deal with very noisy
        problems).
    transpose: True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case).
    flip_sign: boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior
    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components.
    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    """
    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if transpose == 'auto' and n_samples > n_features:
        transpose = True
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    # Find an orthonormal basis Q approximating the range of M.
    Q = randomized_range_finder(M, n_random, n_iter, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    # Lift the left singular vectors back to the original space.
    U = np.dot(Q, Uhat)
    if flip_sign:
        U, V = svd_flip(U, V)
    if transpose:
        # transpose back the results according to the input convention
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) along *axis* while minimizing the
    possibility of over/underflow by normalizing with the maximum first.
    """
    arr = np.rollaxis(arr, axis)
    # Use the max to normalize, as with the log this is what accumulates
    # the fewest errors.
    vmax = arr.max(axis=0)
    return vmax + np.log(np.exp(arr - vmax).sum(axis=0))
def weighted_mode(a, w, axis=0):
    """Returns an array of the weighted modal (most common) value in a
    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.
    This is an extension of the algorithm in scipy.stats.mode.
    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.
    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.
    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([ 4.]), array([ 3.]))
    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.
    >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([ 2.]), array([ 3.5]))
    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.
    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        # Flatten both inputs and operate over the single remaining axis.
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
        axis = axis
    if a.shape != w.shape:
        # Broadcast w up to a's shape (addition with zeros broadcasts).
        w = np.zeros(a.shape, dtype=w.dtype) + w
    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape)
    oldcounts = np.zeros(testshape)
    # For each candidate value, sum its weights along `axis` and keep it
    # wherever it beats the best weighted count seen so far.
    for score in scores:
        template = np.zeros(a.shape)
        ind = (a == score)
        template[ind] = w[ind]
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent
    return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.

    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition and including all 'large' eigenvalues.

    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex hermetian matrix to be pseudo-inverted
    cond : float or None, default None
        Cutoff for 'small' eigenvalues: magnitudes below
        cond * largest_eigenvalue are treated as zero. If None or -1,
        suitable machine precision is used.
    rcond : float or None, default None (deprecated)
        Same meaning as ``cond``; overrides it when given.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)

    Returns
    -------
    B : array, shape (N, N)

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge
    """
    a = np.asarray_chkfinite(a)
    eigvals, eigvecs = linalg.eigh(a, lower=lower)
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        type_char = eigvecs.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[type_char] * np.finfo(type_char).eps
    # Unlike the SVD case, eigh can yield negative eigenvalues, so the
    # cutoff compares magnitudes.
    keep = np.abs(eigvals) > cond * np.max(np.abs(eigvals))
    inv_sigma = np.zeros_like(eigvals)
    inv_sigma[keep] = 1.0 / eigvals[keep]
    return np.dot(eigvecs * inv_sigma, np.conjugate(eigvecs).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.
    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.
    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays. When ``out`` is None, its dtype is the
        NumPy promotion of all input dtypes (so mixed int/float inputs
        are no longer truncated to the first array's dtype).
    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    # Materialize the shape: np.indices needs a concrete sequence of ints.
    shape = tuple(len(x) for x in arrays)
    # Promote across all inputs instead of blindly taking arrays[0].dtype,
    # which silently truncated e.g. floats when the first array was int.
    dtype = np.result_type(*arrays)
    # ix[:, n] enumerates the index into arrays[n] for every output row.
    ix = np.indices(shape)
    ix = ix.reshape(len(arrays), -1).T
    if out is None:
        out = np.empty_like(ix, dtype=dtype)
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v (in place) such that, per
    component, the entry that is largest in absolute value is positive.

    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner
        dimensions so one can compute `np.dot(u * s, v)`.
    u_based_decision : boolean, (default=True)
        If True, the columns of u drive the sign choice; otherwise the
        rows of v do. The choice is generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # Sign of the absolute-largest entry in each column of u.
        peak_rows = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[peak_rows, range(u.shape[1])])
    else:
        # Sign of the absolute-largest entry in each row of v.
        peak_cols = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[range(v.shape[0]), peak_cols])
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
    This implementation is numerically stable because it splits positive and
    negative values::
        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0
    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
    Parameters
    ----------
    X: array-like, shape (M, N) or (M, )
        Argument to the logistic function
    out: array-like, shape: (M, N) or (M, ), optional:
        Preallocated output array.
    Returns
    -------
    out: array, shape (M, N) or (M, )
        Log of the logistic function evaluated at every point in x
    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    # Remember whether the input was 1-d so the output can be squeezed back.
    is_1d = X.ndim == 1
    X = np.atleast_2d(X)
    X = check_array(X, dtype=np.float64)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # Cython helper fills `out` in place with the stable piecewise formula.
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    if is_1d:
        return np.squeeze(out)
    return out
def softmax(X, copy=True):
    """
    Calculate the softmax function.

    The softmax function is calculated by
    np.exp(X) / np.sum(np.exp(X), axis=1)
    Exponentiating large values would overflow, so each row's maximum is
    subtracted from that row first.

    Parameters
    ----------
    X: array-like, shape (M, N)
        Argument to the logistic function
    copy: bool, optional
        Copy X or not.

    Returns
    -------
    out: array, shape (M, N)
        Softmax function evaluated at every point in x
    """
    if copy:
        X = np.copy(X)
    # Shift each row by its max, exponentiate in place, then normalize.
    X -= np.max(X, axis=1).reshape((-1, 1))
    np.exp(X, X)
    X /= np.sum(X, axis=1).reshape((-1, 1))
    return X
def safe_min(X):
    """Returns the minimum value of a dense or a CSR/CSC matrix.

    Adapted from http://stackoverflow.com/q/13426580
    """
    if not issparse(X):
        return X.min()
    if len(X.data) == 0:
        # All-zero sparse matrix.
        return 0
    stored_min = X.data.min()
    if X.getnnz() == X.size:
        # No implicit zeros: the stored minimum is the true minimum.
        return stored_min
    # Implicit zeros exist, so the minimum can be at most 0.
    return min(stored_min, 0)
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value` by shifting X when necessary."""
    lowest = safe_min(X)
    if lowest >= min_value:
        return X
    if issparse(X):
        raise ValueError("Cannot make the data matrix"
                         " nonnegative because it is sparse."
                         " Adding a value to every entry would"
                         " make it no longer sparse.")
    # Dense case: a uniform shift brings the minimum up to min_value.
    return X + (min_value - lowest)
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
btgorman/RISE-power-water-ss-1phase | classes_power.py | 1 | 88826 | # Copyright 2017 Brandon T. Gorman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BUILT USING PYTHON 3.6.0
import numpy as np
import pandas as pd
import math, random
import classes_water as ENC
SQRTTHREE = math.sqrt(3.)
class TemperatureDerating:
    """Placeholder for temperature-based derating multipliers.

    NOTE(review): the three method definitions below reuse the names of
    the class attributes defined first, so at class-creation time the
    float attributes are shadowed by the (empty) methods -- confirm
    which form callers expect before relying on either.
    """
    # Default multipliers (1.0 == no derating).
    condmult = 1.0
    loadmult = 1.0
    genmult = 1.0
    def condmult(cls):
        # no-op placeholder
        pass
    def loadmult(cls):
        # no-op placeholder
        pass
    def genmult(cls):
        # no-op placeholder
        pass
class XYCurve: #errors -1000 to -1024
    """Five-point XY curve table (e.g. wind-speed-to-generation lookup).

    Rows live in ``self.matrix`` (a numpy array taken from a pandas
    DataFrame); the class-level constants below are column indices.
    Methods follow the file-wide convention of returning 0 on success and
    a negative error code on failure.
    """
    CLID = 1000
    # Column indices into each matrix row.
    ID = 0
    TYPE = 1
    X_1_COORDINATE = 2
    X_2_COORDINATE = 3
    X_3_COORDINATE = 4
    X_4_COORDINATE = 5
    X_5_COORDINATE = 6
    Y_1_COORDINATE = 7
    Y_2_COORDINATE = 8
    Y_3_COORDINATE = 9
    Y_4_COORDINATE = 10
    Y_5_COORDINATE = 11
    # Number of points passed to the OpenDSS XYCurve definition.
    NPTS = 5
    def __init__(self, dframe):
        # Keep both the column names and the raw values; all row access
        # below goes through self.matrix.
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        # XY curves have no switchable or stochastic entries.
        self.num_switches = self.num_components * 0
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        # Look up a column index by name (e.g. 'ID' -> 0).
        # NOTE(review): defined without @classmethod and shadows the
        # builtin ``str``; on failure it prints and implicitly returns None.
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in XYCurve0')
    def createAllDSS(self, dss, interconn_dict, debug):
        # Emit one OpenDSS 'New XYCurve....' command per table row; the
        # element name is '<type>_<id>'.
        try:
            for row in self.matrix:
                str_self_name = str(int(row[XYCurve.TYPE])) + '_' + str(int(row[XYCurve.ID]))
                if debug == 1:
                    # Echo the exact command that will be sent to OpenDSS.
                    print('New \'XYCurve.{}\' npts=\'{}\' xarray=[{:f} {:f} {:f} {:f} {:f}] yarray=[{:f} {:f} {:f} {:f} {:f}]\n'.format(
                        str_self_name, XYCurve.NPTS, row[XYCurve.X_1_COORDINATE], row[XYCurve.X_2_COORDINATE],
                        row[XYCurve.X_3_COORDINATE], row[XYCurve.X_4_COORDINATE], row[XYCurve.X_5_COORDINATE], row[XYCurve.Y_1_COORDINATE],
                        row[XYCurve.Y_2_COORDINATE], row[XYCurve.Y_3_COORDINATE], row[XYCurve.Y_4_COORDINATE], row[XYCurve.Y_5_COORDINATE]))
                dss.Command = 'New \'XYCurve.{}\' npts=\'{}\' xarray=[{:f} {:f} {:f} {:f} {:f}] yarray=[{:f} {:f} {:f} {:f} {:f}]'.format(
                    str_self_name, XYCurve.NPTS, row[XYCurve.X_1_COORDINATE], row[XYCurve.X_2_COORDINATE],
                    row[XYCurve.X_3_COORDINATE], row[XYCurve.X_4_COORDINATE], row[XYCurve.X_5_COORDINATE], row[XYCurve.Y_1_COORDINATE],
                    row[XYCurve.Y_2_COORDINATE], row[XYCurve.Y_3_COORDINATE], row[XYCurve.Y_4_COORDINATE], row[XYCurve.Y_5_COORDINATE])
            return 0
        except:
            print('Error: #-1000')
            return -1000
    def voltagesToSets(self):
        # XY curves are not attached to buses; nothing to report.
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        # No solver outputs to read back for XY curves.
        try:
            return 0
        except:
            print('Error: #-1004')
            return -1004
    def convertToDataFrame(self):
        # Rebuild the pandas DataFrame from the stored matrix/columns.
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1006')
            return -1006
    def returnWindGenFraction(self, curve_id, wind_fraction):
        # Piecewise-linear interpolation: given a Y value (wind fraction),
        # return the corresponding X value (generation fraction), clamped
        # to [0, 1] outside the curve's Y range.
        # NOTE(review): if curve_id is not found, this prints #-1007 and
        # implicitly returns None -- confirm callers handle that.
        try:
            for row in self.matrix:
                if row[XYCurve.ID] == curve_id:
                    gen_fraction = 0.0
                    if wind_fraction < row[XYCurve.Y_1_COORDINATE]:
                        gen_fraction = 0.0
                    elif wind_fraction > row[XYCurve.Y_5_COORDINATE]:
                        gen_fraction = 1.0
                    elif wind_fraction < row[XYCurve.Y_2_COORDINATE]:
                        gen_fraction = row[XYCurve.X_1_COORDINATE] + (wind_fraction - row[XYCurve.Y_1_COORDINATE]) * (row[XYCurve.X_2_COORDINATE] - row[XYCurve.X_1_COORDINATE]) / (row[XYCurve.Y_2_COORDINATE] - row[XYCurve.Y_1_COORDINATE])
                    elif wind_fraction < row[XYCurve.Y_3_COORDINATE]:
                        gen_fraction = row[XYCurve.X_2_COORDINATE] + (wind_fraction - row[XYCurve.Y_2_COORDINATE]) * (row[XYCurve.X_3_COORDINATE] - row[XYCurve.X_2_COORDINATE]) / (row[XYCurve.Y_3_COORDINATE] - row[XYCurve.Y_2_COORDINATE])
                    elif wind_fraction < row[XYCurve.Y_4_COORDINATE]:
                        gen_fraction = row[XYCurve.X_3_COORDINATE] + (wind_fraction - row[XYCurve.Y_3_COORDINATE]) * (row[XYCurve.X_4_COORDINATE] - row[XYCurve.X_3_COORDINATE]) / (row[XYCurve.Y_4_COORDINATE] - row[XYCurve.Y_3_COORDINATE])
                    else:
                        gen_fraction = row[XYCurve.X_4_COORDINATE] + (wind_fraction - row[XYCurve.Y_4_COORDINATE]) * (row[XYCurve.X_5_COORDINATE] - row[XYCurve.X_4_COORDINATE]) / (row[XYCurve.Y_5_COORDINATE] - row[XYCurve.Y_4_COORDINATE])
                    return gen_fraction
            print('Error: #-1007')
        except:
            print('Error: #-1008')
            return -1008
    def convertToInputTensor(self):
        # XY curves contribute no input-tensor data.
        try:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            print('Error: #-1009')
            return -1009
    def convertToOutputTensor(self):
        # NOTE(review): the except branch returns 0 rather than a negative
        # error code -- inconsistent with the class's error convention.
        try:
            return [], np.empty([0, 0], dtype=np.float64).flatten()
        except:
            return 0
    def randomStochasticity(self):
        # No stochastic behavior for XY curves.
        pass
    def randomSwitching(self):
        # No switching behavior for XY curves.
        pass
class RegControl: #errors -1050 to -1074
    """Voltage regulator control table.

    Rows live in ``self.matrix``; the class-level constants are column
    indices. Most methods are inert pass-throughs returning 0 (no OpenDSS
    commands are emitted for reg controls here).
    """
    CLID = 1100
    # Column indices into each matrix row.
    ID = 0
    TYPE = 1
    TRANSFORMER_ID = 2
    BANDWIDTH = 3
    CT_RATING = 4
    PT_RATIO = 5
    R1 = 6
    REGULATOR_VOLTAGE = 7
    X1 = 8
    def __init__(self,dframe):
        # Keep both the column names and the raw values.
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        # Reg controls have no switchable or stochastic entries.
        self.num_switches = self.num_components * 0
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        # Look up a column index by name (e.g. 'R1' -> 6).
        # NOTE(review): defined without @classmethod and shadows builtin str.
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in RegControl0')
    def createAllDSS(self, dss, interconn_dict, debug):
        # Intentionally a no-op: no DSS commands are created for this class.
        return 0
    def voltagesToSets(self):
        # No buses attached; nothing to report.
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        # No solver outputs to read back.
        return 0
    def convertToDataFrame(self):
        # Rebuild the pandas DataFrame from the stored matrix/columns.
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1056')
            return -1056
    def convertToInputTensor(self):
        # Reg controls contribute no input-tensor data.
        try:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            pass
    def convertToOutputTensor(self):
        # Reg controls contribute no output-tensor data.
        try:
            return [], np.empty([0, 0], dtype=np.float64).flatten()
        except:
            pass
    def randomStochasticity(self):
        # No stochastic behavior for reg controls.
        pass
    def randomSwitching(self):
        # No switching behavior for reg controls.
        pass
class WireData: #errors -1100 to -1124
    """Conductor catalog table mapped to OpenDSS ``WireData`` definitions.

    Rows live in ``self.matrix``; the constants below are column indices
    and the fixed unit strings passed to OpenDSS.
    """
    CLID = 1200
    # Column indices into each matrix row.
    ID = 0
    TYPE = 1
    DIAMETER = 2
    GMR = 3
    NORMAL_AMPS = 4
    R_SCALAR = 5
    MAX_PU_CAPACITY = 6
    # Fixed units forwarded to the OpenDSS WireData definition.
    RESISTANCE_UNITS = 'kft'
    GMR_UNITS = 'ft'
    DIAMETER_UNITS = 'in'
def __init__(self, dframe):
self.cols = list(dframe.columns)
self.matrix = dframe.values
self.num_components = len(dframe.index)
self.num_switches = self.num_components * 0
self.num_stochastic = self.num_components * 0
self.switch_chance = (0.0, 0.0)
self.stochastic_chance = (0.0, 0.0)
def classValue(cls, str):
try:
return getattr(cls, str)
except:
print('POWER ERROR in WireData0')
def createAllDSS(self, dss, interconn_dict, debug):
try:
for row in self.matrix:
str_self_name = str(int(row[WireData.TYPE])) + '_' + str(int(row[WireData.ID]))
if debug == 1:
print('New \'WireData.{}\' Diam=\'{:f}\' Radunit=\'{}\' GMRac=\'{:f}\' GMRunits=\'{}\' Rac=\'{:f}\' Runits=\'{}\' Normamps=\'{:f}\' Emergamps=\'{:f}\'\n'.format(
str_self_name, row[WireData.DIAMETER], WireData.DIAMETER_UNITS, row[WireData.GMR],
WireData.GMR_UNITS, row[WireData.R_SCALAR], WireData.RESISTANCE_UNITS, row[WireData.NORMAL_AMPS],
row[WireData.NORMAL_AMPS]*row[WireData.MAX_PU_CAPACITY]))
dss.Command = 'New \'WireData.{}\' Diam=\'{:f}\' Radunit=\'{}\' GMRac=\'{:f}\' GMRunits=\'{}\' Rac=\'{:f}\' Runits=\'{}\' Normamps=\'{:f}\' Emergamps=\'{:f}\''.format(
str_self_name, row[WireData.DIAMETER], WireData.DIAMETER_UNITS, row[WireData.GMR],
WireData.GMR_UNITS, row[WireData.R_SCALAR], WireData.RESISTANCE_UNITS, row[WireData.NORMAL_AMPS],
row[WireData.NORMAL_AMPS]*row[WireData.MAX_PU_CAPACITY])
return 0,
except:
print('Error: #-1100')
return -1100
def voltagesToSets(self):
return set()
def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
try:
return 0
except:
print('Error: #-1104')
return -1104
def convertToDataFrame(self):
try:
return pd.DataFrame(data=self.matrix, columns=self.cols)
except:
print('Error: #-1106')
return -1106
def convertToInputTensor(self):
try:
return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
except:
return 0
def convertToOutputTensor(self):
try:
return [], np.empty([0, 0], dtype=np.float64).flatten()
except:
return 0
def randomStochasticity(self):
pass
def randomSwitching(self):
pass
class LineCode: #errors -1125 to -1149
    """Line-impedance code table; emits one OpenDSS 'New LineCode.*' command per row."""
    CLID = 1201
    # Column indices into self.matrix.
    ID = 0
    TYPE = 1
    KRON_REDUCTION = 2
    NUMBER_OF_PHASES = 3
    R0_SCALAR = 4
    R1_SCALAR = 5
    X0_SCALAR = 6
    X1_SCALAR = 7
    C0_SCALAR = 8
    C1_SCALAR = 9
    B0_SCALAR = 10
    B1_SCALAR = 11
    # Length unit assumed for all impedance values in DSS commands.
    UNITS = 'kft'
    def __init__(self, dframe):
        # Keep the raw matrix plus column names so the table can be rebuilt later.
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        # Line codes expose no switches or stochastic inputs.
        self.num_switches = self.num_components * 0
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        """Look up a class-level constant by name."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in LineCode0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit 'New LineCode.<type>_<id>' commands; 0 on success, -1125 on failure."""
        try:
            for row in self.matrix:
                # KRON_REDUCTION == 1.0 requests neutral elimination ('Kron=Y').
                neutral_reduce = 'N'
                if row[LineCode.KRON_REDUCTION] == 1.0:
                    neutral_reduce = 'Y'
                str_self_name = str(int(row[LineCode.TYPE])) + '_' + str(int(row[LineCode.ID]))
                # Impedance string priority: capacitance (C0/C1) if any is nonzero,
                # else susceptance (B0/B1) if any is nonzero, else R/X only.
                if row[LineCode.C0_SCALAR] != 0.0 or row[LineCode.C1_SCALAR] != 0.0:
                    str_impedance = 'R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\' C0=\'{:f}\' C1=\'{:f}\''.format(
                        row[LineCode.R0_SCALAR], row[LineCode.R1_SCALAR], row[LineCode.X0_SCALAR], row[LineCode.X1_SCALAR],
                        row[LineCode.C0_SCALAR], row[LineCode.C1_SCALAR])
                elif row[LineCode.B0_SCALAR] != 0.0 or row[LineCode.B1_SCALAR] != 0.0:
                    str_impedance = 'R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\' B0=\'{:f}\' B1=\'{:f}\''.format(
                        row[LineCode.R0_SCALAR], row[LineCode.R1_SCALAR], row[LineCode.X0_SCALAR], row[LineCode.X1_SCALAR],
                        row[LineCode.B0_SCALAR], row[LineCode.B1_SCALAR])
                else:
                    str_impedance = 'R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\''.format(
                        row[LineCode.R0_SCALAR], row[LineCode.R1_SCALAR], row[LineCode.X0_SCALAR], row[LineCode.X1_SCALAR])
                if debug == 1:
                    print('New \'LineCode.{}\' Nphases=\'{}\' {} Units=\'{}\' Kron=\'{}\''.format(
                        str_self_name, int(row[LineCode.NUMBER_OF_PHASES]), str_impedance, LineCode.UNITS,
                        neutral_reduce))
                    print('\n')
                dss.Command = 'New \'LineCode.{}\' Nphases=\'{}\' {} Units=\'{}\' Kron=\'{}\''.format(
                    str_self_name, int(row[LineCode.NUMBER_OF_PHASES]), str_impedance, LineCode.UNITS,
                    neutral_reduce)
            return 0
        except:
            print('Error: #-1125')
            return -1125
    def voltagesToSets(self):
        """No nominal voltages are contributed by line codes."""
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Nothing to read back from DSS for line codes."""
        try:
            return 0
        except:
            print('Error: #-1129')
            return -1129
    def convertToDataFrame(self):
        """Rebuild a DataFrame from the stored matrix and column names."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1131')
            return -1131
    def convertToInputTensor(self):
        """Return empty input-tensor pieces (no ML inputs for this type)."""
        try:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            print('Error: #-1132')
            return -1132
    def convertToOutputTensor(self):
        """Return empty output-tensor pieces (no ML outputs for this type)."""
        try:
            return [], np.empty([0, 0], dtype=np.float64).flatten()
        except:
            return 0
    def randomStochasticity(self):
        # Intentionally a no-op: nothing stochastic on line codes.
        pass
    def randomSwitching(self):
        # Intentionally a no-op: nothing switchable on line codes.
        pass
class Bus: #errors -1150 to -1174
    """Bus (node) table. Buses are created implicitly in DSS by the elements
    that reference them, so only result read-back is implemented here."""
    CLID = 1300
    # Column indices into self.matrix.
    ID = 0
    TYPE = 1
    FUNCTIONAL_STATUS = 2 # switch
    NOMINAL_LL_VOLTAGE = 3
    A = 4
    MIN_PU_VOLTAGE = 5
    MAX_PU_VOLTAGE = 6
    OPERATIONAL_STATUS = 7 # switch
    A_PU_VOLTAGE = 8
    A_VOLTAGE = 9
    A_VOLTAGE_ANGLE = 10
    def __init__(self, dframe):
        # Keep the raw matrix plus column names so the table can be rebuilt later.
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 0 # temporary
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        """Look up a class-level constant by name."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in Bus0')
    def createAllDSS(self, dss, interconn_dict, debug):
        # No explicit DSS definition needed; elements reference buses by name.
        return 0
    def voltagesToSets(self):
        """No nominal voltages are contributed directly by buses."""
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read phase-A voltage magnitude/angle and per-unit voltage for each bus
        named '<CLID>_<id>' from the solved DSS circuit; 0 on success, -1152 on error."""
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.SetActiveBus( str(Bus.CLID) + '_' + str(int(row[Bus.ID])) )
                var_volt_mag = list(dssActvBus.VMagAngle)
                var_volt_pu = list(dssActvBus.puVmagAngle)
                # Clear before reading so a partial read leaves a defined value.
                row[Bus.A_PU_VOLTAGE] = 0.0
                # NOTE(review): unlike sibling classes this does not gate on
                # row[Bus.A] == 1.0 before reading phase A -- confirm intended.
                row[Bus.A_VOLTAGE] = var_volt_mag[idxcount*2]
                row[Bus.A_VOLTAGE_ANGLE] = var_volt_mag[idxcount*2 + 1]
                row[Bus.A_PU_VOLTAGE] = var_volt_pu[idxcount*2]
                idxcount += 1
            return 0
        except:
            print('Error: #-1152')
            return -1152
    def convertToDataFrame(self):
        """Rebuild a DataFrame from the stored matrix and column names."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1154')
            return -1154
    def convertToInputTensor(self):
        """Return empty input-tensor pieces (no ML inputs for buses)."""
        try:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            print('Error: #-1155')
            return -1155
    def convertToOutputTensor(self):
        """Name and flatten the per-bus ML outputs (phase-A pu voltage)."""
        try:
            output_list = []
            output_col = ['a_PU_voltage']
            for row in self.matrix:
                for elem in output_col:
                    output_list.append('Bus_' + str(int(row[Bus.ID])) + '_' + elem)
            outputdf = self.convertToDataFrame()
            outputdf = outputdf[output_col]
            return output_list, outputdf.values.flatten()
        except:
            print('Error: #-1156')
            return -1156
    def randomStochasticity(self):
        # Intentionally a no-op: nothing stochastic on buses.
        pass
    def randomSwitching(self):
        """Switch one randomly chosen bus out of service (operational status 0)."""
        try:
            row = random.randrange(0, self.num_components)
            self.matrix[row, Bus.OPERATIONAL_STATUS] = 0.0
        except:
            print('Error: #-1157')
            return -1157
class VSource: #errors -1175 to -1199
    """Slack/source table; emits the OpenDSS 'New Circuit.*' definition and
    reads back the single source ('sourcebus') after a solve."""
    CLID = 1301
    # Column indices into self.matrix.
    ID = 0
    TYPE = 1
    FUNCTIONAL_STATUS = 2 # switch
    NOMINAL_LL_VOLTAGE = 3
    A = 4
    EXPECTED_GENERATION = 5
    R0 = 6
    R1 = 7
    VOLTAGE_ANGLE = 8
    X0 = 9
    X1 = 10
    MIN_PU_VOLTAGE = 11
    MAX_PU_VOLTAGE = 12
    MAX_AREA_CONTROL_ERROR = 13
    OPERATIONAL_STATUS = 14 # switch
    A_PU_VOLTAGE = 15
    A_VOLTAGE = 16
    A_VOLTAGE_ANGLE = 17
    A_CURRENT = 18
    A_CURRENT_ANGLE = 19
    REAL_POWER = 20
    REACTIVE_POWER = 21
    AREA_CONTROL_ERROR = 22
    def __init__(self, dframe):
        # Keep the raw matrix plus column names so the table can be rebuilt later.
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 0 # temporary
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        """Look up a class-level constant by name."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in VSource0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit one 'New Circuit.<type>_<id>' command per row; 0 on success, -1175 on error."""
        try:
            for row in self.matrix:
                str_self_name = str(int(row[VSource.TYPE])) + '_' + str(int(row[VSource.ID]))
                num_phases = 0
                # Short-circuit MVA levels and initial pu voltage are fixed defaults here.
                mvasc1 = 100
                mvasc3 = 300
                init_volt_pu = 1.0
                voltage_rating = row[VSource.NOMINAL_LL_VOLTAGE]
                if row[VSource.A] == 1.0:
                    num_phases += 1
                if num_phases == 1:
                    # Single-phase source: use line-to-neutral voltage.
                    voltage_rating = voltage_rating / math.sqrt(3.0)
                if debug == 1:
                    print('New \'Circuit.{}\' Basekv=\'{:f}\' phases=\'{}\' pu=\'{:f}\' Angle=\'{:f}\' Mvasc1=\'{:f}\' Mvasc3=\'{:f}\' R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\'\n'.format(
                        str_self_name, voltage_rating, num_phases, init_volt_pu, row[VSource.VOLTAGE_ANGLE],
                        mvasc1, mvasc3, row[VSource.R0], row[VSource.R1],
                        row[VSource.X0], row[VSource.X1]))
                dss.Command = 'New \'Circuit.{}\' Basekv=\'{:f}\' phases=\'{}\' pu=\'{:f}\' Angle=\'{:f}\' Mvasc1=\'{:f}\' Mvasc3=\'{:f}\' R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\''.format(
                    str_self_name, voltage_rating, num_phases, init_volt_pu, row[VSource.VOLTAGE_ANGLE],
                    mvasc1, mvasc3, row[VSource.R0], row[VSource.R1],
                    row[VSource.X0], row[VSource.X1])
            return 0
        except:
            print('Error: #-1175')
            return -1175
    def voltagesToSets(self):
        """No nominal voltages are contributed via this hook (the circuit defines its own)."""
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read voltages/currents/powers for the single source; 0 on success, -1179 on error."""
        try:
            for row in self.matrix: # ONLY RETRIEVES THE ONE VSOURCE SOURCEBUS
                idxcount = 0
                dssCkt.SetActiveBus('sourcebus')
                dssCkt.Vsources.Name = 'source'
                var_volt_mag = list(dssActvBus.VMagAngle)
                var_volt_pu = list(dssActvBus.puVmagAngle)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                # Zero the whole results slice, then fill phase A if present.
                row[VSource.A_PU_VOLTAGE : VSource.AREA_CONTROL_ERROR+1] = 0.0
                if row[VSource.A] == 1.0:
                    row[VSource.A_VOLTAGE] = var_volt_mag[idxcount*2]
                    row[VSource.A_VOLTAGE_ANGLE] = var_volt_mag[idxcount*2 + 1]
                    row[VSource.A_PU_VOLTAGE] = var_volt_pu[idxcount*2]
                    row[VSource.A_CURRENT] = var_curr[idxcount*2]
                    row[VSource.A_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[VSource.REAL_POWER] += var_pow[idxcount*2]
                    row[VSource.REACTIVE_POWER] += var_pow[idxcount*2 + 1]
                    idxcount += 1
            return 0
        except:
            print('Error: #-1179')
            return -1179
    def convertToDataFrame(self):
        """Rebuild a DataFrame from the stored matrix and column names."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1181')
            return -1181
    def convertToInputTensor(self):
        """Return empty input-tensor pieces (no ML inputs for the source)."""
        try:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            print('Error: #-1182')
            return -1182
    def convertToOutputTensor(self):
        """Name and flatten the per-source ML outputs (pu voltage, current)."""
        try:
            output_list = []
            output_col = ['a_PU_voltage', 'a_current']
            for row in self.matrix:
                for elem in output_col:
                    output_list.append('VSource_' + str(int(row[VSource.ID])) + '_' + elem)
            outputdf = self.convertToDataFrame()
            outputdf = outputdf[output_col]
            return output_list, outputdf.values.flatten()
        except:
            print('Error: #-1183')
            return -1183
    def randomStochasticity(self):
        # Intentionally a no-op: the source is deterministic.
        pass
    def randomSwitching(self):
        # Intentionally a no-op: the source is never switched out.
        pass
class Generator: #errors -1200 to -1224
    """Generator table; emits OpenDSS 'New Generator.*' commands, reads back
    solved voltages/currents/powers, and exposes ML input/output tensors."""
    CLID = 1302
    # Column indices into self.matrix.
    ID = 0
    TYPE = 1
    FUNCTIONAL_STATUS = 2 # switch
    NOMINAL_LL_VOLTAGE = 3
    A = 4
    REAL_GENERATION = 5 # stochastic, temporary
    REACTIVE_GENERATION = 6
    MODEL = 7
    RAMP_RATE = 8
    REAL_GENERATION_MIN_RATING = 9
    REAL_GENERATION_MAX_RATING = 10
    REACTIVE_GENERATION_MIN_RATING = 11
    REACTIVE_GENERATION_MAX_RATING = 12
    WATER_CONSUMPTION = 13
    WATER_DERATING = 14
    WIRING = 15
    JUNCTION_ID = 16
    MIN_PU_VOLTAGE = 17
    MAX_PU_VOLTAGE = 18
    OPERATIONAL_STATUS = 19 # switch
    REAL_GENERATION_CONTROL = 20 # stochastic TODO unused
    REACTIVE_GENERATION_CONTROL = 21 # stochastic TODO unused
    A_PU_VOLTAGE = 22
    A_VOLTAGE = 23
    A_VOLTAGE_ANGLE = 24
    A_CURRENT = 25
    A_CURRENT_ANGLE = 26
    REAL_POWER = 27
    REACTIVE_POWER = 28
    def __init__(self, dframe):
        """Capture the component DataFrame as a raw matrix plus bookkeeping counters."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        self.num_stochastic = self.num_components * 2
        # NOTE(review): unlike RegControl/WireData/Bus, no switch_chance /
        # stochastic_chance tuples are set here -- confirm callers don't read them.
    def classValue(cls, str):
        """Look up a class-level constant by name."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in Generator0')
    # TO DO: Code ramp up and down change
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit one 'New Generator.*' command per row, clamping dispatch to ratings
        and disabling units whose functional*operational status is 0.

        Returns 0 on success, -1200 on failure.
        """
        try:
            for row in self.matrix:
                str_bus_conn = ''
                str_conn = 'wye'
                num_phases = 0
                num_kv = row[Generator.NOMINAL_LL_VOLTAGE]
                # NOTE(review): WATER_DERATING column exists but derating is
                # hard-coded to 1.0 here -- confirm whether it should be applied.
                derating = 1.0
                if row[Generator.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                if num_phases == 0:
                    print('Error: #-1201')
                if row[Generator.WIRING] == 0.0:
                    str_conn = 'delta'
                elif num_phases == 1:
                    # Single-phase wye: use line-to-neutral voltage.
                    num_kv = num_kv / math.sqrt(3.0)
                # FIX THIS TO ACCOUNT FOR ROUNDING ERRORS
                if row[Generator.OPERATIONAL_STATUS] == 0.0 or row[Generator.REAL_GENERATION] == 0.0:
                    # Off (or dispatching nothing): zero the dispatch and mark off.
                    row[Generator.REAL_GENERATION] = 0.0
                    row[Generator.REACTIVE_GENERATION] = 0.0
                    row[Generator.OPERATIONAL_STATUS] = 0.0
                else:
                    # Clamp dispatch to the unit's min/max ratings.
                    if row[Generator.REAL_GENERATION] > row[Generator.REAL_GENERATION_MAX_RATING]:
                        row[Generator.REAL_GENERATION] = row[Generator.REAL_GENERATION_MAX_RATING]
                    elif row[Generator.REAL_GENERATION] < row[Generator.REAL_GENERATION_MIN_RATING]:
                        row[Generator.REAL_GENERATION] = row[Generator.REAL_GENERATION_MIN_RATING]
                    if row[Generator.REACTIVE_GENERATION] > row[Generator.REACTIVE_GENERATION_MAX_RATING]:
                        row[Generator.REACTIVE_GENERATION] = row[Generator.REACTIVE_GENERATION_MAX_RATING]
                    elif row[Generator.REACTIVE_GENERATION] < row[Generator.REACTIVE_GENERATION_MIN_RATING]:
                        row[Generator.REACTIVE_GENERATION] = row[Generator.REACTIVE_GENERATION_MIN_RATING]
                # Bus id is encoded in the low two decimal digits of the generator id.
                busid = int(row[Generator.ID]) % 100
                str_self_name = str(int(row[Generator.TYPE])) + '_' + str(int(row[Generator.ID]))
                str_bus_name = str(Bus.CLID) + '_' + str(busid)
                if debug == 1:
                    print('New \'Generator.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Kvar=\'{:f}\' Model=\'{}\' Conn=\'{}\'\n'.format(
                        str_self_name, str_bus_name, str_bus_conn, num_phases,
                        num_kv, row[Generator.REAL_GENERATION]*derating, row[Generator.REACTIVE_GENERATION]*derating, int(row[Generator.MODEL]),
                        str_conn))
                    if row[Generator.FUNCTIONAL_STATUS]*row[Generator.OPERATIONAL_STATUS] == 0.0:
                        print('Disable \'Generator.{}\''.format(str_self_name))
                dss.Command = 'New \'Generator.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Kvar=\'{:f}\' Model=\'{}\' Conn=\'{}\''.format(
                    str_self_name, str_bus_name, str_bus_conn, num_phases,
                    num_kv, row[Generator.REAL_GENERATION]*derating, row[Generator.REACTIVE_GENERATION]*derating, int(row[Generator.MODEL]),
                    str_conn)
                if row[Generator.FUNCTIONAL_STATUS]*row[Generator.OPERATIONAL_STATUS] == 0.0:
                    dss.Command = 'Disable \'Generator.{}\''.format(str_self_name)
            return 0
        except:
            print('Error: #-1200')
            return -1200
    def voltagesToSets(self):
        """Collect the distinct nominal line-to-line voltages of all generators."""
        try:
            return set(self.matrix[:, Generator.NOMINAL_LL_VOLTAGE])
        except:
            print('Error: #-1204')
            return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read phase-A voltages/currents/powers for every generator from the
        solved DSS circuit; 0 on success, -1206 on error."""
        try:
            for row in self.matrix:
                idxcount = 0
                busid = int(row[Generator.ID]) % 100
                dssCkt.SetActiveBus(str(Bus.CLID) + '_' + str(busid))
                dssCkt.Generators.Name = str(int(row[Generator.TYPE])) + '_' + str(int(row[Generator.ID]))
                var_volt_mag = list(dssActvBus.VMagAngle)
                var_volt_pu = list(dssActvBus.puVmagAngle)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                # Zero the whole results slice, then fill phase A if present.
                row[Generator.A_PU_VOLTAGE : Generator.REACTIVE_POWER+1] = 0.0
                if row[Generator.A] == 1.0:
                    row[Generator.A_VOLTAGE] = var_volt_mag[idxcount*2]
                    row[Generator.A_VOLTAGE_ANGLE] = var_volt_mag[idxcount*2 + 1]
                    row[Generator.A_PU_VOLTAGE] = var_volt_pu[idxcount*2]
                    row[Generator.A_CURRENT] = var_curr[idxcount*2]
                    row[Generator.A_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[Generator.REAL_POWER] += var_pow[idxcount*2]
                    row[Generator.REACTIVE_POWER] += var_pow[idxcount*2 + 1]
                    idxcount += 1
            return 0
        except:
            print('Error: #-1206')
            return -1206
    def convertToDataFrame(self):
        """Rebuild a DataFrame from the stored matrix and column names."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1208')
            return -1208
    def convertToInputTensor(self):
        """Name and flatten the per-unit ML inputs (continuous + categorical)."""
        try:
            input_list_continuous = []
            input_list_categorical = []
            input_col_continuous = ['real_generation', 'reactive_generation']
            input_col_categorical = ['operational_status']
            for row in self.matrix:
                for elem in input_col_continuous:
                    input_list_continuous.append('Generator_' + str(int(row[Generator.ID])) + '_' + elem)
                for elem in input_col_categorical:
                    input_list_categorical.append('Generator_' + str(int(row[Generator.ID])) + '_' + elem)
            inputdf = self.convertToDataFrame()
            inputdf_continuous = inputdf[input_col_continuous]
            inputdf_categorical = inputdf[input_col_categorical]
            return input_list_continuous, input_list_categorical, inputdf_continuous.values.flatten(), inputdf_categorical.values.flatten()
        except:
            print('Error: #-1209')
            return -1209
    def convertToOutputTensor(self):
        """Name and flatten the per-unit ML outputs (pu voltage, current)."""
        try:
            output_list = []
            output_col = ['a_PU_voltage', 'a_current']
            for row in self.matrix:
                for elem in output_col:
                    # BUG FIX: previously used str(int(Generator.ID)) -- the class
                    # constant 0 -- so every label was 'Generator_0_...'. Use the
                    # row's actual id, matching convertToInputTensor and peers.
                    output_list.append('Generator_' + str(int(row[Generator.ID])) + '_' + elem)
            outputdf = self.convertToDataFrame()
            outputdf = outputdf[output_col]
            return output_list, outputdf.values.flatten()
        except:
            print('Error: #-1210')
            return -1210
    def randomStochasticity(self):
        """Perturb one random unit's real OR reactive dispatch with Gaussian noise,
        clamped to the unit's min/max ratings."""
        try:
            row = random.randrange(0, self.num_components)
            if random.randrange(0, 2) == 0:
                real_generation_max = self.matrix[row, Generator.REAL_GENERATION_MAX_RATING]
                real_generation_min = self.matrix[row, Generator.REAL_GENERATION_MIN_RATING]
                rval = random.normalvariate(0, 0.5 * (real_generation_max - real_generation_min) * 0.04)
                self.matrix[row, Generator.REAL_GENERATION] += rval
                if self.matrix[row, Generator.REAL_GENERATION] > real_generation_max:
                    self.matrix[row, Generator.REAL_GENERATION] = real_generation_max
                elif self.matrix[row, Generator.REAL_GENERATION] < real_generation_min:
                    self.matrix[row, Generator.REAL_GENERATION] = real_generation_min
            else:
                reactive_generation_max = self.matrix[row, Generator.REACTIVE_GENERATION_MAX_RATING]
                reactive_generation_min = self.matrix[row, Generator.REACTIVE_GENERATION_MIN_RATING]
                rval = random.normalvariate(0, 0.5 * (reactive_generation_max - reactive_generation_min) * 0.04)
                # BUG FIX: previously indexed Generator.POWER_FACTOR_CONTROL, which is
                # a SolarPV column and does not exist on Generator -- the branch always
                # raised AttributeError into the bare except. The ratings read above
                # show the intent is to perturb REACTIVE_GENERATION.
                self.matrix[row, Generator.REACTIVE_GENERATION] += rval
                if self.matrix[row, Generator.REACTIVE_GENERATION] > reactive_generation_max:
                    self.matrix[row, Generator.REACTIVE_GENERATION] = reactive_generation_max
                elif self.matrix[row, Generator.REACTIVE_GENERATION] < reactive_generation_min:
                    self.matrix[row, Generator.REACTIVE_GENERATION] = reactive_generation_min
        except:
            # Error-code text normalized to the file-wide '#-NNNN' convention.
            print('Error: #-1211')
            return -1211
    def randomSwitching(self):
        """Switch one randomly chosen generator out of service."""
        try:
            row = random.randrange(0, self.num_components)
            self.matrix[row, Generator.OPERATIONAL_STATUS] = 0.0
        except:
            print('Error: #-1212')
            return -1212
class Load: #errors -1225 to -1249
    """Load table; emits OpenDSS 'New Load.*' commands (with optional ZIP model),
    reads back solved results, and exposes ML input/output tensors."""
    CLID = 1303
    # Column indices into self.matrix.
    ID = 0
    TYPE = 1
    FUNCTIONAL_STATUS = 2 # switch
    NOMINAL_LL_VOLTAGE = 3
    A = 4
    REAL_LOAD_MAX = 5
    REACTIVE_LOAD_MAX = 6
    MODEL = 7
    WIRING = 8
    REAL_LOAD = 9
    REACTIVE_LOAD = 10
    INTERCONNECTION_LOAD = 11
    MIN_PU_VOLTAGE = 12
    MAX_PU_VOLTAGE = 13
    OPERATIONAL_STATUS = 14 # switch
    A_PU_VOLTAGE = 15
    A_VOLTAGE = 16
    A_VOLTAGE_ANGLE = 17
    A_CURRENT = 18
    A_CURRENT_ANGLE = 19
    REAL_POWER = 20
    REACTIVE_POWER = 21
    def __init__(self, dframe):
        # Keep the raw matrix plus column names so the table can be rebuilt later.
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        self.num_stochastic = self.num_components * 1 # temporary ?
    def classValue(cls, str):
        """Look up a class-level constant by name."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in Load0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit one 'New Load.*' command per row, clamping demand to [0, max] and
        disabling loads whose functional*operational status is 0.

        Returns 0 on success, -1225 on failure.
        NOTE(review): the MODEL == 8.0 (ZIP) branches reference Load.ZIP_* class
        attributes that are not defined on this class -- as written they raise
        AttributeError into the bare except and return -1225. Confirm whether the
        ZIP columns were dropped or the constants were never added.
        """
        try:
            for row in self.matrix:
                str_bus_conn = ''
                str_conn = 'wye'
                num_phases = 0
                num_kv = row[Load.NOMINAL_LL_VOLTAGE]
                if row[Load.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                if num_phases == 0:
                    print('Error: #-1226')
                if row[Load.WIRING] == 0.0:
                    str_conn = 'delta'
                elif num_phases == 1:
                    # Single-phase wye: use line-to-neutral voltage.
                    num_kv = num_kv / math.sqrt(3.0)
                str_self_name = str(int(row[Load.TYPE])) + '_' + str(int(row[Load.ID])) + '_' + str(int(row[Load.A]))
                str_bus_name = str(Bus.CLID) + '_' + str(int(row[Load.ID]))
                # Clamp demand into [0, rated maximum].
                if row[Load.REAL_LOAD] < 0.0:
                    row[Load.REAL_LOAD] = 0.0
                elif row[Load.REAL_LOAD] > row[Load.REAL_LOAD_MAX]:
                    row[Load.REAL_LOAD] = row[Load.REAL_LOAD_MAX]
                if row[Load.REACTIVE_LOAD] < 0.0:
                    row[Load.REACTIVE_LOAD] = 0.0
                elif row[Load.REACTIVE_LOAD] > row[Load.REACTIVE_LOAD_MAX]:
                    row[Load.REACTIVE_LOAD] = row[Load.REACTIVE_LOAD_MAX]
                if debug == 1:
                    if row[Load.MODEL] == 8.0:
                        print('Zip model not included!\n')
                        print('New \'Load.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Kvar=\'{:f}\' Model=\'{}\' ZIPV=[{:f} {:f} {:f} {:f} {:f} {:f} {:f}] Conn=\'{}\' Vminpu=\'{:f}\' Vmaxpu=\'{:f}\'\n'.format(
                            str_self_name, str_bus_name, str_bus_conn, num_phases,
                            num_kv, row[Load.REAL_LOAD] + row[Load.INTERCONNECTION_LOAD], row[Load.REACTIVE_LOAD], int(row[Load.MODEL]),
                            row[Load.ZIP_REAL_POWER], row[Load.ZIP_REAL_CURRENT], row[Load.ZIP_REAL_IMPEDANCE], row[Load.ZIP_REACTIVE_POWER],
                            row[Load.ZIP_REACTIVE_CURRENT], row[Load.ZIP_REACTIVE_IMPEDANCE], row[Load.ZIP_PU_VOLTAGE_CUTOFF], str_conn,
                            row[Load.MIN_PU_VOLTAGE], row[Load.MAX_PU_VOLTAGE]))
                    else:
                        print('New \'Load.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Kvar=\'{:f}\' Model=\'{}\' Conn=\'{}\' Vminpu=\'{:f}\' Vmaxpu=\'{:f}\'\n'.format(
                            str_self_name, str_bus_name, str_bus_conn, num_phases,
                            num_kv, row[Load.REAL_LOAD] + row[Load.INTERCONNECTION_LOAD], row[Load.REACTIVE_LOAD], int(row[Load.MODEL]),
                            str_conn, row[Load.MIN_PU_VOLTAGE], row[Load.MAX_PU_VOLTAGE]))
                    if row[Load.FUNCTIONAL_STATUS]*row[Load.OPERATIONAL_STATUS] == 0.0:
                        print('Disable \'Load.{}\''.format(str_self_name))
                if row[Load.MODEL] == 8.0:
                    dss.Command = 'New \'Load.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Kvar=\'{:f}\' Model=\'{}\' ZIPV=[{:f} {:f} {:f} {:f} {:f} {:f} {:f}] Conn=\'{}\' Vminpu=\'{:f}\' Vmaxpu=\'{:f}\''.format(
                        str_self_name, str_bus_name, str_bus_conn, num_phases,
                        num_kv, row[Load.REAL_LOAD] + row[Load.INTERCONNECTION_LOAD], row[Load.REACTIVE_LOAD], int(row[Load.MODEL]),
                        row[Load.ZIP_REAL_POWER], row[Load.ZIP_REAL_CURRENT], row[Load.ZIP_REAL_IMPEDANCE], row[Load.ZIP_REACTIVE_POWER],
                        row[Load.ZIP_REACTIVE_CURRENT], row[Load.ZIP_REACTIVE_IMPEDANCE], row[Load.ZIP_PU_VOLTAGE_CUTOFF], str_conn,
                        row[Load.MIN_PU_VOLTAGE], row[Load.MAX_PU_VOLTAGE])
                    # ZIP coefficient triplets must each sum to 1.0.
                    if row[Load.ZIP_REAL_POWER] + row[Load.ZIP_REAL_CURRENT] + row[Load.ZIP_REAL_IMPEDANCE] != 1.0 or row[Load.ZIP_REACTIVE_POWER] + row[Load.ZIP_REACTIVE_CURRENT] + row[Load.ZIP_REACTIVE_IMPEDANCE] != 1.0:
                        print('Error: #-1228')
                else:
                    dss.Command = 'New \'Load.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Kvar=\'{:f}\' Model=\'{}\' Conn=\'{}\' Vminpu=\'{:f}\' Vmaxpu=\'{:f}\''.format(
                        str_self_name, str_bus_name, str_bus_conn, num_phases,
                        num_kv, row[Load.REAL_LOAD] + row[Load.INTERCONNECTION_LOAD], row[Load.REACTIVE_LOAD], int(row[Load.MODEL]),
                        str_conn, row[Load.MIN_PU_VOLTAGE], row[Load.MAX_PU_VOLTAGE])
                if row[Load.FUNCTIONAL_STATUS]*row[Load.OPERATIONAL_STATUS] == 0.0:
                    dss.Command = 'Disable \'Load.{}\''.format(str_self_name)
            return 0
        except:
            print('Error: #-1225')
            return -1225
    def voltagesToSets(self):
        """Collect the distinct nominal line-to-line voltages of all loads."""
        try:
            return set(self.matrix[:, Load.NOMINAL_LL_VOLTAGE])
        except:
            print('Error: #-1229')
            return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read phase-A voltages/currents/powers for every load from the solved
        DSS circuit; 0 on success, -1231 on error."""
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.SetActiveBus(str(Bus.CLID) + '_' + str(int(row[Load.ID])))
                dssCkt.Loads.Name = str(int(row[Load.TYPE])) + '_' + str(int(row[Load.ID])) + '_' + str(int(row[Load.A]))
                var_volt_mag = list(dssActvBus.VMagAngle)
                var_volt_pu = list(dssActvBus.puVmagAngle)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                # Zero the whole results slice, then fill phase A if present.
                row[Load.A_PU_VOLTAGE : Load.REACTIVE_POWER+1] = 0.0
                if row[Load.A] == 1.0:
                    row[Load.A_VOLTAGE] = var_volt_mag[idxcount*2]
                    row[Load.A_VOLTAGE_ANGLE] = var_volt_mag[idxcount*2 + 1]
                    row[Load.A_PU_VOLTAGE] = var_volt_pu[idxcount*2]
                    row[Load.A_CURRENT] = var_curr[idxcount*2]
                    row[Load.A_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[Load.REAL_POWER] += var_pow[idxcount*2]
                    row[Load.REACTIVE_POWER] += var_pow[idxcount*2 + 1]
                    idxcount += 1
            return 0
        except:
            print('Error: #-1231')
            return -1231
    def convertToDataFrame(self):
        """Rebuild a DataFrame from the stored matrix and column names."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1233')
            return -1233
    def convertToInputTensor(self):
        """Name and flatten the per-load ML inputs (continuous + categorical)."""
        try:
            input_list_continuous = []
            input_list_categorical = []
            input_col_continuous = ['real_load', 'reactive_load']
            input_col_categorical = ['operational_status']
            for row in self.matrix:
                for elem in input_col_continuous:
                    input_list_continuous.append('Load_' + str(int(row[Load.ID])) + '_' + elem)
                for elem in input_col_categorical:
                    input_list_categorical.append('Load_' + str(int(row[Load.ID])) + '_' + elem)
            inputdf = self.convertToDataFrame()
            inputdf_continuous = inputdf[input_col_continuous]
            inputdf_categorical = inputdf[input_col_categorical]
            return input_list_continuous, input_list_categorical, inputdf_continuous.values.flatten(), inputdf_categorical.values.flatten()
        except:
            print('Error: #-1234')
            return -1234
    def convertToOutputTensor(self):
        """Name and flatten the per-load ML outputs (pu voltage, current)."""
        try:
            output_list = []
            output_col = ['a_PU_voltage', 'a_current']
            for row in self.matrix:
                for elem in output_col:
                    output_list.append('Load_' + str(int(row[Load.ID])) + '_' + elem)
            outputdf = self.convertToDataFrame()
            outputdf = outputdf[output_col]
            return output_list, outputdf.values.flatten()
        except:
            print('Error: #-1235')
            return -1235
    def randomStochasticity(self):
        """Perturb one random load's real demand with Gaussian noise, clamped to [0, max]."""
        try:
            row = random.randrange(0, self.num_components)
            rval = random.normalvariate(0, 0.5*self.matrix[row, Load.REAL_LOAD_MAX]*0.06)
            self.matrix[row, Load.REAL_LOAD] += rval
            if self.matrix[row, Load.REAL_LOAD] > self.matrix[row, Load.REAL_LOAD_MAX]:
                self.matrix[row, Load.REAL_LOAD] = self.matrix[row, Load.REAL_LOAD_MAX]
            elif self.matrix[row, Load.REAL_LOAD] < 0.0:
                self.matrix[row, Load.REAL_LOAD] = 0.0
        except:
            print('Error: #-1236')
            return -1236
    def randomSwitching(self):
        """Switch one randomly chosen load out of service."""
        try:
            row = random.randrange(0, self.num_components)
            self.matrix[row, Load.OPERATIONAL_STATUS] = 0.0
        except:
            print('Error: #-1237')
            return -1237
    def multiplyLoadFactor(self, real_load_factor, reactive_load_factor):
        """Scale all real loads by real_load_factor (fraction of rated max).

        If reactive_load_factor is 0, reactive loads are scaled by the same real
        factor; otherwise reactive_load_factor is treated as a power factor and
        reactive demand is derived from the real demand via sqrt(1 - pf^2)/pf.
        Both factors are expected in [0, 1].
        """
        try:
            if real_load_factor < 0. or real_load_factor > 1. or reactive_load_factor < 0. or reactive_load_factor > 1.:
                print('Error: #DirectConnection')
            self.matrix[:, Load.REAL_LOAD] = self.matrix[:, Load.REAL_LOAD_MAX] * real_load_factor
            if reactive_load_factor == 0.:
                self.matrix[:, Load.REACTIVE_LOAD] = self.matrix[:, Load.REACTIVE_LOAD_MAX] * real_load_factor
            else:
                self.matrix[:, Load.REACTIVE_LOAD] = self.matrix[:, Load.REAL_LOAD] * (math.sqrt(1.0**2 - reactive_load_factor**2) / reactive_load_factor)
        except:
            print('Error: #-1238')
            return -1238
    def setInterconnectionLoad(self, interconn_dict):
        """Add electrical demand from interconnected pump/valve components
        (keyed 'pumpvalve' in interconn_dict) onto the matching loads."""
        try:
            object_pumpvalve = interconn_dict['pumpvalve']
            for load in self.matrix:
                for pumpvalve in object_pumpvalve.matrix:
                    if load[Load.ID] == pumpvalve[ENC.PumpValve.LOAD_ID]:
                        # Only operating pump/valves draw their rated power.
                        load[Load.INTERCONNECTION_LOAD] += pumpvalve[ENC.PumpValve.OPERATIONAL_STATUS] * pumpvalve[ENC.PumpValve.POWER_CONSUMPTION]
        except:
            print('Error: #-1240')
            return -1240
class SolarPV: #errors -1250 to -1274
CLID = 1304
ID = 0
TYPE = 1
FUNCTIONAL_STATUS = 2 # switch
NOMINAL_LL_VOLTAGE = 3
A = 4
CUT_IN_PERCENT = 5
CUT_OUT_PERCENT = 6
MIN_POWER_FACTOR = 7
MODEL = 8
PVEFF_CURVE_ID = 9
PVTEMP_CURVE_ID = 10
RATED_INVERTER = 11
RATED_CAPACITY = 12
WIRING = 13
IRRADIANCE = 14 # stochastic
MIN_PU_VOLTAGE = 15
MAX_PU_VOLTAGE = 16
OPERATIONAL_STATUS = 17 # switch
POWER_FACTOR_CONTROL = 18 # stochastic
A_PU_VOLTAGE = 19
A_VOLTAGE = 20
A_VOLTAGE_ANGLE = 21
A_CURRENT = 22
A_CURRENT_ANGLE = 23
REAL_POWER = 24
REACTIVE_POWER = 25
def __init__(self, dframe):
self.cols = list(dframe.columns)
self.matrix = dframe.values
self.num_components = len(dframe.index)
self.num_switches = self.num_components * 1 # temporary
self.num_stochastic = self.num_components * 2 # temperature?
    def classValue(cls, str):
        """Look up a class-level constant by name."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in SolarPV0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit one 'New PVSystem.*' command per row (efficiency and P-T curves are
        referenced by XYCurve id) and disable units whose functional*operational
        status is 0. Returns 0 on success, -1250 on failure."""
        try:
            for row in self.matrix:
                str_bus_conn = ''
                str_conn = 'wye'
                num_phases = 0
                num_kv = row[SolarPV.NOMINAL_LL_VOLTAGE]
                # Panel temperature is fixed for now.
                value_temperature = 35.0 # Celsius
                if row[SolarPV.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                if num_phases == 0:
                    print('Error: #-1251')
                if row[SolarPV.WIRING] == 0.0:
                    str_conn = 'delta'
                elif num_phases == 1:
                    # Single-phase wye: use line-to-neutral voltage.
                    num_kv = num_kv / math.sqrt(3.0)
                # Power-factor setpoint must not be below the inverter's minimum.
                if math.fabs(row[SolarPV.POWER_FACTOR_CONTROL]) < math.fabs(row[SolarPV.MIN_POWER_FACTOR]):
                    print('Error: SolarPV#')
                str_bus_name = str(Bus.CLID) + '_' + str(int(row[SolarPV.ID]))
                str_self_name = str(int(row[SolarPV.TYPE])) + '_' + str(int(row[SolarPV.ID])) # NOT BASED ON PHASES LIKE LOAD COMPONENTS
                if debug == 1:
                    print('New \'PVSystem.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kva={:f} Pf=\'{:f}\' Model=\'{}\' Conn=\'{}\' Vminpu=\'{:f}\' Vmaxpu=\'{:f}\' %Cutin=\'{:f}\' %Cutout=\'{:f}\' Pmpp=\'{:f}\' Irradiance=\'{:f}\' EffCurve=\'{}_{}\' Temperature=\'{:f}\' P-TCurve=\'{}_{}\'\n'.format(
                        str_self_name, str_bus_name, str_bus_conn, num_phases,
                        num_kv, row[SolarPV.RATED_INVERTER], row[SolarPV.POWER_FACTOR_CONTROL], int(row[SolarPV.MODEL]),
                        str_conn, row[SolarPV.MIN_PU_VOLTAGE], row[SolarPV.MAX_PU_VOLTAGE], row[SolarPV.CUT_IN_PERCENT],
                        row[SolarPV.CUT_OUT_PERCENT], row[SolarPV.RATED_CAPACITY], row[SolarPV.IRRADIANCE], XYCurve.CLID,
                        int(row[SolarPV.PVEFF_CURVE_ID]), value_temperature, XYCurve.CLID, int(row[SolarPV.PVTEMP_CURVE_ID])))
                    if row[SolarPV.FUNCTIONAL_STATUS]*row[SolarPV.OPERATIONAL_STATUS] == 0.0:
                        print('Disable \'PVSystem.{}\''.format(str_self_name))
                dss.Command = 'New \'PVSystem.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kva={:f} Pf=\'{:f}\' Model=\'{}\' Conn=\'{}\' Vminpu=\'{:f}\' Vmaxpu=\'{:f}\' %Cutin=\'{:f}\' %Cutout=\'{:f}\' Pmpp=\'{:f}\' Irradiance=\'{:f}\' EffCurve=\'{}_{}\' Temperature=\'{:f}\' P-TCurve=\'{}_{}\''.format(
                    str_self_name, str_bus_name, str_bus_conn, num_phases,
                    num_kv, row[SolarPV.RATED_INVERTER], row[SolarPV.POWER_FACTOR_CONTROL], int(row[SolarPV.MODEL]),
                    str_conn, row[SolarPV.MIN_PU_VOLTAGE], row[SolarPV.MAX_PU_VOLTAGE], row[SolarPV.CUT_IN_PERCENT],
                    row[SolarPV.CUT_OUT_PERCENT], row[SolarPV.RATED_CAPACITY], row[SolarPV.IRRADIANCE], XYCurve.CLID,
                    int(row[SolarPV.PVEFF_CURVE_ID]), value_temperature, XYCurve.CLID, int(row[SolarPV.PVTEMP_CURVE_ID]))
                if row[SolarPV.FUNCTIONAL_STATUS]*row[SolarPV.OPERATIONAL_STATUS] == 0.0:
                    dss.Command = 'Disable \'PVSystem.{}\''.format(str_self_name)
            return 0
        except:
            print('Error: #-1250')
            return -1250
def voltagesToSets(self):
try:
return set(self.matrix[:, SolarPV.NOMINAL_LL_VOLTAGE])
except:
print('Error: #-1254')
return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read phase-A voltages/currents/powers for every PV unit from the
        solved DSS circuit; 0 on success, -1256 on error."""
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.SetActiveBus(str(Bus.CLID) + '_' + str(int(row[SolarPV.ID])))
                dssCkt.PVSystems.Name = str(int(row[SolarPV.TYPE])) + '_' + str(int(row[SolarPV.ID])) # NOT BASED ON PHASES LIKE LOAD COMPONENTS
                var_volt_mag = list(dssActvBus.VMagAngle)
                var_volt_pu = list(dssActvBus.puVmagAngle)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                # Zero the whole results slice, then fill phase A if present.
                row[SolarPV.A_PU_VOLTAGE : SolarPV.REACTIVE_POWER+1] = 0.0
                if row[SolarPV.A] == 1.0:
                    row[SolarPV.A_VOLTAGE] = var_volt_mag[idxcount*2]
                    row[SolarPV.A_VOLTAGE_ANGLE] = var_volt_mag[idxcount*2 + 1]
                    row[SolarPV.A_PU_VOLTAGE] = var_volt_pu[idxcount*2]
                    row[SolarPV.A_CURRENT] = var_curr[idxcount*2]
                    row[SolarPV.A_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[SolarPV.REAL_POWER] += var_pow[idxcount*2]
                    row[SolarPV.REACTIVE_POWER] += var_pow[idxcount*2 + 1]
                    idxcount += 1
            return 0
        except:
            print('Error: #-1256')
            return -1256
def convertToDataFrame(self):
try:
return pd.DataFrame(data=self.matrix, columns=self.cols)
except:
print('Error: #-1258')
return -1258
def convertToInputTensor(self):
try:
input_list_continuous = []
input_list_categorical = []
input_col_continuous = ['irradiance', 'power_factor_control']
input_col_categorical = ['operational_status']
for row in self.matrix:
for elem in input_col_continuous:
input_list_continuous.append('SolarPV_' + str(int(row[SolarPV.ID])) + '_' + elem)
for elem in input_col_categorical:
input_list_categorical.append('SolarPV_' + str(int(row[SolarPV.ID])) + '_' + elem)
inputdf = self.convertToDataFrame()
inputdf_continuous = inputdf[input_col_continuous]
inputdf_categorical = inputdf[input_col_categorical]
return input_list_continuous, input_list_categorical, inputdf_continuous.values.flatten(), inputdf_categorical.values.flatten()
except:
print('Error: #-1259')
return -1259
def convertToOutputTensor(self):
try:
output_list = []
output_col = ['a_PU_voltage', 'a_current']
for row in self.matrix:
for elem in output_col:
output_list.append('SolarPV_' + str(int(row[SolarPV.ID])) + '_' + elem)
outputdf = self.convertToDataFrame()
outputdf = outputdf[output_col]
return output_list, outputdf.values.flatten()
except:
print('Error: #-1260')
return -1260
def randomStochasticity(self):
try:
row = random.randrange(0, self.num_components)
if random.randrange(0, 2) == 0:
max_irradiance = 1050.0
rval = random.normalvariate(0, 0.5*max_irradiance*0.07)
self.matrix[row, SolarPV.IRRADIANCE] += rval
if self.matrix[row, SolarPV.IRRADIANCE] > max_irradiance:
self.matrix[row, SolarPV.IRRADIANCE] = max_irradiance
elif self.matrix[row, SolarPV.IRRADIANCE] < 0.0:
self.matrix[row, SolarPV.IRRADIANCE] = 0.0
else:
rval = random.normalvariate(0, 0.5*(1.0-self.matrix[row, SolarPV.MIN_POWER_FACTOR])*0.1)
self.matrix[row, SolarPV.POWER_FACTOR_CONTROL] += rval
if self.matrix[row, SolarPV.POWER_FACTOR_CONTROL] > 1.0:
self.matrix[row, SolarPV.POWER_FACTOR_CONTROL] = 1.0
elif self.matrix[row, SolarPV.POWER_FACTOR_CONTROL] < self.matrix[row, SolarPV.MIN_POWER_FACTOR]:
self.matrix[row, SolarPV.POWER_FACTOR_CONTROL] = self.matrix[row, SolarPV.MIN_POWER_FACTOR]
except:
print('Error: #1261')
return -1261
def randomSwitching(self):
try:
row = random.randrange(0, self.num_components)
self.matrix[row, SolarPV.OPERATIONAL_STATUS] = 0.0
except:
print('Error: #1262')
return -1262
class WindTurbine: #errors -1275 to -1299
    """Wind-turbine fleet modelled as OpenDSS Generator elements.

    The class-level integers are column indices into ``self.matrix`` (one
    row per turbine); ``CLID`` is the component-class id used to build DSS
    element and bus names.  Gaps in the index sequence are columns reserved
    for phases B/C that this single-phase implementation does not use.
    """
    CLID = 1305
    # --- input/parameter columns ---
    ID = 0
    TYPE = 1
    FUNCTIONAL_STATUS = 2 # switch
    NOMINAL_LL_VOLTAGE = 3
    A = 4
    CUT_IN_SPEED = 7
    CUT_OUT_SPEED = 8
    MODEL = 9
    POWER_FACTOR = 10
    RATED_CAPACITY = 11
    RATED_SPEED = 12
    WIND_CURVE_ID = 15
    WIRING = 16
    WIND_SPEED = 17 # stochastic
    MIN_PU_VOLTAGE = 18
    MAX_PU_VOLTAGE = 19
    OPERATIONAL_STATUS = 20 # switch
    # --- output columns written by readAllDSSOutputs ---
    A_PU_VOLTAGE = 21
    A_VOLTAGE = 24
    A_VOLTAGE_ANGLE = 27
    A_CURRENT = 30
    A_CURRENT_ANGLE = 34
    REAL_POWER = 38
    REACTIVE_POWER = 39
    def __init__(self, dframe, xy_object):
        """Snapshot the parameter table; xy_object maps wind speed fraction
        to a generation fraction via its wind curves."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        self.num_stochastic = self.num_components * 1
        self.wind_object = xy_object
    def classValue(cls, str):
        """Look up a class attribute by name.

        NOTE(review): not decorated @classmethod and the parameter shadows
        the builtin ``str``; callers pass the class/instance explicitly.
        """
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in WindTurbine0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit a 'New Generator' DSS command for every turbine row.

        Output kW is the rated capacity scaled by the wind-curve generation
        fraction (zero above cut-out).  Disabled units (functional or
        operational status zero) are created then disabled.  Returns 0 on
        success, -1275 on failure (error printed).
        """
        try:
            for row in self.matrix:
                str_bus_conn = ''
                str_conn = 'wye'
                num_phases = 0
                num_kv = row[WindTurbine.NOMINAL_LL_VOLTAGE]
                if row[WindTurbine.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                if num_phases == 0:
                    print('Error: #-1274')
                if row[WindTurbine.WIRING] == 0.0:
                    str_conn = 'delta'
                elif num_phases == 1:
                    # NOTE(review): because of this elif, the LL->LN voltage
                    # correction is skipped for single-phase delta units --
                    # confirm that is intended (compare TwoWindingTransformer,
                    # which applies the correction independently of wiring).
                    num_kv = num_kv / math.sqrt(3.0)
                # Position of the current wind speed between cut-in and rated.
                wind_fraction = (row[WindTurbine.WIND_SPEED]-row[WindTurbine.CUT_IN_SPEED]) / (row[WindTurbine.RATED_SPEED]-row[WindTurbine.CUT_IN_SPEED])
                gen_fraction = self.wind_object.returnWindGenFraction(row[WindTurbine.WIND_CURVE_ID], wind_fraction)
                if row[WindTurbine.WIND_SPEED] > row[WindTurbine.CUT_OUT_SPEED]:
                    gen_fraction = 0.0
                str_self_name = str(int(row[WindTurbine.TYPE])) + '_' + str(int(row[WindTurbine.ID]))
                str_bus_name = str(Bus.CLID) + '_' + str(int(row[WindTurbine.ID]))
                if debug == 1:
                    print('New \'Generator.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Pf=\'{:f}\' Model=\'{}\' Conn=\'{}\'\n'.format(
                            str_self_name, str_bus_name, str_bus_conn, num_phases,
                            num_kv, gen_fraction*row[WindTurbine.RATED_CAPACITY], row[WindTurbine.POWER_FACTOR], int(row[WindTurbine.MODEL]),
                            str_conn))
                    if row[WindTurbine.FUNCTIONAL_STATUS]*row[WindTurbine.OPERATIONAL_STATUS] == 0.0:
                        print('Disable \'Generator.{}\''.format(str_self_name))
                dss.Command = 'New \'Generator.{}\' Bus1=\'{}{}\' Phases=\'{}\' Kv=\'{:f}\' Kw=\'{:f}\' Pf=\'{:f}\' Model=\'{}\' Conn=\'{}\''.format(
                        str_self_name, str_bus_name, str_bus_conn, num_phases,
                        num_kv, gen_fraction*row[WindTurbine.RATED_CAPACITY], row[WindTurbine.POWER_FACTOR], int(row[WindTurbine.MODEL]),
                        str_conn)
                if row[WindTurbine.FUNCTIONAL_STATUS]*row[WindTurbine.OPERATIONAL_STATUS] == 0.0:
                    dss.Command = 'Disable \'Generator.{}\''.format(str_self_name)
            return 0
        except:
            print('Error: #-1275')
            return -1275
    def voltagesToSets(self):
        """Return the set of distinct nominal LL voltages (empty set on error)."""
        try:
            return set(self.matrix[:, WindTurbine.NOMINAL_LL_VOLTAGE])
        except:
            print('Error: #-1279')
            return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Copy solved-state bus/element quantities back into the matrix.

        Returns 0 on success, -1281 on failure (error printed).
        """
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.SetActiveBus(str(Bus.CLID) + '_' + str(int(row[WindTurbine.ID])))
                dssCkt.Generators.Name = str(int(row[WindTurbine.TYPE])) + '_' + str(int(row[WindTurbine.ID]))
                # Interleaved [mag, angle, mag, angle, ...] lists from the COM API.
                var_volt_mag = list(dssActvBus.VMagAngle)
                var_volt_pu = list(dssActvBus.puVmagAngle)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                # Reset output columns before accumulating.
                row[WindTurbine.A_PU_VOLTAGE : WindTurbine.REACTIVE_POWER+1] = 0.0
                if row[WindTurbine.A] == 1.0:
                    row[WindTurbine.A_VOLTAGE] = var_volt_mag[idxcount*2]
                    row[WindTurbine.A_VOLTAGE_ANGLE] = var_volt_mag[idxcount*2 + 1]
                    row[WindTurbine.A_PU_VOLTAGE] = var_volt_pu[idxcount*2]
                    row[WindTurbine.A_CURRENT] = var_curr[idxcount*2]
                    row[WindTurbine.A_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[WindTurbine.REAL_POWER] += var_pow[idxcount*2]
                    row[WindTurbine.REACTIVE_POWER] += var_pow[idxcount*2 + 1]
                    idxcount += 1
            return 0
        except:
            print('Error: #-1281')
            return -1281
    def convertToDataFrame(self):
        """Return the component matrix as a DataFrame (-1283 on failure)."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1283')
            return -1283
    def convertToInputTensor(self):
        """Placeholder: no ML inputs defined for wind turbines yet."""
        try:
            # TO DO:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            pass
    def convertToOutputTensor(self):
        """Placeholder: no ML outputs defined for wind turbines yet."""
        try:
            # TO DO:
            return [], np.empty([0, 0], dtype=np.float64).flatten()
        except:
            pass
    def randomStochasticity(self):
        # Placeholder: wind-speed jitter not implemented yet.
        pass
    def randomSwitching(self):
        # Placeholder: random tripping not implemented yet.
        pass
class DirectConnection: #errors -1400 to -1424
    """Near-zero-impedance jumper between two buses, modelled as a short
    OpenDSS Line with tiny fixed R/X and effectively unlimited ampacity.

    Class-level integers are column indices into ``self.matrix``.
    """
    CLID = 1400
    # --- input/parameter columns ---
    ID = 0
    TYPE = 1
    TERMINAL_1_ID = 2
    TERMINAL_2_ID = 3
    FUNCTIONAL_STATUS = 4 # switch
    A = 5
    ANGLE_DELTA_LIMIT = 6
    OPERATIONAL_STATUS = 7 # switch
    # --- output columns written by readAllDSSOutputs ---
    A_1_CURRENT = 8
    A_1_CURRENT_ANGLE = 9
    A_2_CURRENT = 10
    A_2_CURRENT_ANGLE = 11
    REAL_POWER_1 = 12
    REACTIVE_POWER_1 = 13
    REAL_POWER_2 = 14
    REACTIVE_POWER_2 = 15
    REAL_POWER_LOSSES = 16
    REACTIVE_POWER_LOSSES = 17
    ANGLE_DELTA = 18
    UNITS = 'ft'
    def __init__(self, dframe):
        """Snapshot the parameter table; one matrix row per connection."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        """Look up a class attribute by name.

        NOTE(review): not decorated @classmethod and the parameter shadows
        the builtin ``str``; callers pass the class/instance explicitly.
        """
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in DirectConnection0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit a 'New Line' DSS command for every connection row.

        Terminal ids below 1 map to 'sourcebus'.  Returns 0 on success,
        -1400 on failure (error printed).

        NOTE(review): unlike Cable, no Open/Disable command is issued for
        rows with zero functional/operational status -- confirm intended.
        """
        try:
            for row in self.matrix:
                terminal_1_type = Bus.CLID
                terminal_2_type = Bus.CLID
                str_bus_conn = ''
                num_phases = 0
                if row[DirectConnection.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                if str_bus_conn == '':
                    print('Error: #-1401')
                str_self_name = str(int(row[DirectConnection.TYPE])) + '_' + str(int(row[DirectConnection.ID]))
                str_term1_name = str(terminal_1_type) + '_' + str(int(row[DirectConnection.TERMINAL_1_ID]))
                str_term2_name = str(terminal_2_type) + '_' + str(int(row[DirectConnection.TERMINAL_2_ID]))
                if row[DirectConnection.TERMINAL_1_ID] < 1:
                    str_term1_name = 'sourcebus'
                elif row[DirectConnection.TERMINAL_2_ID] < 1:
                    str_term2_name = 'sourcebus'
                if debug == 1:
                    print('New \'Line.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' Phases=\'{}\' R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\' Length=\'{:f}\' Units=\'{}\' Normamps=\'{}\' Emergamps=\'{}\'\n'.format(
                            str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                            str_bus_conn, num_phases, 0.00001, 0.00001,
                            0.0001, 0.0001, 0.1, DirectConnection.UNITS,
                            9999, 9999))
                # Tiny impedance + 0.1 ft length approximates an ideal jumper.
                dss.Command = 'New \'Line.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' Phases=\'{}\' R0=\'{:f}\' R1=\'{:f}\' X0=\'{:f}\' X1=\'{:f}\' Length=\'{:f}\' Units=\'{}\' Normamps=\'{}\' Emergamps=\'{}\''.format(
                        str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                        str_bus_conn, num_phases, 0.00001, 0.00001,
                        0.0001, 0.0001, 0.1, DirectConnection.UNITS,
                        9999, 9999)
            return 0
        except:
            print('Error: #-1400')
            return -1400
    def voltagesToSets(self):
        # A jumper has no nominal voltage of its own.
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read both terminals' currents/powers into the matrix and derive losses.

        The interleaved CurrentsMagAng/Powers lists are walked with
        ``idxcount``: terminal-1 conductors first (phases, then neutral if
        present), then terminal-2.  Returns 0 on success, -1404 on failure.
        """
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.Lines.Name = str(int(row[DirectConnection.TYPE])) + '_' + str(int(row[DirectConnection.ID]))
                var_bus = list(dssActvElem.BusNames)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                norm_amps = dssActvElem.NormalAmps
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                row[DirectConnection.A_1_CURRENT : DirectConnection.ANGLE_DELTA+1] = 0.0
                if row[DirectConnection.A] == 1.0:
                    row[DirectConnection.A_1_CURRENT] = var_curr[idxcount*2]
                    row[DirectConnection.A_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[DirectConnection.REAL_POWER_1] += var_pow[idxcount*2]
                    row[DirectConnection.REACTIVE_POWER_1] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the neutral conductor slot (values not stored).
                    # row[DirectConnection.N_1_CURRENT] = var_curr[idxcount*2]
                    # row[DirectConnection.N_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                if row[DirectConnection.A] == 1.0:
                    row[DirectConnection.A_2_CURRENT] = var_curr[idxcount*2]
                    row[DirectConnection.A_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[DirectConnection.REAL_POWER_2] += var_pow[idxcount*2]
                    row[DirectConnection.REACTIVE_POWER_2] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the terminal-2 neutral slot as well.
                    # row[DirectConnection.N_2_CURRENT] = var_curr[idxcount*2]
                    # row[DirectConnection.N_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                # Losses = |P_in + P_out| (terminal-2 powers carry opposite sign).
                row[DirectConnection.REAL_POWER_LOSSES] = math.fabs(row[DirectConnection.REAL_POWER_1] + row[DirectConnection.REAL_POWER_2])
                row[DirectConnection.REACTIVE_POWER_LOSSES] = math.fabs(row[DirectConnection.REACTIVE_POWER_1] + row[DirectConnection.REACTIVE_POWER_2])
            return 0
        except:
            print('Error: #-1404')
            return -1404
    def convertToDataFrame(self):
        """Return the component matrix as a DataFrame (-1406 on failure)."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1406')
            return -1406
    def convertToInputTensor(self):
        """No ML inputs for jumpers: empty name lists and empty value arrays."""
        try:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            pass
    def convertToOutputTensor(self):
        """No ML outputs for jumpers: empty name list and empty value array."""
        try:
            return [], np.empty([0, 0], dtype=np.float64).flatten()
        except:
            pass
    def randomStochasticity(self):
        # Jumpers have no stochastic parameters.
        pass
    def randomSwitching(self):
        # Jumpers are never randomly switched.
        pass
class Cable: #errors -1425 to -1449
    """Underground cable modelled as an OpenDSS Line referencing a LineCode.

    Class-level integers are column indices into ``self.matrix`` (one row
    per cable).
    """
    CLID = 1401
    # --- input/parameter columns ---
    ID = 0
    TYPE = 1
    TERMINAL_1_ID = 2
    TERMINAL_2_ID = 3
    FUNCTIONAL_STATUS_A = 4 # switch
    A = 5
    LENGTH = 6
    LINECODE_ID = 7
    ANGLE_DELTA_LIMIT = 8
    NORMAL_RATING = 9
    MAX_PU_CAPACITY = 10
    OPERATIONAL_STATUS_A = 11 # switch
    # --- output columns written by readAllDSSOutputs ---
    A_1_CURRENT = 12
    A_1_CURRENT_ANGLE = 13
    A_2_CURRENT = 14
    A_2_CURRENT_ANGLE = 15
    A_PU_CAPACITY = 16
    REAL_POWER_1 = 17
    REACTIVE_POWER_1 = 18
    REAL_POWER_2 = 19
    REACTIVE_POWER_2 = 20
    REAL_POWER_LOSSES = 21
    REACTIVE_POWER_LOSSES = 22
    ANGLE_DELTA = 23
    UNITS = 'ft'
    def __init__(self, dframe):
        """Snapshot the parameter table; one matrix row per cable."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        # One switchable quantity per in-service phase-A conductor.
        self.num_switches = int(self.matrix[:, Cable.A].sum()) * 1 # temporary
        self.num_components = len(dframe.index)
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        """Look up a class attribute by name.

        NOTE(review): not decorated @classmethod and the parameter shadows
        the builtin ``str``; callers pass the class/instance explicitly.
        """
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in Cable0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit a 'New Line' DSS command per cable; open both terminals of
        any out-of-service cable.  Returns 0 on success, -1425 on failure.

        NOTE(review): ampacity is derived from NORMAL_RATING assuming a
        138 kV or 230 kV base depending on a 100 MVA-ish threshold --
        confirm those magic numbers against the system's voltage bases.
        """
        try:
            for row in self.matrix:
                terminal_1_type = Bus.CLID
                terminal_2_type = Bus.CLID
                str_bus_conn = ''
                if row[Cable.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                #the correct thing to do would check this sum with the number of phases for the linecode
                if str_bus_conn == '':
                    print('Error: #-1426')
                str_self_name = str(int(row[Cable.TYPE])) + '_' + str(int(row[Cable.ID]))
                str_term1_name = str(terminal_1_type) + '_' + str(int(row[Cable.TERMINAL_1_ID]))
                str_term2_name = str(terminal_2_type) + '_' + str(int(row[Cable.TERMINAL_2_ID]))
                str_linec_name = str(LineCode.CLID) + '_' + str(int(row[Cable.LINECODE_ID]))
                if row[Cable.TERMINAL_1_ID] < 1:
                    str_term1_name = 'sourcebus'
                elif row[Cable.TERMINAL_2_ID] < 1:
                    str_term2_name = 'sourcebus'
                if row[Cable.NORMAL_RATING] <= 100000.0:
                    normal_amps = row[Cable.NORMAL_RATING] / 138.0
                else:
                    normal_amps = row[Cable.NORMAL_RATING] / 230.0
                emerg_amps = normal_amps * row[Cable.MAX_PU_CAPACITY]
                if debug == 1:
                    print('New \'Line.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' LineCode=\'{}\' Length=\'{:f}\' Units=\'{}\' Normamps=\'{:f}\' Emergamps=\'{:f}\'\n'.format(
                            str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                            str_bus_conn, str_linec_name, row[Cable.LENGTH], Cable.UNITS,
                            normal_amps, emerg_amps))
                    if row[Cable.A] == 1.0 and row[Cable.FUNCTIONAL_STATUS_A]*row[Cable.OPERATIONAL_STATUS_A] == 0.0:
                        print('Open \'Line.{}\' Term=1 1'.format(str_self_name))
                        print('Open \'Line.{}\' Term=2 1'.format(str_self_name))
                dss.Command = 'New \'Line.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' LineCode=\'{}\' Length=\'{:f}\' Units=\'{}\' Normamps=\'{:f}\' Emergamps=\'{:f}\''.format(
                        str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                        str_bus_conn, str_linec_name, row[Cable.LENGTH], Cable.UNITS,
                        normal_amps, emerg_amps)
                if row[Cable.A] == 1.0 and row[Cable.FUNCTIONAL_STATUS_A]*row[Cable.OPERATIONAL_STATUS_A] == 0.0:
                    dss.Command = 'Open \'Line.{}\' Term=1 1'.format(str_self_name)
                    dss.Command = 'Open \'Line.{}\' Term=2 1'.format(str_self_name)
            return 0
        except:
            print('Error: #-1425')
            return -1425
    def voltagesToSets(self):
        # Cables inherit their voltage from the connected buses.
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read both terminals' currents/powers, then derive losses and the
        per-unit loading.  Returns 0 on success, -1429 on failure.
        """
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.Lines.Name = str(int(row[Cable.TYPE])) + '_' + str(int(row[Cable.ID]))
                var_bus = list(dssActvElem.BusNames)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                row[Cable.A_1_CURRENT : Cable.ANGLE_DELTA+1] = 0.0
                if row[Cable.A] == 1.0:
                    row[Cable.A_1_CURRENT] = var_curr[idxcount*2]
                    row[Cable.A_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[Cable.REAL_POWER_1] += var_pow[idxcount*2]
                    row[Cable.REACTIVE_POWER_1] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the terminal-1 neutral conductor slot.
                    # row[Cable.N_1_CURRENT] = var_curr[idxcount*2]
                    # row[Cable.N_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                if row[Cable.A] == 1.0:
                    row[Cable.A_2_CURRENT] = var_curr[idxcount*2]
                    row[Cable.A_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[Cable.REAL_POWER_2] += var_pow[idxcount*2]
                    row[Cable.REACTIVE_POWER_2] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the terminal-2 neutral conductor slot.
                    # row[Cable.N_2_CURRENT] = var_curr[idxcount*2]
                    # row[Cable.N_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                # Losses = |P_in + P_out| (terminal-2 powers carry opposite sign).
                row[Cable.REAL_POWER_LOSSES] = math.fabs(row[Cable.REAL_POWER_1] + row[Cable.REAL_POWER_2])
                row[Cable.REACTIVE_POWER_LOSSES] = math.fabs(row[Cable.REACTIVE_POWER_1] + row[Cable.REACTIVE_POWER_2])
                if row[Cable.NORMAL_RATING]*row[Cable.MAX_PU_CAPACITY] != 0.0:
                    row[Cable.A_PU_CAPACITY] = 0.5*(row[Cable.REAL_POWER_2] - row[Cable.REAL_POWER_1]) / (row[Cable.NORMAL_RATING]*row[Cable.MAX_PU_CAPACITY])
                else:
                    row[Cable.A_PU_CAPACITY] = 0.0
            return 0
        except:
            print('Error: #-1429')
            return -1429
    def convertToDataFrame(self):
        """Return the component matrix as a DataFrame (-1431 on failure)."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1431')
            return -1431
    def convertToInputTensor(self):
        """Build named categorical ML inputs (operational status only).

        Returns (continuous_names, categorical_names, continuous_values,
        categorical_values); the continuous parts are empty for cables.
        """
        try:
            input_list_continuous = []
            input_list_categorical = []
            input_col_categorical = ['operational_status_a']
            for row in self.matrix:
                for elem in input_col_categorical:
                    input_list_categorical.append('Cable_' + str(int(row[Cable.ID])) + '_' + elem)
            inputdf = self.convertToDataFrame()
            inputdf_categorical = inputdf[input_col_categorical]
            return input_list_continuous, input_list_categorical, np.empty([0,0], dtype=np.float64).flatten(), inputdf_categorical.values.flatten()
        except:
            print('Error: #-1432')
            return -1432
    def convertToOutputTensor(self):
        """Build the named ML output vector (per-unit capacity)."""
        try:
            output_list = []
            output_col = ['a_PU_capacity']
            for row in self.matrix:
                for elem in output_col:
                    output_list.append('Cable_' + str(int(row[Cable.ID])) + '_' + elem)
            outputdf = self.convertToDataFrame()
            outputdf = outputdf[output_col]
            return output_list, outputdf.values.flatten()
        except:
            print('Error: #-1433')
            return -1433
    def randomStochasticity(self):
        # Cables have no stochastic parameters.
        pass
    def randomSwitching(self):
        """Open one randomly chosen in-service cable.

        Picks the n-th closed switch (by operational-status count) and
        zeroes its status; prints #-1434/#-1435 if the bookkeeping is
        inconsistent.

        NOTE(review): the except branch prints '#-1436' but returns -1435 --
        one of the two codes looks like a typo.
        """
        try:
            flag = 0
            # Pick a 1-based index among the currently closed switches.
            ridx = random.randrange(1, int(self.matrix[:, Cable.OPERATIONAL_STATUS_A].sum())+1)
            tempval = 0
            for row in self.matrix:
                tempval += int(row[Cable.OPERATIONAL_STATUS_A].sum())
                if ridx <= tempval:
                    while True:
                        if row[Cable.OPERATIONAL_STATUS_A] != 1.0:
                            print('Error: #-1434')
                            break
                        # rphase = random.randrange(0, 3)
                        if row[Cable.OPERATIONAL_STATUS_A] == 1.0:
                            row[Cable.OPERATIONAL_STATUS_A] = 0.0
                            flag = 1
                            break
                        break
            if flag == 0:
                print('Error: #-1435')
        except:
            print('Error: #-1436')
            return -1435
class OverheadLine: #errors -1450 to -1474
    """Overhead line modelled as an OpenDSS Line with a per-row LineGeometry
    built from phase/neutral WireData and conductor coordinates.

    Class-level integers are column indices into ``self.matrix``.
    """
    CLID = 1402
    # --- input/parameter columns ---
    ID = 0
    TYPE = 1
    TERMINAL_1_ID = 2
    TERMINAL_2_ID = 3
    FUNCTIONAL_STATUS_A = 4 # switch
    A = 5
    LENGTH = 6
    NEUTRAL_WIREDATA_ID = 7
    PHASE_WIREDATA_ID = 8
    SOIL_RESISTIVITY = 9
    X_A_COORDINATE = 10
    X_N_COORDINATE = 11
    H_A_COORDINATE = 12
    H_N_COORDINATE = 13
    ANGLE_DELTA_LIMIT = 14
    OPERATIONAL_STATUS_A = 15 # switch
    # --- output columns written by readAllDSSOutputs ---
    A_1_CURRENT = 16
    A_1_CURRENT_ANGLE = 17
    A_2_CURRENT = 18
    A_2_CURRENT_ANGLE = 19
    A_PU_CAPACITY = 20
    REAL_POWER_1 = 21
    REACTIVE_POWER_1 = 22
    REAL_POWER_2 = 23
    REACTIVE_POWER_2 = 24
    REAL_POWER_LOSSES = 25
    REACTIVE_POWER_LOSSES = 26
    ANGLE_DELTA = 27
    UNITS = 'ft'
    def __init__(self, dframe):
        """Snapshot the parameter table; one matrix row per line."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        # One switchable quantity per in-service phase-A conductor.
        self.num_switches = int(self.matrix[:, OverheadLine.A].sum()) * 1 # temporary
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    def classValue(cls, str):
        """Look up a class attribute by name.

        NOTE(review): not decorated @classmethod and the parameter shadows
        the builtin ``str``; callers pass the class/instance explicitly.
        """
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in OverheadLine0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit LineGeometry + Line DSS commands for every row.

        A row with NEUTRAL_WIREDATA_ID < 1 has no neutral conductor.
        Returns 0 on success, -1450 on failure (error printed).

        NOTE(review): the neutral is always emitted as 'Cond=4' even when
        only one phase conductor exists -- confirm OpenDSS accepts the gap
        in conductor numbering for this geometry.
        """
        try:
            for row in self.matrix:
                terminal_1_type = Bus.CLID
                terminal_2_type = Bus.CLID
                str_bus_conn = ''
                num_phases = 0
                num_neutral = 1
                if row[OverheadLine.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                if num_phases == 0:
                    print('Error: #-1450')
                if row[OverheadLine.NEUTRAL_WIREDATA_ID] < 1.0:
                    num_neutral = 0
                str_self_name = str(int(row[OverheadLine.TYPE])) + '_' + str(int(row[OverheadLine.ID]))
                str_term1_name = str(terminal_1_type) + '_' + str(int(row[OverheadLine.TERMINAL_1_ID]))
                str_term2_name = str(terminal_2_type) + '_' + str(int(row[OverheadLine.TERMINAL_2_ID]))
                str_pwire_name = str(WireData.CLID) + '_' + str(int(row[OverheadLine.PHASE_WIREDATA_ID]))
                str_nwire_name = str(WireData.CLID) + '_' + str(int(row[OverheadLine.NEUTRAL_WIREDATA_ID]))
                if row[OverheadLine.TERMINAL_1_ID] < 1:
                    str_term1_name = 'sourcebus'
                elif row[OverheadLine.TERMINAL_2_ID] < 1:
                    str_term2_name = 'sourcebus'
                if debug == 1:
                    print('New \'LineGeometry.LG_{}\' Nconds=\'{}\' Nphases=\'{}\'\n'.format(
                            int(row[OverheadLine.ID]), num_phases+num_neutral, num_phases))
                    if row[OverheadLine.A] == 1.0:
                        print('~ Cond=1 Wire=\'{}\' X=\'{:0.3f}\' H=\'{:0.3f}\' Units=\'{}\'\n'.format(
                                str_pwire_name, row[OverheadLine.X_A_COORDINATE], row[OverheadLine.H_A_COORDINATE], OverheadLine.UNITS))
                    if num_neutral == 1:
                        print('~ Cond=4 Wire=\'{}\' X=\'{:0.3f}\' H=\'{:0.3f}\' Units=\'{}\'\n'.format(
                                str_nwire_name, row[OverheadLine.X_N_COORDINATE], row[OverheadLine.H_N_COORDINATE], OverheadLine.UNITS))
                    print('New \'Line.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' Phases=\'{}\' Geometry=\'LG_{}\' Length=\'{:f}\' Rho=\'{:f}\' Units=\'{}\'\n'.format(
                            str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                            str_bus_conn, num_phases, int(row[OverheadLine.ID]), row[OverheadLine.LENGTH],
                            row[OverheadLine.SOIL_RESISTIVITY], OverheadLine.UNITS))
                dss.Command = 'New \'LineGeometry.LG_{}\' Nconds=\'{}\' Nphases=\'{}\''.format(
                        int(row[OverheadLine.ID]), num_phases+num_neutral, num_phases)
                if row[OverheadLine.A] == 1.0:
                    dss.Command = '~ Cond=1 Wire=\'{}\' X=\'{:0.3f}\' H=\'{:0.3f}\' Units=\'{}\''.format(
                            str_pwire_name, row[OverheadLine.X_A_COORDINATE], row[OverheadLine.H_A_COORDINATE], OverheadLine.UNITS)
                if num_neutral == 1:
                    dss.Command = '~ Cond=4 Wire=\'{}\' X=\'{:0.3f}\' H=\'{:0.3f}\' Units=\'{}\''.format(
                            str_nwire_name, row[OverheadLine.X_N_COORDINATE], row[OverheadLine.H_N_COORDINATE], OverheadLine.UNITS)
                dss.Command = 'New \'Line.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' Phases=\'{}\' Geometry=\'LG_{}\' Length=\'{:f}\' Rho=\'{:f}\' Units=\'{}\''.format(
                        str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                        str_bus_conn, num_phases, int(row[OverheadLine.ID]), row[OverheadLine.LENGTH],
                        row[OverheadLine.SOIL_RESISTIVITY], OverheadLine.UNITS)
            return 0
        except:
            print('Error: #-1450')
            return -1450
    def voltagesToSets(self):
        # Overhead lines inherit their voltage from the connected buses.
        return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read both terminals' currents/powers, then derive losses and
        per-unit loading.  Returns 0 on success, -1454 on failure.

        NOTE(review): 1.0 / NormalAmps raises ZeroDivisionError for an
        unrated line; that is silently converted to the -1454 return by
        the catch-all handler.
        """
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.Lines.Name = str(int(row[OverheadLine.TYPE])) + '_' + str(int(row[OverheadLine.ID]))
                var_bus = list(dssActvElem.BusNames)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                norm_amps_inv = 1.0 / dssActvElem.NormalAmps
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                row[OverheadLine.A_1_CURRENT : OverheadLine.ANGLE_DELTA+1] = 0.0
                if row[OverheadLine.A] == 1.0:
                    row[OverheadLine.A_1_CURRENT] = var_curr[idxcount*2]
                    row[OverheadLine.A_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[OverheadLine.REAL_POWER_1] += var_pow[idxcount*2]
                    row[OverheadLine.REACTIVE_POWER_1] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the terminal-1 neutral conductor slot.
                    # row[OverheadLine.N_1_CURRENT] = var_curr[idxcount*2]
                    # row[OverheadLine.N_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                if row[OverheadLine.A] == 1.0:
                    row[OverheadLine.A_2_CURRENT] = var_curr[idxcount*2]
                    row[OverheadLine.A_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[OverheadLine.REAL_POWER_2] += var_pow[idxcount*2]
                    row[OverheadLine.REACTIVE_POWER_2] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the terminal-2 neutral conductor slot.
                    # row[OverheadLine.N_2_CURRENT] = var_curr[idxcount*2]
                    # row[OverheadLine.N_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount +=1
                # Losses = |P_in + P_out|; loading = mean terminal current / rating.
                row[OverheadLine.REAL_POWER_LOSSES] = math.fabs(row[OverheadLine.REAL_POWER_1] + row[OverheadLine.REAL_POWER_2])
                row[OverheadLine.REACTIVE_POWER_LOSSES] = math.fabs(row[OverheadLine.REACTIVE_POWER_1] + row[OverheadLine.REACTIVE_POWER_2])
                row[OverheadLine.A_PU_CAPACITY] = 0.5 * (row[OverheadLine.A_1_CURRENT] + row[OverheadLine.A_2_CURRENT]) * norm_amps_inv
            return 0
        except:
            print('Error: #-1454')
            return -1454
    def convertToDataFrame(self):
        """Return the component matrix as a DataFrame (-1456 on failure)."""
        try:
            return pd.DataFrame(data=self.matrix, columns=self.cols)
        except:
            print('Error: #-1456')
            return -1456
    def convertToInputTensor(self):
        """Placeholder: no ML inputs defined for overhead lines yet."""
        try:
            # TO DO:
            return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
        except:
            pass
    def convertToOutputTensor(self):
        """Placeholder: no ML outputs defined for overhead lines yet."""
        try:
            # TO DO:
            return [], np.empty([0, 0], dtype=np.float64).flatten()
        except:
            pass
    def randomStochasticity(self):
        # Overhead lines have no stochastic parameters.
        pass
    def randomSwitching(self):
        # Random tripping not implemented for overhead lines.
        pass
class TwoWindingTransformer: #errors -1475 to -1499
CLID = 1403
ID = 0
TYPE = 1
TERMINAL_1_ID = 2
TERMINAL_2_ID = 3
FUNCTIONAL_STATUS = 4 # switch
A = 5
MIN_TAP = 6
MAX_TAP = 7
R1 = 8
RATED_CAPACITY = 9
REGCONTROL_ID = 10
TERMINAL_1_LL_VOLTAGE = 11
TERMINAL_1_WIRING = 12
TERMINAL_2_LL_VOLTAGE = 13
TERMINAL_2_WIRING = 14
X1 = 15
ANGLE_DELTA_LIMIT = 16
MAX_PU_CAPACITY = 17
OPERATIONAL_STATUS = 18 # switch
TAP_1 = 19 # stochastic
TAP_2 = 20 # stochastic
A_1_CURRENT = 21
A_1_CURRENT_ANGLE = 22
A_2_CURRENT = 23
A_2_CURRENT_ANGLE = 24
REAL_POWER_1 = 25
REACTIVE_POWER_1 = 26
REAL_POWER_2 = 27
REACTIVE_POWER_2 = 28
REAL_POWER_LOSSES = 29
REACTIVE_POWER_LOSSES = 30
ANGLE_DELTA = 31
PU_CAPACITY = 32
    def __init__(self, dframe):
        """Snapshot the transformer parameter table; one matrix row per unit."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        # Two stochastic controls per unit: TAP_1 and TAP_2.
        # NOTE(review): unlike Cable/DirectConnection, switch_chance and
        # stochastic_chance are not initialized here -- confirm no caller
        # reads them on this class.
        self.num_stochastic = self.num_components * 2
def classValue(cls, str):
try:
return getattr(cls, str)
except:
print('POWER ERROR in TwoWindingTransformer0')
    def createAllDSS(self, dss, interconn_dict, debug):
        """Emit Transformer DSS commands (header plus one '~ wdg=' line per
        winding) for every row; open both terminals of out-of-service units.

        Taps are rounded to integers and clamped to [MIN_TAP, MAX_TAP];
        each tap step is 0.625% (0.00625 pu).  Single-phase windings get the
        LL -> LN voltage correction.  Returns 0 on success, -1475 on failure
        (error printed).
        """
        try:
            for row in self.matrix:
                terminal_1_type = Bus.CLID
                terminal_2_type = Bus.CLID
                terminal_1_str_conn = 'wye'
                terminal_2_str_conn = 'wye'
                terminal_1_num_kv = row[TwoWindingTransformer.TERMINAL_1_LL_VOLTAGE]
                terminal_2_num_kv = row[TwoWindingTransformer.TERMINAL_2_LL_VOLTAGE]
                str_bus_conn = ''
                num_phases = 0
                num_windings = 2
                if row[TwoWindingTransformer.A] == 1.0:
                    str_bus_conn = str_bus_conn + '.1'
                    num_phases += 1
                # Delta wiring is invalid for a single-phase winding.
                if row[TwoWindingTransformer.TERMINAL_1_WIRING] == 0.0:
                    terminal_1_str_conn = 'delta'
                    if num_phases == 1:
                        print('Error: #-1475')
                if row[TwoWindingTransformer.TERMINAL_2_WIRING] == 0.0:
                    terminal_2_str_conn = 'delta'
                    if num_phases == 1:
                        print('Error: #-1475')
                if num_phases == 0:
                    print('Transformer {} has {} phases not {}'.format(row[TwoWindingTransformer.ID], num_phases, row[TwoWindingTransformer.A]))
                    print('Error: #-1476')
                if num_phases == 1:
                    terminal_1_num_kv = terminal_1_num_kv / math.sqrt(3.0)
                    terminal_2_num_kv = terminal_2_num_kv / math.sqrt(3.0)
                str_self_name = str(int(row[TwoWindingTransformer.TYPE])) + '_' + str(int(row[TwoWindingTransformer.ID]))
                str_term1_name = str(terminal_1_type) + '_' + str(int(row[TwoWindingTransformer.TERMINAL_1_ID]))
                str_term2_name = str(terminal_2_type) + '_' + str(int(row[TwoWindingTransformer.TERMINAL_2_ID]))
                if row[TwoWindingTransformer.TERMINAL_1_ID] < 1:
                    str_term1_name = 'sourcebus'
                elif row[TwoWindingTransformer.TERMINAL_2_ID] < 1:
                    str_term2_name = 'sourcebus'
                # Normalize taps to integer positions, then clamp in range.
                row[TwoWindingTransformer.TAP_1] = round(row[TwoWindingTransformer.TAP_1])
                row[TwoWindingTransformer.TAP_2] = round(row[TwoWindingTransformer.TAP_2])
                row[TwoWindingTransformer.MIN_TAP] = round(row[TwoWindingTransformer.MIN_TAP])
                row[TwoWindingTransformer.MAX_TAP] = round(row[TwoWindingTransformer.MAX_TAP])
                if row[TwoWindingTransformer.TAP_1] < row[TwoWindingTransformer.MIN_TAP]:
                    row[TwoWindingTransformer.TAP_1] = row[TwoWindingTransformer.MIN_TAP]
                elif row[TwoWindingTransformer.TAP_1] > row[TwoWindingTransformer.MAX_TAP]:
                    row[TwoWindingTransformer.TAP_1] = row[TwoWindingTransformer.MAX_TAP]
                if row[TwoWindingTransformer.TAP_2] < row[TwoWindingTransformer.MIN_TAP]:
                    row[TwoWindingTransformer.TAP_2] = row[TwoWindingTransformer.MIN_TAP]
                elif row[TwoWindingTransformer.TAP_2] > row[TwoWindingTransformer.MAX_TAP]:
                    row[TwoWindingTransformer.TAP_2] = row[TwoWindingTransformer.MAX_TAP]
                if debug == 1:
                    print('New \'Transformer.{}\' Phases=\'{}\' Windings=\'{}\' XHL=\'{:f}\' %R=\'{:f}\'\n'.format(
                            str_self_name, num_phases, num_windings, row[TwoWindingTransformer.X1],
                            row[TwoWindingTransformer.R1]))
                    print('~ wdg=1 Bus=\'{}{}\' Kv=\'{:f}\' Tap=\'{:f}\' Kva=\'{:f}\' Conn=\'{}\'\n'.format(
                            str_term1_name, str_bus_conn, terminal_1_num_kv, 1.0 + row[TwoWindingTransformer.TAP_1]*0.00625,
                            row[TwoWindingTransformer.RATED_CAPACITY], terminal_1_str_conn))
                    print('~ wdg=2 Bus=\'{}{}\' Kv=\'{:f}\' Tap=\'{:f}\' Kva=\'{:f}\' Conn=\'{}\'\n'.format(
                            str_term2_name, str_bus_conn, terminal_2_num_kv, 1.0 + row[TwoWindingTransformer.TAP_2]*0.00625,
                            row[TwoWindingTransformer.RATED_CAPACITY], terminal_2_str_conn))
                    if row[TwoWindingTransformer.FUNCTIONAL_STATUS]*row[TwoWindingTransformer.OPERATIONAL_STATUS] == 0.0:
                        print('Open \'Transformer.{}\' Term=1'.format(str_self_name))
                        print('Open \'Transformer.{}\' Term=2'.format(str_self_name))
                dss.Command = 'New \'Transformer.{}\' Phases=\'{}\' Windings=\'{}\' XHL=\'{:f}\' %R=\'{:f}\''.format(
                        str_self_name, num_phases, num_windings, row[TwoWindingTransformer.X1],
                        row[TwoWindingTransformer.R1])
                dss.Command = '~ wdg=1 Bus=\'{}{}\' Kv=\'{:f}\' Tap=\'{:f}\' Kva=\'{:f}\' Conn=\'{}\''.format(
                        str_term1_name, str_bus_conn, terminal_1_num_kv, 1.0 + row[TwoWindingTransformer.TAP_1]*0.00625,
                        row[TwoWindingTransformer.RATED_CAPACITY], terminal_1_str_conn)
                dss.Command = '~ wdg=2 Bus=\'{}{}\' Kv=\'{:f}\' Tap=\'{:f}\' Kva=\'{:f}\' Conn=\'{}\''.format(
                        str_term2_name, str_bus_conn, terminal_2_num_kv, 1.0 + row[TwoWindingTransformer.TAP_2]*0.00625,
                        row[TwoWindingTransformer.RATED_CAPACITY], terminal_2_str_conn)
                if row[TwoWindingTransformer.FUNCTIONAL_STATUS]*row[TwoWindingTransformer.OPERATIONAL_STATUS] == 0.0:
                    dss.Command = 'Open \'Transformer.{}\' Term=1'.format(str_self_name)
                    dss.Command = 'Open \'Transformer.{}\' Term=2'.format(str_self_name)
            return 0
        except:
            print('Error: #-1475')
            return -1475
def voltagesToSets(self):
try:
return set(self.matrix[:, TwoWindingTransformer.TERMINAL_1_LL_VOLTAGE]) | set(self.matrix[:, TwoWindingTransformer.TERMINAL_2_LL_VOLTAGE])
except:
print('Error: #-1479')
return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read both windings' currents/powers into the matrix, then derive
        losses and per-unit loading.  Returns 0 on success, -1481 on failure.
        """
        try:
            for row in self.matrix:
                idxcount = 0
                dssCkt.Transformers.Name = str(int(row[TwoWindingTransformer.TYPE])) + '_' + str(int(row[TwoWindingTransformer.ID]))
                var_bus = list(dssActvElem.BusNames)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                norm_amps = dssActvElem.NormalAmps
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                row[TwoWindingTransformer.A_1_CURRENT : TwoWindingTransformer.PU_CAPACITY+1] = 0.0
                if row[TwoWindingTransformer.A] == 1.0:
                    row[TwoWindingTransformer.A_1_CURRENT] = var_curr[idxcount*2]
                    row[TwoWindingTransformer.A_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[TwoWindingTransformer.REAL_POWER_1] += var_pow[idxcount*2]
                    row[TwoWindingTransformer.REACTIVE_POWER_1] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the winding-1 neutral conductor slot.
                    # row[TwoWindingTransformer.N_1_CURRENT] = var_curr[idxcount*2]
                    # row[TwoWindingTransformer.N_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                if row[TwoWindingTransformer.A] == 1.0:
                    row[TwoWindingTransformer.A_2_CURRENT] = var_curr[idxcount*2]
                    row[TwoWindingTransformer.A_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[TwoWindingTransformer.REAL_POWER_2] += var_pow[idxcount*2]
                    row[TwoWindingTransformer.REACTIVE_POWER_2] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # Skip the winding-2 neutral conductor slot.
                    # row[TwoWindingTransformer.N_2_CURRENT] = var_curr[idxcount*2]
                    # row[TwoWindingTransformer.N_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                # Losses = |P_in + P_out| (winding-2 powers carry opposite sign).
                row[TwoWindingTransformer.REAL_POWER_LOSSES] = math.fabs(row[TwoWindingTransformer.REAL_POWER_1] + row[TwoWindingTransformer.REAL_POWER_2])
                row[TwoWindingTransformer.REACTIVE_POWER_LOSSES] = math.fabs(row[TwoWindingTransformer.REACTIVE_POWER_1] + row[TwoWindingTransformer.REACTIVE_POWER_2])
                row[TwoWindingTransformer.PU_CAPACITY] = math.fabs(row[TwoWindingTransformer.A_1_CURRENT]) / (num_phases * norm_amps)
                # TO DO: fix above to 3x phase A current??
            return 0
        except:
            print('Error: #-1481')
            return -1481
def convertToDataFrame(self):
try:
return pd.DataFrame(data=self.matrix, columns=self.cols)
except:
print('Error: #-1483')
return -1483
def convertToInputTensor(self):
    """Build the continuous/categorical input name lists and flattened value
    arrays (tap positions and operational status) for every transformer row.
    Prints and returns -1484 on failure."""
    try:
        cont_cols = ['tap_1', 'tap_2']
        cat_cols = ['operational_status']
        names_cont = []
        names_cat = []
        for row in self.matrix:
            ident = 'TwoWindingTransformer_' + str(int(row[TwoWindingTransformer.ID])) + '_'
            names_cont.extend(ident + c for c in cont_cols)
            names_cat.extend(ident + c for c in cat_cols)
        frame = self.convertToDataFrame()
        return (names_cont, names_cat,
                frame[cont_cols].values.flatten(),
                frame[cat_cols].values.flatten())
    except:
        print('Error: #-1484')
        return -1484
def convertToOutputTensor(self):
    """Build the output name list and flattened value array (per-unit capacity)
    for every transformer row. Prints and returns -1485 on failure."""
    try:
        out_cols = ['PU_capacity']
        names = []
        for row in self.matrix:
            ident = 'TwoWindingTransformer_' + str(int(row[TwoWindingTransformer.ID])) + '_'
            names.extend(ident + c for c in out_cols)
        frame = self.convertToDataFrame()
        return names, frame[out_cols].values.flatten()
    except:
        print('Error: #-1485')
        return -1485
def randomStochasticity(self):
    """Nudge one randomly chosen transformer tap (TAP_1 or TAP_2) by one step
    up or down, clamped to the row's [MIN_TAP, MAX_TAP] range.
    Prints and returns -1486 on failure."""
    try:
        # Random call order matches the original: row, step, then coin flip.
        idx = random.randrange(0, self.num_components)
        step = random.randrange(-1, 2, 2)  # yields -1 or +1
        if random.randrange(0, 2) == 0:
            col = TwoWindingTransformer.TAP_1
        else:
            col = TwoWindingTransformer.TAP_2
        lo = self.matrix[idx, TwoWindingTransformer.MIN_TAP]
        hi = self.matrix[idx, TwoWindingTransformer.MAX_TAP]
        self.matrix[idx, col] = min(max(self.matrix[idx, col] + step, lo), hi)
    except:
        print('Error: #-1486')
        return -1486
def randomSwitching(self):
    """Force the operational status of one randomly selected transformer to
    0.0 (switched off). Prints and returns -1487 on failure."""
    try:
        idx = random.randrange(0, self.num_components)
        self.matrix[idx, TwoWindingTransformer.OPERATIONAL_STATUS] = 0.0
    except:
        print('Error: #-1487')
        return -1487
class Capacitor: #errors -1500 to -1524
    """Shunt capacitor bank component table.

    Wraps a one-row-per-capacitor numeric matrix taken from a DataFrame; the
    class-level integer constants below name the column indices of that matrix.
    """
    CLID = 1404  # component-class identifier used when building element names
    # --- input columns (from the source DataFrame) ---
    ID = 0
    TYPE = 1
    TERMINAL_1_ID = 2
    TERMINAL_2_ID = 3
    FUNCTIONAL_STATUS = 4 # switch
    A = 5  # phase-A presence flag (1.0 = present)
    NOMINAL_LL_VOLTAGE = 6
    REACTIVE_POWER_MIN_RATING = 7
    REACTIVE_POWER_MAX_RATING = 8
    WIRING = 9  # 0.0 selects delta connection in createAllDSS, otherwise wye
    NUMBER_OF_STAGES = 10
    ANGLE_DELTA_LIMIT = 11
    MAX_PU_CAPACITY = 12
    STAGE_ONE = 13 # switch
    STAGE_TWO = 14 # switch
    STAGE_THREE = 15 # switch
    STAGE_FOUR = 16 # switch
    STAGE_FIVE = 17 # switch
    # --- output columns (filled in by readAllDSSOutputs) ---
    A_1_CURRENT = 18
    A_1_CURRENT_ANGLE = 19
    A_2_CURRENT = 20
    A_2_CURRENT_ANGLE = 21
    REAL_POWER_1 = 22
    REACTIVE_POWER_1 = 23
    REAL_POWER_2 = 24
    REACTIVE_POWER_2 = 25
    REAL_POWER_LOSSES = 26
    REACTIVE_POWER_LOSSES = 27
    ANGLE_DELTA = 28
    PU_CAPACITY = 29
    def __init__(self, dframe):
        """Cache the DataFrame's column names/values plus switching and
        stochastic bookkeeping used by the random* methods."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    # NOTE(review): despite the 'cls' parameter name this is a plain instance
    # method (no @classmethod decorator), and 'str' shadows the builtin.
    # Callers may rely on the current signature, so both are left unchanged.
    def classValue(cls, str):
        """Return the class-level column-index constant named *str*; prints an
        error (and implicitly returns None) if no such attribute exists."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in Capacitor0')
def createAllDSS(self, dss, interconn_dict, debug):
    """Emit an OpenDSS 'New Capacitor.<name>' command for every capacitor row.

    Parameters
    ----------
    dss : OpenDSS COM/text interface; commands are issued via dss.Command.
    interconn_dict : interconnection lookup (unused here, kept for interface parity).
    debug : when 1, print each command instead of only issuing it.

    Returns 0 on success, -1500 on any failure.

    BUG FIX: the two-terminal dss.Command format string previously read
    "Kvar=\\'{:f}\\ Kv=..." -- the closing quote after the Kvar value was
    missing (a stray backslash-space instead of \\'), producing a malformed
    OpenDSS command. The debug print above it shows the intended form.
    """
    try:
        for row in self.matrix:
            # Reactive output starts at the minimum rating; each enabled stage
            # adds one equal increment up to the maximum rating.
            reactive_power_generation = row[Capacitor.REACTIVE_POWER_MIN_RATING]
            if row[Capacitor.REACTIVE_POWER_MAX_RATING] < row[Capacitor.REACTIVE_POWER_MIN_RATING]:
                print('Error: #1502')
            reactive_power_per_stage = math.fabs(row[Capacitor.REACTIVE_POWER_MAX_RATING] - row[Capacitor.REACTIVE_POWER_MIN_RATING]) / row[Capacitor.NUMBER_OF_STAGES]
            terminal_1_type = Bus.CLID
            bool_terminal_2 = False
            if row[Capacitor.TERMINAL_2_ID] > 0.0:
                terminal_2_type = Bus.CLID
                bool_terminal_2 = True
            num_kv = row[Capacitor.NOMINAL_LL_VOLTAGE]
            str_conn = 'wye'
            str_bus_conn = ''
            num_phases = 0
            if row[Capacitor.A] == 1.0:
                str_bus_conn = str_bus_conn + '.1'
                num_phases += 1
            if row[Capacitor.WIRING] == 0.0:
                str_conn = 'delta'
            elif num_phases == 1:
                # wye-connected single-phase bank: convert line-to-line kV to
                # line-to-neutral.
                num_kv = num_kv / math.sqrt(3.0)
            if num_phases == 0:
                print('Error: #-1501')
            if row[Capacitor.STAGE_ONE] == 1.0 and row[Capacitor.NUMBER_OF_STAGES] >= 1.0:
                reactive_power_generation += reactive_power_per_stage
            if row[Capacitor.STAGE_TWO] == 1.0 and row[Capacitor.NUMBER_OF_STAGES] >= 2.0:
                reactive_power_generation += reactive_power_per_stage
            if row[Capacitor.STAGE_THREE] == 1.0 and row[Capacitor.NUMBER_OF_STAGES] >= 3.0:
                reactive_power_generation += reactive_power_per_stage
            if row[Capacitor.STAGE_FOUR] == 1.0 and row[Capacitor.NUMBER_OF_STAGES] >= 4.0:
                reactive_power_generation += reactive_power_per_stage
            if row[Capacitor.STAGE_FIVE] == 1.0 and row[Capacitor.NUMBER_OF_STAGES] >= 5.0:
                reactive_power_generation += reactive_power_per_stage
            str_self_name = str(int(row[Capacitor.TYPE])) + '_' + str(int(row[Capacitor.ID]))
            str_term1_name = str(terminal_1_type) + '_' + str(int(row[Capacitor.TERMINAL_1_ID]))
            if row[Capacitor.TERMINAL_1_ID] < 1:
                str_term1_name = 'sourcebus'
            if bool_terminal_2:
                str_term2_name = str(terminal_2_type) + '_' + str(int(row[Capacitor.TERMINAL_2_ID]))
                if row[Capacitor.TERMINAL_2_ID] < 1:
                    str_term2_name = 'sourcebus'
            if debug == 1:
                if bool_terminal_2:
                    print('New \'Capacitor.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' Phases={} Kvar=\'{:f}\' Kv=\'{:f}\' Conn=\'{}\'\n'.format(
                        str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                        str_bus_conn, num_phases, reactive_power_generation, num_kv,
                        str_conn))
                else:
                    print('New \'Capacitor.{}\' Bus1=\'{}{}\' Phases={} Kvar=\'{:f}\' Kv=\'{:f}\' Conn=\'{}\'\n'.format(
                        str_self_name, str_term1_name, str_bus_conn, num_phases,
                        reactive_power_generation, num_kv, str_conn))
                if row[Capacitor.FUNCTIONAL_STATUS] == 0.0:
                    print('Open \'Capacitor.{}\' Term=1'.format(str_self_name))
                    print('Open \'Capacitor.{}\' Term=2'.format(str_self_name))
            if bool_terminal_2:
                # Fixed: closing quote after the Kvar value was missing.
                dss.Command = 'New \'Capacitor.{}\' Bus1=\'{}{}\' Bus2=\'{}{}\' Phases={} Kvar=\'{:f}\' Kv=\'{:f}\' Conn=\'{}\''.format(
                    str_self_name, str_term1_name, str_bus_conn, str_term2_name,
                    str_bus_conn, num_phases, reactive_power_generation, num_kv,
                    str_conn)
            else:
                dss.Command = 'New \'Capacitor.{}\' Bus1=\'{}{}\' Phases={} Kvar=\'{:f}\' Kv=\'{:f}\' Conn=\'{}\''.format(
                    str_self_name, str_term1_name, str_bus_conn, num_phases,
                    reactive_power_generation, num_kv, str_conn)
            if row[Capacitor.FUNCTIONAL_STATUS] == 0.0:
                dss.Command = 'Open \'Capacitor.{}\' Term=1'.format(str_self_name)
                dss.Command = 'Open \'Capacitor.{}\' Term=2'.format(str_self_name)
        return 0
    except:
        print('Error: #-1500')
        return -1500
def voltagesToSets(self):
return set()
    def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
        """Read per-capacitor currents and powers back from the active OpenDSS
        element into the output columns of self.matrix.

        For each row the matching OpenDSS capacitor is activated by name, its
        magnitude/angle current pairs and power pairs are consumed in
        conductor order (idxcount walks terminal-1 phase, optional neutral,
        terminal-2 phase, optional neutral), and losses plus per-unit loading
        are derived. Returns 0 on success, -1504 on any failure.
        """
        try:
            for row in self.matrix:
                idxcount = 0
                # activate the element; readings below refer to it
                dssCkt.Capacitors.Name = str(int(row[Capacitor.TYPE])) + '_' + str(int(row[Capacitor.ID]))
                var_bus = list(dssActvElem.BusNames)
                var_curr = list(dssActvElem.CurrentsMagAng)
                var_pow = list(dssActvElem.Powers)
                num_phases = dssActvElem.NumPhases
                num_conds = dssActvElem.NumConductors
                # rated amps derived from the larger reactive-power rating
                # (line-to-line voltage, sqrt(3) three-phase relation)
                norm_amps = math.sqrt(3.0) * max(math.fabs(row[Capacitor.REACTIVE_POWER_MAX_RATING]), math.fabs(row[Capacitor.REACTIVE_POWER_MIN_RATING])) / (num_phases * row[Capacitor.NOMINAL_LL_VOLTAGE])
                # zero all output columns before accumulating
                row[Capacitor.A_1_CURRENT : Capacitor.PU_CAPACITY+1] = 0.0
                if row[Capacitor.A] == 1.0:
                    row[Capacitor.A_1_CURRENT] = var_curr[idxcount*2]
                    row[Capacitor.A_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[Capacitor.REAL_POWER_1] += var_pow[idxcount*2]
                    row[Capacitor.REACTIVE_POWER_1] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # neutral conductor present: skip its readings
                    # row[Capacitor.N_1_CURRENT] = var_curr[idxcount*2]
                    # row[Capacitor.N_1_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                if row[Capacitor.A] == 1.0:
                    row[Capacitor.A_2_CURRENT] = var_curr[idxcount*2]
                    row[Capacitor.A_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    row[Capacitor.REAL_POWER_2] += var_pow[idxcount*2]
                    row[Capacitor.REACTIVE_POWER_2] += var_pow[idxcount*2 + 1]
                    idxcount += 1
                if num_conds > num_phases:
                    # row[Capacitor.N_2_CURRENT] = var_curr[idxcount*2]
                    # row[Capacitor.N_2_CURRENT_ANGLE] = var_curr[idxcount*2 + 1]
                    idxcount += 1
                # losses: terminal powers sum to the element loss
                row[Capacitor.REAL_POWER_LOSSES] = math.fabs(row[Capacitor.REAL_POWER_1] + row[Capacitor.REAL_POWER_2])
                row[Capacitor.REACTIVE_POWER_LOSSES] = math.fabs(row[Capacitor.REACTIVE_POWER_1] + row[Capacitor.REACTIVE_POWER_2])
                row[Capacitor.PU_CAPACITY] = (math.fabs(row[Capacitor.A_1_CURRENT]) + math.fabs(row[Capacitor.A_2_CURRENT])) / (2.0 * num_phases * norm_amps)
                # TO DO: fix above to 3x phase a current?
            return 0
        except:
            print('Error: #-1504')
            return -1504
def convertToDataFrame(self):
try:
return pd.DataFrame(data=self.matrix, columns=self.cols)
except:
print('Error: #-1506')
return -1506
def convertToInputTensor(self):
try:
input_list_continuous = []
input_list_categorical = []
input_col_categorical = ['stage_one', 'stage_two', 'stage_three', 'stage_four', 'stage_five']
for row in self.matrix:
for elem in input_col_categorical:
input_list_categorical.append('Capacitor_' + str(int(row[Capacitor.ID])) + '_' + elem)
inputdf = self.convertToDataFrame()
inputdf_categorical = inputdf[input_col_categorical]
return input_list_continuous, input_list_categorical, np.empty([0,0], dtype=np.float64).flatten(), inputdf_categorical.values.flatten()
except:
print('Error: #-1507')
return -1507
def convertToOutputTensor(self):
try:
output_list = []
output_col = ['PU_capacity']
for row in self.matrix:
for elem in output_col:
output_list.append('Capacitor_' + str(int(row[Capacitor.ID])) + '_' + elem)
outputdf = self.convertToDataFrame()
outputdf = outputdf[output_col]
return output_list, outputdf.values.flatten()
except:
print('Error: #-1508')
return -1508
def randomStochasticity(self):
pass
def randomSwitching(self):
try:
row = random.randrange(0, self.num_components)
self.matrix[row, Capacitor.OPERATIONAL_STATUS] = 0.0
except:
print('Error: #-1509')
return -1509
class Reactor: #errors -1525 to -1549
    """Series reactor component table (mostly stubbed; see TO DO markers).

    Wraps a one-row-per-reactor numeric matrix taken from a DataFrame; the
    class-level integer constants below name the column indices of that matrix.
    """
    CLID = 1405  # component-class identifier
    # --- input columns ---
    ID = 0
    TYPE = 1
    TERMINAL_1_ID = 2
    TERMINAL_2_ID = 3
    FUNCTIONAL_STATUS = 4 # switch
    A = 5  # phase-A presence flag (1.0 = present)
    NOMINAL_LL_VOLTAGE = 6
    NORMAL_AMPS = 7
    R1 = 8
    RATED_REACTIVE_POWER = 9
    ANGLE_DELTA_LIMIT = 10
    MAX_PU_CAPACITY = 11
    OPERATIONAL_STATUS = 12 # switch
    # --- output columns (to be filled by readAllDSSOutputs once implemented) ---
    A_1_CURRENT = 13
    A_1_CURRENT_ANGLE = 14
    A_2_CURRENT = 15
    A_2_CURRENT_ANGLE = 16
    REAL_POWER_1 = 17
    REACTIVE_POWER_1 = 18
    REAL_POWER_2 = 19
    REACTIVE_POWER_2 = 20
    REAL_POWER_LOSSES = 21
    REACTIVE_POWER_LOSSES = 22
    ANGLE_DELTA = 23
    PU_CAPACITY = 24
    def __init__(self, dframe):
        """Cache the DataFrame's column names/values plus switching and
        stochastic bookkeeping used by the random* methods."""
        self.cols = list(dframe.columns)
        self.matrix = dframe.values
        self.num_components = len(dframe.index)
        self.num_switches = self.num_components * 1 # temporary
        self.num_stochastic = self.num_components * 0
        self.switch_chance = (0.0, 0.0)
        self.stochastic_chance = (0.0, 0.0)
    # NOTE(review): despite the 'cls' parameter name this is a plain instance
    # method (no @classmethod decorator), and 'str' shadows the builtin.
    # Callers may rely on the current signature, so both are left unchanged.
    def classValue(cls, str):
        """Return the class-level column-index constant named *str*; prints an
        error (and implicitly returns None) if no such attribute exists."""
        try:
            return getattr(cls, str)
        except:
            print('POWER ERROR in Reactor0')
def createAllDSS(self, dss, interconn_dict, debug):
try:
# TO DO:
return 0
except:
print('Error: #-1525')
return -1525
def voltagesToSets(self):
return set()
def readAllDSSOutputs(self, dssCkt, dssActvElem, dssActvBus, var_bus, var_volt_mag, var_volt_pu, var_curr, var_pow):
try:
# TO DO:
return 0
except:
print('Error: #-1529')
return -1529
def convertToDataFrame(self):
try:
return pd.DataFrame(data=self.matrix, columns=self.cols)
except:
print('Error: #-1531')
return -1531
def convertToInputTensor(self):
try:
# TO DO:
return [], [], np.empty([0, 0], dtype=np.float64).flatten(), np.empty([0,0], dtype=np.float64).flatten()
except:
pass
def convertToOutputTensor(self):
try:
# TO DO:
return [], np.empty([0, 0], dtype=np.float64).flatten()
except:
pass
def randomStochasticity(self):
# TO DO:
pass
def randomSwitching(self):
# TO DO:
pass | apache-2.0 |
SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydev_ipython/qt_for_kernel.py | 2 | 3698 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
    """Translate an imported matplotlib module's Qt backend settings into a
    list of acceptable QT_API constants.

    Returns None when *mpl* is None or expresses no Qt preference; raises
    ImportError for an unrecognised backend.qt4/backend.qt5 value.
    """
    if mpl is None:
        return
    # #PyDev-779: In pysrc/pydev_ipython/qt_for_kernel.py, matplotlib_options
    # should be replaced with latest from ipython (i.e.: properly check
    # backend to decide upon qt4/qt5).
    backend = mpl.rcParams.get('backend', None)
    if backend == 'Qt4Agg':
        mpqt = mpl.rcParams.get('backend.qt4', None)
        if mpqt is None:
            return None
        lowered = mpqt.lower()
        if lowered == 'pyside':
            return [QT_API_PYSIDE]
        if lowered == 'pyqt4':
            return [QT_API_PYQT_DEFAULT]
        if lowered == 'pyqt4v2':
            return [QT_API_PYQT]
        raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
                          mpqt)
    if backend == 'Qt5Agg':
        mpqt = mpl.rcParams.get('backend.qt5', None)
        if mpqt is None:
            return None
        if mpqt.lower() == 'pyqt5':
            return [QT_API_PYQT5]
        raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
                          mpqt)
    # Fallback without checking backend (previous code)
    mpqt = mpl.rcParams.get('backend.qt4', None)
    if mpqt is None:
        mpqt = mpl.rcParams.get('backend.qt5', None)
    if mpqt is None:
        return None
    lowered = mpqt.lower()
    if lowered == 'pyside':
        return [QT_API_PYSIDE]
    if lowered == 'pyqt4':
        return [QT_API_PYQT_DEFAULT]
    if lowered == 'pyqt5':
        return [QT_API_PYQT5]
    raise ImportError("unhandled value for qt backend from matplotlib: %r" %
                      mpqt)
def get_options():
    """Return a list of acceptable QT APIs, in decreasing order of preference,
    or None when the QT_API environment variable should drive the choice."""
    # A Qt binding already imported elsewhere is binding on us.
    already = loaded_api()
    if already is not None:
        return [already]
    mpl = sys.modules.get('matplotlib', None)
    if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
        # matplotlib <= 1.0.1 only supports PyQt4 v1
        return [QT_API_PYQT_DEFAULT]
    if os.environ.get('QT_API', None) is None:
        # No ETS variable: ask matplotlib, otherwise accept any binding.
        return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
    # ETS variable present; caller will fall back to external.qt.
    return None
# Decide which binding to load; None means defer to ETS' external.qt shim.
api_opts = get_options()
if api_opts is not None:
    QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
    from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| bsd-3-clause |
chhao91/pysal | pysal/contrib/spint/gravity_stats.py | 8 | 5584 | # coding=utf-8
"""
Statistics for gravity models
References
----------
Fotheringham, A. S. and O'Kelly, M. E. (1989). Spatial Interaction Models: Formulations
and Applications. London: Kluwer Academic Publishers.
Williams, P. A. and A. S. Fotheringham (1984), The Calibration of Spatial Interaction
Models by Maximum Likelihood Estimation with Program SIMODEL, Geographic Monograph
Series, 7, Department of Geography, Indiana University.
Wilson, A. G. (1967). A statistical theory of spatial distribution models.
Transportation Research, 1, 253–269.
"""
__author__ = "Taylor Oshan tayoshan@gmail.com"
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
import gravity as gv
def sys_stats(gm):
    """Compute descriptive statistics of the spatial-interaction model system.

    Expects *gm* to expose origin/destination series (o, d), the OD table dt,
    observed flows f, estimated flows ests and costs/distances c. Returns a
    dict of counts, totals and mean trip lengths.
    """
    out = {}
    out['num_origins'] = len(gm.o.unique())
    out['num_destinations'] = len(gm.d.unique())
    pairs = len(gm.dt)
    out['OD_pairs'] = pairs
    observed = np.sum(gm.f)
    out['observed_flows'] = observed
    predicted = np.sum(gm.ests)
    out['predicted_flows'] = predicted
    out['avg_dist'] = round(np.sum(gm.c) * float(1) / pairs)
    out['avg_dist_trav'] = round((np.sum(gm.f * gm.c)) * float(1) / np.sum(gm.f) * float(1))
    out['obs_mean_trip_len'] = (np.sum(gm.f * gm.c)) * float(1) / observed * float(1)
    out['pred_mean_trip_len'] = (np.sum(gm.ests * gm.c)) / predicted
    return out
def ent_stats(gm):
    """
    calculate the entropy statistics for the model system

    Returns a dict of observed/predicted entropies, their deviations from the
    maximum (uniform) entropy, entropy variances and a t statistic comparing
    predicted vs observed entropy (Wilson 1967 / Fotheringham & O'Kelly 1989).
    """
    entropy_stats = {}
    # flow shares per OD pair.
    # NOTE(review): phatij is normalised by the *observed* total np.sum(gm.f),
    # not by np.sum(gm.ests) -- confirm this is intended.
    pij = gm.f/np.sum(gm.f)
    phatij = gm.ests/np.sum(gm.f)
    # maximum entropy = log of the number of OD pairs (uniform distribution)
    max_ent = round(np.log(len(gm.dt)), 4)
    entropy_stats['maximum_entropy'] = max_ent
    pred_ent = round(-np.sum(phatij*np.log(phatij)), 4)
    entropy_stats['predicted_entropy'] = pred_ent
    obs_ent = round(-np.sum(pij*np.log(pij)), 4)
    entropy_stats['observed_entropy'] = obs_ent
    # deviations of each entropy from the maximum, and from each other
    diff_pred_ent = round(max_ent - pred_ent, 4)
    entropy_stats['max_pred_deviance'] = diff_pred_ent
    diff_obs_ent = round(max_ent - obs_ent, 4)
    entropy_stats['max_obs_deviance'] = diff_obs_ent
    diff_ent = round(pred_ent - obs_ent, 4)
    entropy_stats['pred_obs_deviance'] = diff_ent
    ent_rs = round(diff_pred_ent/diff_obs_ent, 4)
    entropy_stats['entropy_ratio'] = ent_rs
    # large-sample variance approximations for the two entropy estimates
    obs_flows = np.sum(gm.f)
    var_pred_ent = round(((np.sum(phatij*(np.log(phatij)**2))-pred_ent**2)/obs_flows) + ((len(gm.dt)-1)/(2*obs_flows**2)), 11)
    entropy_stats['variance_pred_entropy'] = var_pred_ent
    var_obs_ent = round(((np.sum(pij*np.log(pij)**2)-obs_ent**2)/obs_flows) + ((len(gm.dt)-1)/(2*obs_flows**2)), 11)
    entropy_stats['variance_obs_entropy'] = var_obs_ent
    # two-sample t statistic for the entropy difference
    t_stat_ent = round((pred_ent-obs_ent)/((var_pred_ent+var_obs_ent)**.5), 4)
    entropy_stats['t_stat_entropy'] = t_stat_ent
    return entropy_stats
def fit_stats(gm):
    """Goodness-of-fit statistics: standardised RMSE between observed and
    estimated flows, and the squared Pearson correlation."""
    n = len(gm.dt)
    residuals = gm.f - gm.ests
    srmse = ((np.sum(residuals ** 2) / n) ** .5) / (np.sum(gm.f) / n)
    r = pearsonr(gm.ests, gm.f)[0]
    return {'srmse': srmse, 'r_squared': r ** 2}
def param_stats(gm):
    """
    calculate standard errors and likelihood statistics

    Standard errors come from numerically differentiating gv.o_function
    (finite difference with step .001); likelihood statistics compare the MLE
    log-likelihood against models with each parameter zeroed out.
    NOTE(review): indentation reconstructed from a whitespace-stripped source;
    the nesting of the zero-parameter loops should be verified upstream.
    """
    parameter_statistics = {}
    PV = list(gm.p.values())
    if len(PV) == 1:
        # single parameter: scalar second derivative via finite difference
        first_deriv = gv.o_function(PV, gm, gm.cf, gm.of, gm.df)
        recalc_fd = gv.o_function([PV[0]+.001], gm, gm.cf, gm.of, gm.df)
        diff = first_deriv[0]-recalc_fd[0]
        second_deriv = -(1/(diff/.001))
        gm.p['beta'] = PV
        parameter_statistics['beta'] = {}
        parameter_statistics['beta']['standard_error'] = np.sqrt(second_deriv)
    elif len(PV) > 1:
        # multiple parameters: build the (negative) Hessian column by column
        var_matrix = np.zeros((len(PV),len(PV)))
        for x, param in enumerate(PV):
            first_deriv = gv.o_function(PV, gm, gm.cf, gm.of, gm.df)
            var_params = list(PV)
            var_params[x] += .001
            var_matrix[x] = gv.o_function(var_params, gm, gm.cf, gm.of, gm.df)
            var_matrix[x] = (first_deriv-var_matrix[x])/.001
        # standard errors from the inverse information matrix diagonal
        errors = np.sqrt(-np.linalg.inv(var_matrix).diagonal())
        for x, param in enumerate(gm.p):
            parameter_statistics[param] = {'standard_error': errors[x]}
    # log-likelihood at the fitted (MLE) parameter values
    LL = np.sum((gm.f/np.sum(gm.f))*np.log((gm.ests/np.sum(gm.ests))))
    parameter_statistics['all_params'] = {}
    parameter_statistics['all_params']['mle_vals_LL'] = LL
    new_PV = list(PV)
    for x, param in enumerate(gm.p):
        # zero out one parameter at a time and refit the flows
        new_PV[x] = 0
        gv.o_function(new_PV, gm, gm.cf, gm.of, gm.df)
        LL_ests = gm.estimate_flows(gm.c, gm.cf, gm.of, gm.df, gm.p)
        new_LL = np.sum((gm.f/np.sum(gm.f))*np.log((LL_ests/np.sum(LL_ests))))
        parameter_statistics[param]['LL_zero_val'] = new_LL
        # relative likelihood (deviance-style) statistic for this parameter
        lamb = 2*np.sum(gm.f)*(LL-new_LL)
        parameter_statistics[param]['relative_likelihood_stat'] = lamb
        new_PV = list(PV)
    # finally zero *all* parameters for the null-model log-likelihood
    for x, param in enumerate(PV):
        new_PV[x] = 0
    gv.o_function(new_PV, gm, gm.cf, gm.of, gm.df)
    LL_ests = gm.estimate_flows(gm.c, gm.cf, gm.of, gm.df, gm.p)
    LL_zero = np.sum((gm.f/np.sum(gm.f))*np.log((LL_ests/np.sum(LL_ests))))
    parameter_statistics['all_params']['zero_vals_LL'] = LL_zero
    return parameter_statistics
| bsd-3-clause |
microsoft/EconML | prototypes/dml_iv/deep_dml_iv.py | 1 | 3996 |
import numpy as np
from sklearn.model_selection import KFold
from econml.utilities import hstack
from dml_iv import _BaseDMLIV
import keras
import keras.layers as L
from keras.models import Model, clone_model
class DeepDMLIV(_BaseDMLIV):
    """
    A child of the _BaseDMLIV class that specifies a deep neural network effect model
    where the treatment effect is linear in some featurization of the variable X.
    """
    # NOTE(review): training_options is a mutable default argument shared
    # across calls; left unchanged because the dict is only read here.
    def __init__(self, model_Y_X, model_T_X, model_T_XZ, h,
                 optimizer='adam',
                 training_options={ "epochs": 30,
                                    "batch_size": 32,
                                    "validation_split": 0.1,
                                    "callbacks": [keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)]},
                 n_splits=2, binary_instrument=False, binary_treatment=False):
        """
        Parameters
        ----------
        model_Y_X : arbitrary model to predict E[Y | X]
        model_T_X : arbitrary model to predict E[T | X]
        model_T_XZ : arbitrary model to predict E[T | X, Z]
        h : Model
            Keras model that takes X as an input and returns a layer of dimension d_y by d_t
        optimizer : keras optimizer
        training_options : dictionary of keras training options
        n_splits : number of splits to use in cross-fitting
        binary_instrument : whether to stratify cross-fitting splits by instrument
        binary_treatment : whether to stratify cross-fitting splits by treatment
        """
        class ModelEffect:
            """
            A wrapper class that takes as input X, T, y and estimates an effect model of the form
            $y= \\theta(X) \\cdot T + \\epsilon$

            Closes over *h*, *optimizer* and *training_options* from the
            enclosing __init__.
            """
            def __init__(self, h):
                """
                Parameters
                ----------
                h : Keras model mapping X to Theta(X)
                """
                # deep-copy the architecture and weights so repeated fits
                # don't share state with the caller's model
                self._h = clone_model(h)
                self._h.set_weights(h.get_weights())
            def fit(self, Y, T, X):
                """
                Parameters
                ----------
                y : outcome
                T : treatment
                X : features
                """
                d_x, d_t, d_y = [np.shape(arr)[1:] for arr in (X, T, Y)]
                self.d_t = d_t # keep track in case we need to reshape output by dropping singleton dimensions
                self.d_y = d_y # keep track in case we need to reshape output by dropping singleton dimensions
                d_x, d_t, d_y = [1 if not d else d[0] for d in (d_x, d_t, d_y)]
                x_in, t_in = [L.Input((d,)) for d in (d_x, d_t)]
                # reshape in case we get fewer dimensions than expected from h (e.g. a scalar)
                h_out = L.Reshape((d_y, d_t))(self._h(x_in))
                # y = Theta(X) . T : contract the treatment dimension
                y_out = L.Dot([2, 1])([h_out, t_in])
                self.theta = Model([x_in], self._h(x_in))
                model = Model([x_in, t_in], y_out)
                model.compile(optimizer, loss='mse')
                model.fit([X, T], Y, **training_options)
                return self
            def predict(self, X):
                """
                Parameters
                ----------
                X : features
                """
                # HACK: DRIV doesn't expect a treatment dimension, so pretend we got a vector even if we really had a one-column array
                # Once multiple treatments are supported, we'll need to fix this
                self.d_t = ()
                return self.theta.predict([X]).reshape((-1,)+self.d_y+self.d_t)
        super(DeepDMLIV, self).__init__(model_Y_X, model_T_X, model_T_XZ,
                                        ModelEffect(h), n_splits=n_splits,
                                        binary_instrument=binary_instrument,
                                        binary_treatment=binary_treatment)
| mit |
fzalkow/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
    """Store constructor parameters; priors are converted to an ndarray up
    front (None means estimate them from class frequencies in fit)."""
    if priors is None:
        self.priors = None
    else:
        self.priors = np.asarray(priors)
    self.reg_param = reg_param
    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.
        tol : float, optional, default 1.0e-4
            Threshold used for rank estimation.
        """
        X, y = check_X_y(X, y)
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # empirical priors from class frequencies
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # per-class SVD of the centred data: Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # eigenvalues of the class covariance, shrunk towards identity
            S2 = (S ** 2) / (len(Xg) - 1)
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self
def _decision_function(self, X):
    """Per-class log-posterior scores (up to a shared constant) for each
    sample, using the stored per-class rotations and scalings."""
    check_is_fitted(self, 'classes_')
    X = check_array(X)
    mahalanobis = []
    for klass in range(len(self.classes_)):
        rotation = self.rotations_[klass]
        scaling = self.scalings_[klass]
        centered = X - self.means_[klass]
        # whiten: rotate then scale by 1/sqrt(eigenvalue)
        projected = np.dot(centered, rotation * (scaling ** (-0.5)))
        mahalanobis.append(np.sum(projected ** 2, 1))
    norm2 = np.array(mahalanobis).T  # shape = [len(X), n_classes]
    log_det = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
    return (-0.5 * (norm2 + log_det) + np.log(self.priors_))
def decision_function(self, X):
    """Apply decision function to an array of samples.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Array of samples (test vectors).

    Returns
    -------
    C : array, shape = [n_samples, n_classes] or [n_samples,]
        Decision function values related to each class, per sample; in the
        two-class case a single column holding the log-likelihood ratio of
        the positive class.
    """
    scores = self._decision_function(X)
    if len(self.classes_) == 2:
        # binary special case: report positive-class log-likelihood ratio
        return scores[:, 1] - scores[:, 0]
    return scores
def predict(self, X):
    """Perform classification on an array of test vectors X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    C : array, shape = [n_samples]
        The class label with the highest decision score for each sample.
    """
    scores = self._decision_function(X)
    best = scores.argmax(1)
    return self.classes_.take(best)
def predict_proba(self, X):
    """Return posterior probabilities of classification.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Array of samples/test vectors.

    Returns
    -------
    C : array, shape = [n_samples, n_classes]
        Posterior probabilities of classification per class.
    """
    values = self._decision_function(X)
    # subtract the row max before exponentiating for numerical stability;
    # the constant cancels in the normalisation below
    unnormalized = np.exp(values - values.max(axis=1)[:, np.newaxis])
    return unnormalized / unnormalized.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
    """Return posterior log-probabilities of classification.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Array of samples/test vectors.

    Returns
    -------
    C : array, shape = [n_samples, n_classes]
        Posterior log-probabilities of classification per class.
    """
    # XXX: could be reformulated to avoid exp-then-log precision loss
    return np.log(self.predict_proba(X))
| bsd-3-clause |
benschneider/sideprojects1 | FFT_filters/corrFiltSim.py | 1 | 2043 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 06 13:43:48 2015
@author: Ben
simulation of
correlation calculations and averages
with data which includes noise.
"""
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from time import time
# --- simulation parameters (Python 2 script) ---
lags = 25          # correlation lags to plot on each side of zero
points = int(1e4)  # samples per acquisition
triggers = 1       # number of averaged acquisitions
NoiseRatio1 = 10.0  # noise-to-signal scaling for channel a
NoiseRatio2 = 10.0  # noise-to-signal scaling for channel b
k = np.float64(triggers)
autocorr2 = 0.0  # running average of the cross-correlation
t0 = time()
sig1 = np.float64(np.random.randn(points))  # common signal buried in both channels
sig0 = sig1
# NOTE(review): indentation reconstructed -- the loop below is assumed to span
# the per-trigger acquisition through the accumulation into autocorr2.
for i in range(triggers):
    # print i, time() - t0
    # two independent noise channels, each carrying an attenuated copy of sig1
    a = np.float64(np.random.randn(points))
    b = np.float64(np.random.randn(points))
    a = np.float64((a + sig1/NoiseRatio1))
    b = np.float64((b + sig1/NoiseRatio2))
    # b = a/np.float64(100.0) + b (this simulates some data on top of noise)
    # a = sig1 # as test
    # b = sig1 # as test
    # t1 = time()
    # covab = np.cov(a,b)
    t2 = time()
    # cross-/auto-correlations via FFT convolution with one input reversed
    autocorr = signal.fftconvolve(a, b[::-1], mode='full')/(len(a)-1)
    autocorraa = signal.fftconvolve(a, a[::-1], mode='full')/(len(a)-1)
    autocorrbb = signal.fftconvolve(b, b[::-1], mode='full')/(len(b)-1)
    # autocorr2 = signal.correlate(a, b, mode='full') # Very slow
    # autocorr2 = np.convolve(a,b[::-1]) # Slow
    t3 = time()
    # autocorr = autocorr/abs(autocorr).max()
    autocorr2 += np.float64(autocorr/k)  # average over triggers
    t4 = time()
    # print t1-t0
    # print t2-t1
    # print t3-t2
    # print t4-t0
# mean wall time per trigger (Python 2 print statement / integer division)
print (time() - t0)/k
# plt.plot(np.arange(-len(a)+1,len(a)), autocorr2)
# central +/- lags window around zero lag
result = autocorr2[(len(autocorr2)+1)/2-lags:(len(autocorr2)+1)/2+lags-1]
# plt.plot(np.arange(-lags+1,lags),
#          autocorr2[segments*lags-lags:segments*lags+lags-1])
plt.plot(np.arange(-lags+1, lags), result)
print result.max()
# res2 = signal.convolve(sig0, sig0[::-1],
#        mode='full')/(len(sig0)-1) # this simply takes way too long!!
# plt.plot(np.arange(-lags+1,lags), res2)
# result2 = signal.fftconvolve(sig0, sig0[::-1], mode='full')/(len(sig0)-1)
# res2 = result2[(len(result2)+1)/2-lags:(len(result2)+1)/2+lags-1]
# plt.plot(np.arange(-lags+1,lags), res2)
| gpl-2.0 |
salkinium/bachelor | link_analysis/fec_plotter.py | 1 | 13015 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os
import pylab
import logging
import datetime
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle
import scipy.stats as stats
import pandas as pd
import seaborn as sns
sns.set_style("whitegrid")
import json
import math
import re
from link_file import LinkFile
class FEC_Plotter(object):
def __init__(self, files, basename):
self.basename = basename
self.files = files
self.logger = logging.getLogger('FEC Plotter')
self.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s: %(message)s')
# console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger.handlers = []
self.logger.addHandler(ch)
self.ranges = {'rssi': range(-100, -80),
'lqi': range(20, 115),
'power': range(0, 30),
'bit_errors': range(0, 80),
'byte_errors': range(0, 40),
'temperature': range(20, 100)}
self.raw_dicts = []
for file in self.files:
with open(file, 'r') as raw_file:
# try:
values = json.load(raw_file)
values['file'] = file
match = re.search("-rs_(?P<n>[0-9]{2})_(?P<k>[0-9]{2})_random-2", file)
if (match):
values['n']= int(match.group('n'))
values['k'] = int(match.group('k'))
else:
values['n'] = 80
values['k'] = 70
self.raw_dicts.append(values)
# except:
# self.logger.error("Raw file corrupted!")
def get_mean_time_values_for_key(self, key, lower=None, upper=None):
nkey = key.lower().replace(" ", "_")
data = dict(self.raw_dicts[0][nkey])
if len(data['time']) > 0:
data['time'] = map(lambda dt: datetime.datetime.fromtimestamp(dt), data['time'])
data.update({'mean': [], 'std_l': [], 'std_u': []})
for ii in range(len(data['time'])):
mean = np.mean(data['values'][ii])
std = np.std(data['values'][ii], ddof=1)
data['mean'].append(mean)
data['std_l'].append(max(lower, (mean - std)) if lower != None else mean - std)
data['std_u'].append(min(upper, (mean - std)) if upper != None else mean + std)
return data
def create_prr_plot(self, nax=None, prr=None, name="PRR"):
if prr == None:
for values in self.raw_dicts:
if values['k'] == 70:
prr = dict(values['prr'])
break
if len(prr['time']) > 0:
prr['time'] = map(lambda dt: datetime.datetime.fromtimestamp(dt), prr['time'])
# normalize for all sent messages
for ii in range(len(prr['time'])):
all_sent = prr['sent'][ii]
if all_sent > 0:
prr['received'][ii] = float(prr['received'][ii]) / all_sent
prr['received_without_error'][ii] = float(prr['received_without_error'][ii]) / all_sent
prr['decoded_without_error'][ii] = float(prr['decoded_without_error'][ii]) / all_sent
prr['coded_without_error'][ii] = float(prr['coded_without_error'][ii]) / all_sent
if nax == None:
fig, ax = plt.subplots(1)
x, y = fig.get_size_inches()
fig.set_size_inches(x * 0.625, y * 0.625)
else:
ax = nax
zeros = np.zeros(len(prr['time']))
ones = np.ones(len(prr['time']))
legend = {'patches': [], 'labels': []}
if sum(prr['decoded_without_error']) > 0:
ax.fill_between(prr['time'], y1=prr['decoded_without_error'], y2=ones,
where=prr['decoded_without_error'] < ones, color='r', interpolate=True)
legend['patches'].append(Rectangle((0, 0), 1, 1, fc="r"))
legend['labels'].append("Decoded with error")
ax.fill_between(prr['time'], y1=prr['received'], y2=ones, where=prr['received'] < ones, color='0.65',
interpolate=True)
legend['patches'].append(Rectangle((0, 0), 1, 1, fc="0.65"))
legend['labels'].append("Reception timeout")
if sum(prr['coded_without_error']) > 0:
rx_wo_error, = ax.plot_date(prr['time'], prr['coded_without_error'], markersize=0, c='k', linestyle='-',
linewidth=0.8)
else:
rx_wo_error, = ax.plot_date(prr['time'], prr['received_without_error'], markersize=0, c='k',
linestyle='-', linewidth=0.8)
legend['patches'].append(rx_wo_error)
legend['labels'].append("Received without Error")
ax.set_ylim(ymin=0, ymax=1)
ax.set_ylabel(name, fontsize=18)
ax.legend(legend['patches'],
legend['labels'],
loc=3,
prop={'size': 12})
if nax == None:
fig.autofmt_xdate()
return ax
def create_mean_time_plot_for_key(self, key, nax=None):
nkey = key.lower().replace(" ", "_")
data = self.get_mean_time_values_for_key(key, 0 if 'errors' in nkey else None)
if len(data['time']) > 0:
if nax == None:
fig, ax = plt.subplots(1)
x, y = fig.get_size_inches()
fig.set_size_inches(x * 0.625, y * 0.625)
else:
ax = nax
ax.set_ylim(ymin=min(self.ranges[nkey]), ymax=max(self.ranges[nkey]))
if 'temperature' in nkey:
key += " ($^\circ$C)"
ax.set_ylabel(key, fontsize=18)
if 'temperature' not in nkey:
ax.plot_date(data['time'], data['std_u'], c='0.5', linestyle='-', markersize=0, linewidth=1)
ax.plot_date(data['time'], data['std_l'], c='0.5', linestyle='-', markersize=0, linewidth=1)
ax.plot_date(data['time'], data['mean'], c='k', linestyle='-', markersize=0, linewidth=1.75)
if nax == None:
fig.autofmt_xdate()
return ax
def create_throughput_plot(self, nax=None, labels=None):
if nax == None:
fig, ax = plt.subplots(1)
else:
ax = nax
colors = "bgrcmyk"
# colors = "rgb"
color_index = 0
legend = {'patches': [], 'labels': []}
for i, file in enumerate(self.raw_dicts):
prr = dict(file['prr'])
prr['time'] = map(lambda dt: datetime.datetime.fromtimestamp(dt), prr['time'])
n = file['n']
k = file['k']
throughput = float(k)/n
delta_half = datetime.timedelta(minutes=2, seconds=36)
prr_f = {'time': [prr['time'][0]],
'sent': [0],
'received': [0],
'received_without_error': [0],
'coded_without_error': [0],
'decoded_without_error': [0]}
prr_index = 0
reference_time = prr['time'][0] + delta_half + delta_half
for time_index in range(len(prr['time'])):
if prr['time'][time_index] <= reference_time:
prr_f['sent'][prr_index] += prr['sent'][time_index]
prr_f['received'][prr_index] += prr['received'][time_index]
prr_f['received_without_error'][prr_index] += prr['received_without_error'][time_index]
prr_f['coded_without_error'][prr_index] += prr['coded_without_error'][time_index]
prr_f['decoded_without_error'][prr_index] += prr['decoded_without_error'][time_index]
else:
prr_f['time'].append(reference_time + delta_half)
prr_f['sent'].append(prr['sent'][time_index])
prr_f['received'].append(prr['received'][time_index])
prr_f['received_without_error'].append(prr['received_without_error'][time_index])
prr_f['decoded_without_error'].append(prr['coded_without_error'][time_index])
prr_f['coded_without_error'].append(prr['decoded_without_error'][time_index])
prr_index += 1
reference_time += delta_half + delta_half
# normalize for all received messages
for ii in range(len(prr_f['time'])):
all_received = prr_f['received'][ii]
if all_received > 0:
prr_f['received_without_error'][ii] = float(prr_f['received_without_error'][ii]) / all_received
prr_f['decoded_without_error'][ii] = float(prr_f['decoded_without_error'][ii]) / all_received * throughput
prr_f['coded_without_error'][ii] = float(prr_f['coded_without_error'][ii]) / all_received * throughput
if len(legend['patches']) == 0:
ax.plot_date(prr_f['time'], prr_f['received_without_error'], markersize=0, c='k',
linestyle='--',
linewidth=2)
lines, = ax.plot_date(prr_f['time'], prr_f['decoded_without_error'], markersize=0, c=colors[color_index], linestyle='-', linewidth=1.5)
color_index = (color_index + 1) % (len(colors) - 0)
legend['patches'].append(lines)
legend['labels'].append("k = {}".format(k))
ax.set_ylim(ymin=0, ymax=1)
ax.set_ylabel('Throughput', fontsize=18)
if labels != None:
legend['labels'] = labels
ax.legend(legend['patches'],
legend['labels'],
loc=3,
prop={'size': 12})
# else:
# ax.annotate('k=60', xy=(prr['time'][5],6.05/8), xytext=(prr['time'][30], 4.2 / 8),
# arrowprops=dict(facecolor='w', edgecolor='k', linewidth=0.75, width=2, shrink=0.05, frac=0.23, headwidth=6))
# ax.annotate('k=74', xy=(prr['time'][5], 7.45 / 8), xytext=(prr['time'][40], 5.3 / 8),
# arrowprops=dict(facecolor='w', edgecolor='k', linewidth=0.75, width=2, shrink=0.05, frac=0.2, headwidth=6))
if nax == None:
fig.autofmt_xdate()
return ax
def create_multiple_plots(self):
fig, axarr = plt.subplots(2, sharex=True)
fig.set_size_inches(8 * 0.625, 0.625 * 2 * 5.5)
self.create_throughput_plot(axarr[0])
# self.create_mean_time_plot_for_key('Byte Errors', axarr[1])
# self.create_mean_time_plot_for_key('LQI', axarr[3])
self.create_mean_time_plot_for_key('Temperature', axarr[1])
fig.autofmt_xdate()
# self.logger.debug("Saving Plot to file: '{}_{}'".format(self.basename, "-".join(keys)))
# plt.savefig("{}_{}.pdf".format(self.basename, "-".join(keys)), bbox_inches='tight', pad_inches=0.1)
# plt.close()
return axarr
def create_multiple_plots_prr(self):
fig, axarr = plt.subplots(2, sharex=True)
fig.set_size_inches(8 * 0.625, 0.625 * 2 * 5.5)
self.create_prr_plot(axarr[0], dict(self.raw_dicts[0]['prr']), "PRR Original")
self.create_prr_plot(axarr[1], dict(self.raw_dicts[1]['prr']), "PRR Simulation")
# self.create_throughput_plot(axarr[2], ["Original", "Simulation"])
fig.autofmt_xdate()
# self.logger.debug("Saving Plot to file: '{}_{}'".format(self.basename, "-".join(keys)))
# plt.savefig("{}_{}.pdf".format(self.basename, "-".join(keys)), bbox_inches='tight', pad_inches=0.1)
# plt.close()
return axarr
def save_throughput_plot(self):
plot = self.create_multiple_plots()
if plot != None:
self.logger.debug("Saving Plot to file: '{}_{}'".format(self.basename, "Throughput"))
plt.savefig("{}_{}.pdf".format(self.basename, "Throughput"), bbox_inches='tight', pad_inches=0.1)
plt.close()
def save_prr_and_throughput_plots(self):
plot = self.create_multiple_plots_prr()
if plot != None:
self.logger.debug("Saving Plot to file: '{}_{}'".format(self.basename, "Throughput"))
plt.savefig("{}_{}.pdf".format(self.basename, "Throughput"), bbox_inches='tight', pad_inches=0.1)
plt.close()
| bsd-2-clause |
hlin117/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 42 | 27323 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert an n-dimensional array into nested tuples."""
    if len(X.shape) > 1:
        # More than one dimension left: recurse into each sub-array.
        return tuple(map(tuplify, X))
    # Base case: a one-dimensional array becomes a flat tuple of elements.
    return tuple(X)
def test_check_tuple_input():
    """Tuple inputs must be accepted and converted to equivalent arrays."""
    rng = np.random.RandomState(0)
    tuples_a = tuplify(rng.random_sample((5, 4)))
    tuples_b = tuplify(rng.random_sample((5, 4)))
    checked_a, checked_b = check_pairwise_arrays(tuples_a, tuples_b)
    assert_array_equal(tuples_a, checked_a)
    assert_array_equal(tuples_b, checked_b)
def test_check_preserve_type():
    """Ensure dtype float32 is preserved, and mixed dtypes promote to float64.

    Uses ``np.float64`` instead of the deprecated ``np.float`` alias (an
    alias of the builtin ``float``, i.e. float64), which was removed in
    NumPy 1.24 and would raise an AttributeError on modern NumPy.
    """
    XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_equal(XA_checked.dtype, np.float32)
    # both float32
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_equal(XA_checked.dtype, np.float32)
    assert_equal(XB_checked.dtype, np.float32)
    # mismatched A: both inputs are promoted to the common float64 type
    XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float64),
                                                   XB)
    assert_equal(XA_checked.dtype, np.float64)
    assert_equal(XB_checked.dtype, np.float64)
    # mismatched B
    XA_checked, XB_checked = check_pairwise_arrays(XA,
                                                   XB.astype(np.float64))
    assert_equal(XA_checked.dtype, np.float64)
    assert_equal(XB_checked.dtype, np.float64)
| bsd-3-clause |
classner/barrista | barrista/monitoring.py | 1 | 84399 | # -*- coding: utf-8 -*-
"""Defines several tools for monitoring net activity."""
# pylint: disable=F0401, E1101, too-many-lines, wrong-import-order
import logging as _logging
import os as _os
import subprocess as _subprocess
import collections as _collections
import numpy as _np
# pylint: disable=no-name-in-module
from scipy.stats import bernoulli as _bernoulli
from scipy.ndimage.interpolation import rotate as _rotate
from sklearn.decomposition import PCA as _PCA
from .tools import pad as _pad
# CAREFUL! This must be imported before any caffe-related import!
from .initialization import init as _init
import caffe as _caffe
try: # pragma: no cover
import cv2 as _cv2
_cv2INTER_CUBIC = _cv2.INTER_CUBIC # pylint: disable=invalid-name
_cv2INTER_LINEAR = _cv2.INTER_LINEAR # pylint: disable=invalid-name
_cv2INTER_NEAREST = _cv2.INTER_NEAREST # pylint: disable=invalid-name
_cv2resize = _cv2.resize # pylint: disable=invalid-name
except ImportError: # pragma: no cover
_cv2 = None
_cv2INTER_CUBIC = None # pylint: disable=invalid-name
_cv2INTER_LINEAR = None # pylint: disable=invalid-name
_cv2INTER_NEAREST = None # pylint: disable=invalid-name
_cv2resize = None # pylint: disable=invalid-name
try: # pragma: no cover
import matplotlib.pyplot as _plt
import matplotlib.ticker as _tkr
import matplotlib.colorbar as _colorbar
from mpl_toolkits.axes_grid1 import make_axes_locatable as _make_axes_locatable
_PLT_AVAILABLE = True
except ImportError: # pragma: no cover
_PLT_AVAILABLE = False
_init()
_LOGGER = _logging.getLogger(__name__)
class Monitor(object):  # pylint: disable=R0903
    """
    The monitor interface.

    Any monitor class should implement
    :py:func:`barrista.monitoring.Monitor.__call__`; overriding
    :py:func:`barrista.monitoring.Monitor.finalize` is optional.
    """

    def __call__(self, kwargs):
        """
        Dispatch a callback signal to the matching handler method.

        For available keyword arguments, see the documentation of
        :py:class:`barrista.solver.SolverInterface.Fit`.

        The callback signals are used as follows:

        * initialize_train: called once before training starts,
        * initialize_test: called once before training starts (if training
          with a validation set is used) or once before testing,
        * pre_fit: called before fitting mode is used (e.g., before going
          back to fitting during training after a validation run),
        * pre_test: called before testing mode is used (e.g., during
          training before validation starts),
        * post_test: called when testing finished,
        * pre_train_batch: before a training batch is fed to the network,
        * post_train_batch: after forwarding a training batch,
        * pre_test_batch: before a test batch is fed to the network,
        * post_test_batch: after a test batch was forwarded through the
          network.
        """
        # Map every known signal to its handler; unknown signals are a
        # silent no-op, exactly like the original elif chain.
        handlers = {
            'initialize_train': self._initialize_train,
            'initialize_test': self._initialize_test,
            'pre_fit': self._pre_fit,
            'pre_test': self._pre_test,
            'post_test': self._post_test,
            'pre_test_batch': self._pre_test_batch,
            'post_test_batch': self._post_test_batch,
            'pre_train_batch': self._pre_train_batch,
            'post_train_batch': self._post_train_batch,
        }
        handler = handlers.get(kwargs['callback_signal'])
        if handler is not None:
            handler(kwargs)

    # All default handlers are no-ops; subclasses override what they need.
    def _initialize_train(self, kwargs):  # pylint: disable=C0111
        pass

    def _initialize_test(self, kwargs):  # pylint: disable=C0111
        pass

    def _pre_fit(self, kwargs):  # pylint: disable=C0111
        pass

    def _pre_test(self, kwargs):  # pylint: disable=C0111
        pass

    def _post_test(self, kwargs):  # pylint: disable=C0111
        pass

    def _pre_test_batch(self, kwargs):  # pylint: disable=C0111
        pass

    def _post_test_batch(self, kwargs):  # pylint: disable=C0111
        pass

    def _pre_train_batch(self, kwargs):  # pylint: disable=C0111
        pass

    def _post_train_batch(self, kwargs):  # pylint: disable=C0111
        pass

    def finalize(self, kwargs):
        """Will be called at the end of a training/fitting process."""
        pass
class DataMonitor(Monitor):  # pylint: disable=R0903
    r"""
    Monitor interface for filling the blobs of a network.

    A specialized monitor that populates the network blobs for the
    forward pass or solver step. Ideally only one such monitor is used
    per callback, although several may be combined.
    """

    pass
class ParallelMonitor(Monitor):
    r"""
    Monitor interface for monitors executed parallel to processing a batch.

    The order of all monitors implementing this interface is respected.
    They operate on a dummy network object with dummy blobs to prepare
    their data; the dummy blob contents are then copied over to the real
    network before the next batch is executed.
    """

    def get_parallel_blob_names(self):  # pragma: no cover
        """Get the names of all blobs that must be provided for the dummy."""
        raise NotImplementedError()
# pylint: disable=too-few-public-methods
class StaticDataMonitor(DataMonitor, ParallelMonitor):
    r"""
    Always provides the same data for a specific net input blob.

    Parameters
    ==========

    :param X: dict(string, np.ndarray)
      The static input blobs to use.
    """

    def __init__(self, X):
        self._X = X  # pylint: disable=C0103

    def _initialize_train(self, kwargs):
        self._initialize(kwargs)

    def _initialize_test(self, kwargs):
        self._initialize(kwargs)

    def _initialize(self, kwargs):
        # Validate that every data key matches a network blob and every
        # value is a numpy array.
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        for key, value in list(self._X.items()):
            assert key in list(net.blobs.keys()), (
                'data key has no corresponding network blob {} {}'.format(
                    key, str(list(net.blobs.keys()))))
            assert isinstance(value, _np.ndarray), (
                'data must be a numpy nd array ({})'.format(type(value))
            )

    def _pre_train_batch(self, kwargs):
        self._pre_batch(kwargs['net'], kwargs)

    def _pre_test_batch(self, kwargs):
        self._pre_batch(kwargs['testnet'], kwargs)

    def _pre_batch(self, net, kwargs):  # pylint: disable=unused-argument
        # Copy the static data into the corresponding network blobs.
        for key, value in list(self._X.items()):
            net.blobs[key].data[...] = value

    def get_parallel_blob_names(self):
        """Get the names of all blobs that must be provided for the dummy."""
        return list(self._X.keys())
# pylint: disable=too-few-public-methods
class OversamplingDataMonitor(DataMonitor, ParallelMonitor):
    r"""
    Provides oversampled data.

    Parameters
    ==========

    :param blobinfos: dict(string, string|None).
      Associates blob name to oversample and optional the interpolation
      method to use for resize. This may be 'n' (nearest neighbour),
      'c' (cubic), 'l' (linear) or None (no interpolation). If an
      interpolation method is selected, `before_oversample_resize_to` must
      be not None and provide a size.

    :param before_oversample_resize_to: dict(string, 2-tuple).
      Specifies a size to which the image inputs will be resized before the
      oversampling is invoked.
    """

    def __init__(self,
                 blobinfos,
                 before_oversample_resize_to=None):
        """See class documentation."""
        # Only the known interpolation flags (or None) are accepted.
        for val in blobinfos.values():
            assert val in ['n', 'c', 'l', None]
        self._blobinfos = blobinfos
        # Any blob with an interpolation method needs a resize target size.
        for key, val in blobinfos.items():
            if val is not None:
                assert key in list(before_oversample_resize_to.keys())
        self._before_oversample_resize_to = before_oversample_resize_to
        # Determined from the test network in `_pre_test`.
        self._batch_size = None

    def get_parallel_blob_names(self):
        """Get the names of all blobs that must be provided for the dummy."""
        return list(self._blobinfos.keys())

    def _initialize_train(self, kwargs):
        # Oversampling is a test-time-only mechanism.
        raise Exception("The OversamplingDataMonitor can only be used during "
                        "testing!")

    def _initialize_test(self, kwargs):
        # Verify that every configured blob exists in the network.
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        for key in list(self._blobinfos.keys()):
            assert key in list(net.blobs.keys()), (
                'data key has no corresponding network blob {} {}'.format(
                    key, str(list(net.blobs.keys()))))

    def _pre_test(self, kwargs):  # pragma: no cover
        net = kwargs['testnet']
        # Cache the batch size from the first handled blob.
        self._batch_size = net.blobs[
            list(self._blobinfos.keys())[0]].data.shape[0]

    def _pre_test_batch(self, kwargs):  # pragma: no cover
        # Fills each blob with 10 oversampled views per original image
        # (10 views per image per `_caffe.io.oversample` -- NOTE(review):
        # confirm against the caffe version in use).
        for blob_name in list(self._blobinfos):
            assert blob_name in kwargs['data_orig'], (
                "The unchanged data must be provided by another DataProvider, "
                "e.g., CyclingDataMonitor with `only_preload`!")
            assert (len(kwargs['data_orig'][blob_name]) * 10 ==
                    self._batch_size), (
                        "The number of provided images * 10 must be the batch "
                        "size!")
            # pylint: disable=invalid-name
            for im_idx, im in enumerate(kwargs['data_orig'][blob_name]):
                if self._blobinfos[blob_name] is not None:
                    # Resize before oversampling, with the configured
                    # interpolation method (values validated in __init__).
                    if self._blobinfos[blob_name] == 'n':
                        interpolation = _cv2INTER_NEAREST
                    elif self._blobinfos[blob_name] == 'c':
                        interpolation = _cv2INTER_CUBIC
                    elif self._blobinfos[blob_name] == 'l':
                        interpolation = _cv2INTER_LINEAR
                    # Data arrives channel-first; cv2 expects channel-last,
                    # hence the transposes around the resize call.
                    oversampling_prep = _cv2resize(
                        _np.transpose(im, (1, 2, 0)),
                        (self._before_oversample_resize_to[blob_name][1],
                         self._before_oversample_resize_to[blob_name][0]),
                        interpolation=interpolation)
                else:
                    oversampling_prep = _np.transpose(im, (1, 2, 0))
                imshape = kwargs['testnet'].blobs[blob_name].data.shape[2:4]
                # Write the 10 crops back in channel-first order.
                kwargs['testnet'].blobs[blob_name].data[
                    im_idx * 10:(im_idx+1) * 10] =\
                    _np.transpose(
                        _caffe.io.oversample(
                            [oversampling_prep],
                            imshape),
                        (0, 3, 1, 2))
# pylint: disable=too-many-instance-attributes, R0903
class CyclingDataMonitor(DataMonitor, ParallelMonitor):
    r"""
    Uses the data sequentially.

    This monitor maps data to the network and cycles through the data
    sequentially. It is the default monitor used if a user provides X
    or X_val to the barrista.solver.fit method.

    If further processing of the original data is intended, by using the flag
    ``only_preload``, the following monitors find a dictionary of lists of
    the original datapoints with the name 'data_orig' in their ``kwargs``.
    The data is in this case NOT written to the network input layers! This
    can make sense, e.g., for the ``ResizingMonitor``.

    :param X: dict of numpy.ndarray or list, or None.
      If specified, is used as input data. It is used sequentially, so
      shuffle it pre, if required. The keys of the dict must have
      a corresponding layer name in the net. The values must be provided
      already in network dimension order, i.e., usually channels, height,
      width.

    :param only_preload: list(string).
      List of blobs for which the data will be loaded and stored in a dict
      of (name: list) for further processing with other monitors.

    :param input_processing_flags: dict(string, string).
      Dictionary associating input blob names with intended preprocessing
      methods. Valid values are:
        * n: none,
        * rn: resize, nearest neighbour,
        * rc: resize, cubic,
        * rl: resize, linear,
        * pX: padding, with value X.

    :param virtual_batch_size: int or None.
      Override the network batch size. May only be used if ``only_preload`` is
      set to True. Only makes sense with another DataMonitor in succession.

    :param color_data_augmentation_sigmas: dict(string, float) or None.
      Enhance the color of the samples as described in (Krizhevsky et al.,
      2012). The parameter gives the sigma for the normal distribution that is
      sampled to obtain the weights for scaled pixel principal components per
      blob.

    :param shuffle: Bool.
      If set to True, shuffle the data every epoch. Default: False.
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 X,
                 only_preload=None,
                 input_processing_flags=None,
                 virtual_batch_size=None,
                 color_data_augmentation_sigmas=None,
                 shuffle=False):
        """See class documentation."""
        if only_preload is None:
            only_preload = []
        self.only_preload = only_preload
        self._X = X  # pylint: disable=C0103
        assert X is not None
        if input_processing_flags is None:
            input_processing_flags = dict()
        self._input_processing_flags = input_processing_flags
        for key in input_processing_flags.keys():
            assert key in self._X.keys()
        # Padding values, parsed from the 'pX' flags per blob.
        self._padvals = dict()
        for key, val in input_processing_flags.items():
            assert (val in ['n', 'rn', 'rc', 'rl'] or
                    val.startswith('p')), (
                        "The input processing flags for the CyclingDataMonitor "
                        "must be in ['n', 'rn', 'rc', 'rl', 'p']: {}!".format(
                            val))
            if val.startswith('p'):
                self._padvals[key] = int(val[1:])
        for key in self.only_preload:
            assert key in self._X.keys()
        self._sample_pointer = 0
        self._len_data = None
        self._initialized = False
        self._batch_size = None
        assert virtual_batch_size is None or self.only_preload, (
            "If the virtual_batch_size is set, `only_preload` must be used!")
        if virtual_batch_size is not None:
            assert virtual_batch_size > 0
        self._virtual_batch_size = virtual_batch_size
        if color_data_augmentation_sigmas is None:
            color_data_augmentation_sigmas = dict()
        self._color_data_augmentation_sigmas = color_data_augmentation_sigmas
        for key in list(self._color_data_augmentation_sigmas.keys()):
            assert key in list(self._X.keys())
        # Default sigma 0. (no color augmentation) for unspecified blobs.
        for key in list(self._X.keys()):
            if key not in list(self._color_data_augmentation_sigmas.keys()):
                self._color_data_augmentation_sigmas[key] = 0.
        # pylint: disable=invalid-name
        self._color_data_augmentation_weights = dict()
        # pylint: disable=invalid-name
        self._color_data_augmentation_components = dict()
        self._shuffle = shuffle
        self._sample_order = None

    def get_parallel_blob_names(self):
        """Get the names of all blobs that must be provided for the dummy."""
        return list(self._X.keys())

    def _initialize_train(self, kwargs):
        self._initialize(kwargs)
        # Calculate the color channel PCA per blob if required.
        for bname, sigma in self._color_data_augmentation_sigmas.items():
            if sigma > 0.:
                _LOGGER.info("Performing PCA for color data augmentation for "
                             "blob '%s'...", bname)
                for im in self._X[bname]:  # pylint: disable=invalid-name
                    assert im.ndim == 3 and im.shape[0] == 3, (
                        "To perform the color data augmentation, images must "
                        "be provided in shape (3, height, width).")
                # Stack all pixels of all images as rows of (R, G, B).
                flldta = _np.vstack(
                    [im.reshape((3, im.shape[1] * im.shape[2])).T
                     for im in self._X[bname]])
                # No need to copy the data another time, since `vstack`
                # already copied it.
                pca = _PCA(copy=False, whiten=False)
                pca.fit(flldta)
                self._color_data_augmentation_weights[bname] = _np.sqrt(
                    pca.explained_variance_.astype('float32'))
                self._color_data_augmentation_components[bname] = \
                    pca.components_.T.astype('float32')

    def _initialize_test(self, kwargs):
        self._initialize(kwargs)

    def _initialize(self, kwargs):
        # We make sure, now that the network is available, that
        # all names in the provided data dict have a corresponding match
        # in the network.
        if self._initialized:
            raise Exception("This DataProvider has already been intialized! "
                            "Did you maybe try to use it for train and test? "
                            "This is not possible!")
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        self._len_data = len(list(self._X.values())[0])
        for key, value in list(self._X.items()):
            if key not in self._input_processing_flags:
                self._input_processing_flags[key] = 'n'
            assert key in list(net.blobs.keys()), (
                'data key has no corresponding network blob {} {}'.format(
                    key, str(list(net.blobs.keys()))))
            assert len(value) == self._len_data, (
                'all items need to have the same length {} vs {}'.format(
                    len(value), self._len_data))
            assert isinstance(value, _np.ndarray) or isinstance(value, list), (
                'data must be a numpy nd array or list ({})'.format(type(value))
            )
        self._sample_order = list(range(self._len_data))
        if self._shuffle:
            # Fixed seed for reproducible epoch shuffles.
            _np.random.seed(1)
            self._sample_order = _np.random.permutation(self._sample_order)
        self._initialized = True

    def _pre_fit(self, kwargs):
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        if self._virtual_batch_size is not None:
            self._batch_size = self._virtual_batch_size
        else:
            self._batch_size = net.blobs[list(self._X.keys())[0]].data.shape[0]
        assert self._batch_size > 0

    def _pre_test(self, kwargs):
        self._pre_fit(kwargs)
        self._sample_pointer = 0

    def _pre_train_batch(self, kwargs):
        self._pre_batch(kwargs['net'], kwargs)

    def _pre_test_batch(self, kwargs):
        self._pre_batch(kwargs['testnet'], kwargs)

    def _color_augment(self, bname, sample):
        """Apply PCA color noise to `sample` if configured for blob `bname`."""
        sigma = self._color_data_augmentation_sigmas[bname]
        if sigma == 0.:
            if isinstance(sample, (int, float)):
                return float(sample)
            else:
                return sample.astype('float32')
        else:
            # Scale per-component noise by the explained standard deviation
            # and project back into RGB space.
            comp_weights = _np.random.normal(0., sigma, 3).astype('float32') *\
                self._color_data_augmentation_weights[bname]
            noise = _np.dot(self._color_data_augmentation_components[bname],
                            comp_weights.T)
            return (sample.astype('float32').transpose((1, 2, 0)) + noise)\
                .transpose((2, 0, 1))

    def _pre_batch(self, net, kwargs):  # pylint: disable=C0111, W0613, R0912
        # This will simply cycle through the data.
        samples_ids = [self._sample_order[idx % self._len_data]
                       for idx in
                       range(self._sample_pointer,
                             self._sample_pointer + self._batch_size)]
        # Updating the sample pointer for the next time.
        old_sample_pointer = self._sample_pointer
        self._sample_pointer = (
            (self._sample_pointer + len(samples_ids)) % self._len_data)
        if self._shuffle and old_sample_pointer > self._sample_pointer:
            # Epoch ended. Reshuffle.
            self._sample_order = _np.random.permutation(self._sample_order)
        if len(self.only_preload) > 0:
            sample_dict = dict()
        for key in list(self._X.keys()):  # pylint: disable=too-many-nested-blocks
            if key in self.only_preload:
                sample_dict[key] = []
            # This will actually fill the data for the network.
            for sample_idx in range(self._batch_size):
                augmented_sample = self._color_augment(
                    key,
                    self._X[key][samples_ids[sample_idx]])
                if key in self.only_preload:
                    sample_dict[key].append(augmented_sample)
                else:
                    if (net.blobs[key].data[sample_idx].size == 1 and (
                            isinstance(self._X[key][samples_ids[sample_idx]],
                                       (int, float)) or
                            self._X[key][samples_ids[sample_idx]].size == 1) or
                            self._X[key][samples_ids[sample_idx]].size ==
                            net.blobs[key].data[sample_idx].size):
                        # Sizes match: direct assignment (with reshape for
                        # non-scalar blobs).
                        if net.blobs[key].data[sample_idx].size == 1:
                            net.blobs[key].data[sample_idx] =\
                                augmented_sample
                        else:
                            net.blobs[key].data[sample_idx] = (
                                augmented_sample.reshape(
                                    net.blobs[key].data.shape[1:]))
                    else:
                        if self._input_processing_flags[key] == 'n':  # pragma: no cover
                            raise Exception(("Sample size {} does not match " +
                                             "network input size {} and no " +
                                             "preprocessing is allowed!")
                                            .format(
                                                augmented_sample.size,
                                                net.blobs[key].data[sample_idx].size))
                        elif self._input_processing_flags[key] in ['rn',
                                                                   'rc',
                                                                   'rl']:
                            assert (
                                augmented_sample.shape[0]
                                == net.blobs[key].data.shape[1])
                            # BUGFIX: the flag must be looked up per blob
                            # (`[key]`). The original compared the whole
                            # dict to a string, which is always False and
                            # silently forced linear interpolation for
                            # every blob.
                            if self._input_processing_flags[key] == 'rn':
                                interp_method = _cv2INTER_NEAREST
                            elif self._input_processing_flags[key] == 'rc':
                                interp_method = _cv2INTER_CUBIC
                            else:
                                interp_method = _cv2INTER_LINEAR
                            # Resize channel by channel (cv2 handles 2D maps).
                            for channel_idx in range(
                                    net.blobs[key].data.shape[1]):
                                net.blobs[key].data[sample_idx, channel_idx] =\
                                    _cv2resize(
                                        augmented_sample[channel_idx],
                                        (net.blobs[key].data.shape[3],
                                         net.blobs[key].data.shape[2]),
                                        interpolation=interp_method)
                        else:
                            # Padding.
                            net.blobs[key].data[sample_idx] = _pad(
                                augmented_sample,
                                net.blobs[key].data.shape[2:4],
                                val=self._padvals[key])
        if len(self.only_preload) > 0:
            kwargs['data_orig'] = sample_dict
class ResizingMonitor(ParallelMonitor, Monitor):  # pylint: disable=R0903
    r"""
    Optionally resizes input data and adjusts the network input shape.

    This monitor optionally resizes the input data randomly and adjusts
    the network input size accordingly (this works only for batch size 1
    and fully convolutional networks).

    For this to work, it must be used with the ``CyclingDataMonitor`` with
    ``only_preload`` set.

    :param blobinfos: dict(string, int).
      Describes which blobs to apply the resizing operation to, and which
      padding value to use for the remaining space.

    :param base_scale: float.
      If set to a value different than 1., apply the given base scale first
      to images. If set to a value different than 1., the parameter
      ``interp_methods`` must be set.

    :param random_change_up_to: float.
      If set to a value different than 0., the scale change is altered
      randomly with a uniformly drawn value from -``random_change_up_to`` to
      ``random_change_up_to``, that is being added to the base value.

    :param net_input_size_adjustment_multiple_of: int.
      If set to a value greater than 0, the blobs shape is adjusted from its
      initial value (which is used as minimal one) in multiples of the given
      one.

    :param interp_methods: dict(string, string).
      Dictionary which stores for every blob the interpolation method. The
      string must be for each blob in ['n', 'c', 'l'] (nearest neighbour,
      cubic, linear).
    """

    def __init__(self,  # pylint: disable=R0913
                 blobinfos,
                 base_scale=1.,
                 random_change_up_to=0.,
                 net_input_size_adjustment_multiple_of=0,
                 interp_methods=None):
        """See class documentation."""
        self._blobinfos = blobinfos
        self._base_scale = base_scale
        self._random_change_up_to = random_change_up_to
        # Interpolation methods are only required if rescaling can occur.
        if self._base_scale != 1. or self._random_change_up_to != 0.:
            assert interp_methods is not None
            for key in self._blobinfos.keys():
                assert key in interp_methods.keys()
                assert interp_methods[key] in ['n', 'c', 'l']
        self._interp_methods = interp_methods
        self._adjustment_multiple_of = net_input_size_adjustment_multiple_of
        # Both are determined lazily in `_initialize` / `_pre_fit`.
        self._min_input_size = None
        self._batch_size = None

    def _initialize_train(self, kwargs):
        self._initialize(kwargs)

    def _initialize_test(self, kwargs):
        self._initialize(kwargs)

    def _initialize(self, kwargs):
        # We make sure, now that the network is available, that
        # all names in the provided data dict have a corresponding match
        # in the network.
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        for key in list(self._blobinfos.keys()):
            assert key in list(net.blobs.keys()), (
                'data key has no corresponding network blob {} {}'.format(
                    key, str(list(net.blobs.keys()))))
            assert net.blobs[key].data.ndim == 4
            if self._adjustment_multiple_of > 0:
                # All handled blobs must share the same (minimum) input
                # size, which is the baseline for the adjustment.
                if self._min_input_size is None:
                    self._min_input_size = net.blobs[key].data.shape[2:4]
                else:
                    assert (net.blobs[key].data.shape[2:4] ==
                            self._min_input_size), (
                                'if automatic input size adjustment is '
                                'activated, all inputs must be of same size '
                                '(first: {}, {}: {})'.format(
                                    self._min_input_size, key,
                                    net.blobs[key].data.shape[2:4]))

    def _pre_fit(self, kwargs):
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        self._batch_size = net.blobs[
            list(self._blobinfos.keys())[0]].data.shape[0]
        if self._adjustment_multiple_of > 0:
            assert self._batch_size == 1, (
                "If size adjustment is activated, the batch size must be one!")

    def _pre_test(self, kwargs):
        self._pre_fit(kwargs)

    def _pre_train_batch(self, kwargs):
        self._pre_batch(kwargs['net'], kwargs)

    def _pre_test_batch(self, kwargs):
        self._pre_batch(kwargs['testnet'], kwargs)

    # pylint: disable=C0111, W0613, R0912, too-many-locals
    def _pre_batch(self, net, kwargs):
        scales = None
        sizes = None
        if not 'data_orig' in kwargs.keys():
            raise Exception(
                "This data monitor needs a data providing monitor "
                "to run in advance (e.g., a CyclingDataMonitor with "
                "`only_preload`)!")
        # All preloaded blobs must agree on the per-sample image sizes.
        for key, value in kwargs['data_orig'].items():
            assert len(value) == self._batch_size
            if sizes is None:
                sizes = []
                for img in value:
                    sizes.append(img.shape[1:3])
            else:
                for img_idx, img in enumerate(value):
                    # pylint: disable=unsubscriptable-object
                    assert img.shape[1:3] == sizes[img_idx]
        for key, padval in self._blobinfos.items():
            if scales is None:
                # Draw one scale per sample once; it is reused for all
                # blobs so the transformation stays consistent across them.
                scales = []
                for sample_idx in range(self._batch_size):
                    if self._random_change_up_to > 0:
                        scales.append(
                            self._base_scale +
                            _np.random.uniform(low=-self._random_change_up_to,
                                               high=self._random_change_up_to))
                    else:
                        scales.append(self._base_scale)
            for sample_idx in range(self._batch_size):
                # Get the scaled data.
                scaled_sample = kwargs['data_orig'][key][sample_idx]
                if scales[sample_idx] != 1.:
                    scaled_sample = _np.empty((scaled_sample.shape[0],
                                               int(scaled_sample.shape[1] *
                                                   scales[sample_idx]),
                                               int(scaled_sample.shape[2] *
                                                   scales[sample_idx])),
                                              dtype='float32')
                    if self._interp_methods[key] == 'n':
                        interpolation_method = _cv2INTER_NEAREST
                    elif self._interp_methods[key] == 'l':
                        interpolation_method = _cv2INTER_LINEAR
                    else:
                        interpolation_method = _cv2INTER_CUBIC
                    # Resize each channel separately (cv2 works on 2D maps).
                    for layer_idx in range(scaled_sample.shape[0]):
                        scaled_sample[layer_idx] = _cv2resize(
                            kwargs['data_orig'][key][sample_idx][layer_idx],
                            (scaled_sample.shape[2],
                             scaled_sample.shape[1]),
                            interpolation=interpolation_method)
                # If necessary, adjust the network input size.
                if self._adjustment_multiple_of > 0:
                    image_height, image_width = scaled_sample.shape[1:3]
                    # Round the blob size up to the next allowed multiple,
                    # never below the configured minimum input size.
                    netinput_height = int(max(
                        self._min_input_size[0] +
                        _np.ceil(
                            float(image_height - self._min_input_size[0]) /
                            self._adjustment_multiple_of) *
                        self._adjustment_multiple_of,
                        self._min_input_size[0]))
                    netinput_width = int(max(
                        self._min_input_size[1] +
                        _np.ceil(
                            float(image_width - self._min_input_size[1]) /
                            self._adjustment_multiple_of) *
                        self._adjustment_multiple_of,
                        self._min_input_size[1]))
                    net.blobs[key].reshape(1,
                                           scaled_sample.shape[0],
                                           netinput_height,
                                           netinput_width)
                # Put the data in place, padding any remaining space.
                net.blobs[key].data[sample_idx] = _pad(
                    scaled_sample,
                    net.blobs[key].data.shape[2:4],
                    val=padval)

    def get_parallel_blob_names(self):
        """Get the names of all blobs that must be provided for the dummy."""
        return list(self._blobinfos.keys())
# pylint: disable=too-few-public-methods
class RotatingMirroringMonitor(ParallelMonitor, Monitor):
    r"""
    Rotate and/or horizontally mirror samples within blobs.

    For every sample, the rotation and mirroring will be consistent
    across the blobs.

    :param blobinfos: dict(string, int).
      A dictionary containing the blob names and the padding values that
      will be applied.

    :param max_rotation_degrees: float.
      The rotation will be sampled uniformly from the interval
      [-rotation_degrees, rotation_degrees[ for each sample.

    :param mirror_prob: float.
      The probability that horizontal mirroring occurs. Is as well sampled
      individually for every sample.

    :param mirror_value_swaps: dict(string, dict(int, list(2-tuples))).
      Specifies for every blob for every layer whether any values must be
      swapped if mirroring is applied. This is important when, e.g.,
      mirroring annotation maps with left-right information. Every 2-tuple
      contains (original value, new value). The locations of the swaps are
      determined before any change is applied, so the order of tuples does not
      play a role.

    :param mirror_layer_swaps: dict(string, list(2-tuples)).
      Specifies for every blob whether any layers must be swapped if
      mirroring is applied. Can be used together with mirror_value_swaps: in
      this case, the `mirror_value_swaps` are applied first, then the layers
      are swapped.
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 blobinfos,
                 max_rotation_degrees,
                 mirror_prob=0.,
                 mirror_value_swaps=None,
                 mirror_layer_swaps=None):
        """See class documentation."""
        self._blobinfos = blobinfos
        self._rotation_degrees = max_rotation_degrees
        self._mirror_prob = mirror_prob
        self._batch_size = None
        if mirror_value_swaps is None:
            mirror_value_swaps = dict()
        for key in list(mirror_value_swaps.keys()):
            # BUGFIX: error message read "Blob not in handled".
            assert key in self._blobinfos, ("Blob not handled: {}!"\
                                            .format(key))
            for layer_idx in list(mirror_value_swaps[key].keys()):
                m_tochange = []
                for swappair in mirror_value_swaps[key][layer_idx]:
                    assert len(swappair) == 2, (
                        "Swaps must be specified as (from_value, to_value): {}"\
                        .format(mirror_value_swaps[key][layer_idx]))
                    assert swappair[0] not in m_tochange, (
                        "Every value may change only to one new: {}."\
                        .format(mirror_value_swaps[key][layer_idx]))
                    m_tochange.append(swappair[0])
                    # BUGFIX: `blobinfos[key]` is the (int) padding value;
                    # the original indexed it with `[layer_idx]`, which
                    # raised a TypeError instead of the intended
                    # AssertionError whenever this check failed.
                    assert blobinfos[key] not in swappair, (
                        "A specified swap value is the fill value for this "
                        "blob: {}, {}, {}.".format(key,
                                                   blobinfos[key],
                                                   swappair))
        if mirror_layer_swaps is None:
            mirror_layer_swaps = dict()
        for key in list(mirror_layer_swaps.keys()):
            assert key in self._blobinfos, ("Blob not handled: {}!"\
                                            .format(key))
            idx_tochange = []
            for swappair in mirror_layer_swaps[key]:
                assert len(swappair) == 2, (
                    "Swaps must be specified as (from_value, to_value): {}"\
                    .format(swappair))
                assert (swappair[0] not in idx_tochange and
                        swappair[1] not in idx_tochange), (
                            "Every value may only be swapped to or from one "
                            "position!")
                idx_tochange.extend(swappair)
        # Fill empty defaults for every handled blob without a spec.
        for key in list(self._blobinfos):
            if key not in list(mirror_value_swaps.keys()):
                mirror_value_swaps[key] = dict()
            if key not in list(mirror_layer_swaps.keys()):
                mirror_layer_swaps[key] = []
        self._mirror_value_swaps = mirror_value_swaps
        self._mirror_layer_swaps = mirror_layer_swaps

    def get_parallel_blob_names(self):
        """Get the names of all blobs that must be provided for the dummy."""
        return list(self._blobinfos.keys())

    def _initialize_train(self, kwargs):
        self._initialize(kwargs)

    def _initialize_test(self, kwargs):
        self._initialize(kwargs)

    def _initialize(self, kwargs):
        # We make sure, now that the network is available, that
        # all names in the provided data dict have a corresponding match
        # in the network.
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        for key in list(self._blobinfos.keys()):
            assert key in list(net.blobs.keys()), (
                'data key has no corresponding network blob {} {}'.format(
                    key, str(list(net.blobs.keys()))))
            assert net.blobs[key].data.ndim == 4
            # Every configured swap must address an existing layer.
            for layer_idx in self._mirror_value_swaps[key].keys():
                assert layer_idx < net.blobs[key].data.shape[1], ((
                    "The data for blob {} has not enough layers for swapping "
                    "{}!").format(key, layer_idx))
            for swappair in self._mirror_layer_swaps[key]:
                assert (swappair[0] < net.blobs[key].data.shape[1] and
                        swappair[1] < net.blobs[key].data.shape[1]), (
                            "Not enough layers in blob {} to swap {}!".format(
                                key, swappair))

    def _pre_fit(self, kwargs):
        if 'test' in kwargs['callback_signal']:
            net = kwargs['testnet']
        else:
            net = kwargs['net']
        self._batch_size = net.blobs[
            list(self._blobinfos.keys())[0]].data.shape[0]

    def _pre_test(self, kwargs):
        self._pre_fit(kwargs)

    def _pre_train_batch(self, kwargs):
        self._pre_batch(kwargs['net'], kwargs)

    def _pre_test_batch(self, kwargs):
        self._pre_batch(kwargs['testnet'], kwargs)

    # pylint: disable=C0111, W0613, R0912, too-many-locals
    def _pre_batch(self, net, kwargs):
        rotations = None
        mirrorings = None
        # Order-0 spline (nearest neighbour) without prefiltering keeps
        # discrete annotation values intact during rotation.
        spline_interpolation_order = 0
        prefilter = False
        for key, padval in self._blobinfos.items():
            if rotations is None:
                # Sample the per-sample rotations once; they are reused for
                # all blobs so the transformation stays consistent.
                rotations = []
                if self._rotation_degrees > 0.:
                    rotations = _np.random.uniform(low=-self._rotation_degrees,
                                                   high=self._rotation_degrees,
                                                   size=self._batch_size)
                else:
                    rotations = [0.] * self._batch_size
            if mirrorings is None:
                mirrorings = []
                if self._mirror_prob > 0.:
                    mirrorings = _bernoulli.rvs(self._mirror_prob,
                                                size=self._batch_size)
                else:
                    mirrorings = [0] * self._batch_size
            for sample_idx in range(self._batch_size):
                if rotations[sample_idx] != 0.:
                    net.blobs[key].data[sample_idx] = _rotate(
                        net.blobs[key].data[sample_idx],
                        rotations[sample_idx],
                        (1, 2),
                        reshape=False,
                        order=spline_interpolation_order,
                        mode='constant',
                        cval=padval,
                        prefilter=prefilter)
                if mirrorings[sample_idx] == 1.:
                    # Flip horizontally (last axis is width).
                    net.blobs[key].data[sample_idx] = \
                        net.blobs[key].data[sample_idx, :, :, ::-1]
                    for layer_idx in range(net.blobs[key].data.shape[1]):
                        if (layer_idx not in
                                self._mirror_value_swaps[key].keys()):
                            continue
                        swap_indices = dict()
                        swap_tuples = self._mirror_value_swaps[key][layer_idx]
                        # Locate all swap positions before applying any
                        # change, so chained swaps cannot interfere.
                        for swappair in swap_tuples:
                            swap_indices[swappair[0]] = (
                                net.blobs[key].data[sample_idx, layer_idx] ==\
                                swappair[0])
                        for swappair in swap_tuples:
                            net.blobs[key].data[sample_idx, layer_idx][
                                swap_indices[swappair[0]]] = swappair[1]
                    if len(self._mirror_layer_swaps[key]) > 0:
                        # Build the permuted layer order and reindex.
                        new_layer_order = list(
                            range(net.blobs[key].data.shape[1]))
                        for swappair in self._mirror_layer_swaps[key]:
                            new_layer_order[swappair[0]],\
                                new_layer_order[swappair[1]] = \
                                new_layer_order[swappair[1]],\
                                new_layer_order[swappair[0]]
                        net.blobs[key].data[...] = net.blobs[key].data[
                            :, tuple(new_layer_order)]
class ResultExtractor(Monitor):  # pylint: disable=R0903
    r"""
    Monitor that extracts a scalar layer result into the cbparams.

    The main use case are scalar outputs such as loss and accuracy.
    IMPORTANT: this monitor will change cbparams and add new values to it,
    most likely other monitors will depend on this, thus, ResultExtractors
    should be among the first monitors in the callback list, e.g. by
    insert them always in the beginning.
    It will extract the value of a layer and add the value to the cbparam.
    :param cbparam_key: string.
      The key we will overwrite/set in the cbparams dict.
    :param layer_name: string.
      The layer to extract the value from.
    """

    def __init__(self, cbparam_key, layer_name):
        """See class documentation."""
        self._layer_name = layer_name
        self._cbparam_key = cbparam_key
        self._init = False
        self._not_layer_available = True
        self._test_data = None

    def __call__(self, kwargs):
        """Dispatch the callback unless the layer turned out to be missing."""
        if not (self._not_layer_available and self._init):
            Monitor.__call__(self, kwargs)

    def _initialize_train(self, kwargs):
        self._initialize(kwargs)

    def _initialize_test(self, kwargs):
        self._initialize(kwargs)

    def _initialize(self, kwargs):
        if self._init:
            raise Exception("This ResultExtractor is already initialized! "
                            "Did you try to use it for train and test?")
        # Test signals read from the test net, everything else from the
        # training net.
        if 'test' in kwargs['callback_signal']:
            source_net = kwargs['testnet']
        else:
            source_net = kwargs['net']
        self._not_layer_available = (
            self._layer_name not in list(source_net.blobs.keys()))
        self._init = True
        assert self._cbparam_key not in kwargs, (
            'it is only allowed to add keys to the cbparam,',
            'not overwrite them {} {}'.format(self._cbparam_key,
                                              list(kwargs.keys())))

    def _pre_train_batch(self, kwargs):
        kwargs[self._cbparam_key] = 0.0

    def _post_train_batch(self, kwargs):
        blob = kwargs['net'].blobs[self._layer_name]
        kwargs[self._cbparam_key] = float(blob.data[...].ravel()[0])

    def _pre_test(self, kwargs):
        self._test_data = []

    def _post_test(self, kwargs):
        kwargs[self._cbparam_key] = _np.mean(self._test_data)

    def _post_test_batch(self, kwargs):
        # The per-batch values are collected here and averaged in
        # ``_post_test`` (the layer output is normalized internally).
        blob = kwargs['testnet'].blobs[self._layer_name]
        self._test_data.append(float(blob.data[...].ravel()[0]))
        kwargs[self._cbparam_key] = self._test_data[-1]
# Again, tested in a subprocess and not discovered.
# pylint: disable=R0903
class ProgressIndicator(Monitor):  # pragma: no cover
    r"""
    Shows a progress bar with live information about the run.

    Completion percentage and ETA are always displayed; loss, accuracy,
    test loss and test accuracy are appended to the bar description as
    soon as they become available.
    It makes use of the following keyword arguments (\* indicates required):
    * ``iter``\*,
    * ``max_iter``\*,
    * ``train_loss``,
    * ``test_loss``,
    * ``train_accuracy``,
    * ``test_accuracy``.
    """

    def __init__(self):
        """See class documentation."""
        self.loss = None
        self.test_loss = None
        self.accuracy = None
        self.test_accuracy = None
        import tqdm
        self.pbarclass = tqdm.tqdm
        self.pbar = None
        self.last_iter = 0

    def _perf_string(self):
        # One 'label: value|' token per metric observed so far, in a fixed
        # order: train loss, train accuracy, test loss, test accuracy.
        tokens = []
        for label, value in (('ls', self.loss),
                             ('ac', self.accuracy),
                             ('tls', self.test_loss),
                             ('tac', self.test_accuracy)):
            if value is not None:
                tokens.append('{0}: {1:.4f}|'.format(label, value))
        return ''.join(tokens)

    def _post_train_batch(self, kwargs):
        if self.pbar is None:
            self.pbar = self.pbarclass(total=kwargs['max_iter'])
        self.loss = kwargs.get('train_loss', self.loss)
        self.accuracy = kwargs.get('train_accuracy', self.accuracy)
        # ``iter`` does not yet include the current batch for train
        # callbacks (presumably), hence ``batch_size`` is added.
        reached = kwargs['iter'] + kwargs['batch_size']
        self.pbar.set_description(self._perf_string())
        self.pbar.update(reached - self.last_iter)
        self.last_iter = reached

    def _post_test_batch(self, kwargs):
        if self.pbar is None:
            self.pbar = self.pbarclass(total=kwargs['max_iter'])
        self.test_loss = kwargs.get('test_loss', self.test_loss)
        self.test_accuracy = kwargs.get('test_accuracy', self.test_accuracy)
        self.pbar.set_description(self._perf_string())
        self.pbar.update(kwargs['iter'] - self.last_iter)
        self.last_iter = kwargs['iter']

    def _post_test(self, kwargs):
        # After a full test pass, refresh the bar with the (mean) results.
        if self.pbar is not None:
            self.test_loss = kwargs.get('test_loss', self.test_loss)
            self.test_accuracy = kwargs.get('test_accuracy',
                                            self.test_accuracy)
            self.pbar.set_description(self._perf_string())
            self.pbar.update(kwargs['iter'] - self.last_iter)
            self.last_iter = kwargs['iter']

    def finalize(self, kwargs):  # pylint: disable=W0613
        """Close the progress bar, if one was ever created."""
        if self.pbar is not None:
            self.pbar.close()
def _sorted_ar_from_dict(inf, key): # pragma: no cover
iters = []
vals = []
for values in inf:
if values.has_key(key):
iters.append(int(values['NumIters']))
vals.append(float(values[key]))
sortperm = _np.argsort(iters)
arr = _np.array([iters, vals]).T
return arr[sortperm, :]
def _draw_perfplot(phases, categories, ars, outfile):  # pragma: no cover
    """Render one stacked subplot per category and save the figure.

    ``ars`` maps '<phase>_<category>' to a (n, 2) array of
    (iteration, value) rows; missing combinations are skipped.
    """
    fig, axes = _plt.subplots(nrows=len(categories), sharex=True)
    for cat_idx, cat in enumerate(categories):
        axis = axes[cat_idx]
        axis.set_title(cat.title())
        for phase in phases:
            series_key = phase + '_' + cat
            if series_key not in ars.keys():
                continue
            series = ars[series_key]
            is_test = phase == 'test'
            # Test curves are opaque green; train curves semi-transparent blue.
            color = 'g' if is_test else 'b'
            alpha = 1.0 if is_test else 0.7
            axis.plot(series[:, 0], series[:, 1],
                      label=phase.title(), c=color, alpha=alpha)
            if is_test:
                # Test points are sparse, so mark them individually as well.
                axis.scatter(series[:, 0], series[:, 1],
                             c=color, s=50)
            axis.set_ylabel(cat.title())
            axis.grid()
            axis.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    _plt.savefig(outfile, bbox_inches='tight')
    _plt.close(fig)
class JSONLogger(Monitor):  # pylint: disable=R0903
    r"""
    Logs available information to a JSON file.
    The information is stored in a dictionary of lists. The lists contain
    score information and the iteration at which it was obtained. The
    currently logged scores are loss, accuracy, test loss and test accuracy.
    The logger makes use of the following keyword arguments
    (\* indicates required):
    * ``iter``\*,
    :param path: string.
      The path to store the file in.
    :param name: string.
      The filename. Will be prefixed with 'barrista_' and '.json' will be
      appended.
    :param logging: dict of lists.
      The two keys in the dict which are used are test, train.
      For each of those a list of keys can be provided, those keys
      have to be available in the kwargs/cbparams structure.
      Usually the required data is provided by the ResultExtractor.
    :param base_iter: int or None.
      If provided, add this value to the number of iterations. This overrides
      the number of iterations retrieved from a loaded JSON log to append to.
    :param write_every: int or None.
      Write the JSON log every `write_every` iterations. The log is always
      written upon completion of the training. If it is None, the log is only
      written on completion.
    :param create_plot: bool.
      If set to True, create a plot at `path` when the JSON log is written with
      the name of the JSON file + `_plot.png`. Default: False.
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 path,
                 name,
                 logging,
                 base_iter=None,
                 write_every=None,
                 create_plot=False):
        """See class documentation."""
        import json
        self.json_package = json
        self.json_filename = str(_os.path.join(
            path,
            'barrista_' + name + '.json'))
        if base_iter is None:
            self.base_iter = 0
        else:
            self.base_iter = base_iter
        if _os.path.exists(self.json_filename):
            # Append to an existing log instead of overwriting it.
            with open(self.json_filename, 'r') as infile:
                self.dict = self.json_package.load(infile)
            if base_iter is None:
                # Resume counting from the highest iteration in the old log.
                for key in ['train', 'test']:
                    for infdict in self.dict[key]:
                        # Fixed: `dict.has_key` was removed in Python 3;
                        # the `in` operator works on both Python 2 and 3.
                        if 'NumIters' in infdict:
                            self.base_iter = max(self.base_iter,
                                                 infdict['NumIters'])
            _LOGGER.info("Appending to JSON log at %s from iteration %d.",
                         self.json_filename,
                         self.base_iter)
        else:
            self.dict = {'train': [], 'test': [], 'barrista_produced': True}
        assert write_every is None or write_every > 0
        self._write_every = write_every
        self._logging = logging
        self._create_plot = create_plot
        if self._create_plot:
            assert _PLT_AVAILABLE, (
                "Matplotlib must be available to use plotting!")

    def _initialize_train(self, kwargs):
        self._initialize(kwargs)

    def _initialize_test(self, kwargs):
        self._initialize(kwargs)

    def _initialize(self, kwargs):  # pylint: disable=unused-argument
        # Only sanity-check the requested phases; no state is set up here.
        for key in list(self._logging.keys()):
            assert key in ['train', 'test'], (
                'only train and test is supported by this logger')

    def _post_test(self, kwargs):
        self._post('test', kwargs)

    def _post_train_batch(self, kwargs):
        self._post('train', kwargs)

    def _post(self, phase_name, kwargs):  # pylint: disable=C0111
        """Record the configured scores and periodically flush the log."""
        if phase_name not in self._logging:  # pragma: no cover
            return
        if phase_name == 'train':
            # The train hook runs before `iter` is advanced by this batch;
            # temporarily adjust it (and restore it below) so the stored
            # iteration counts completed samples.
            kwargs['iter'] += kwargs['batch_size']
        if (self._write_every is not None and
                kwargs['iter'] % self._write_every == 0):
            with open(self.json_filename, 'w') as outf:
                self.json_package.dump(self.dict, outf)
            if self._create_plot:  # pragma: no cover
                categories = set()
                arrs = dict()
                for plot_phase_name in ['train', 'test']:
                    for key in self._logging[plot_phase_name]:
                        # Strip the 'train_'/'test_' prefix to get the
                        # category name.
                        categories.add(key[len(plot_phase_name) + 1:])
                        arrs[key] = _sorted_ar_from_dict(
                            self.dict[plot_phase_name], key)
                _draw_perfplot(['train', 'test'],
                               categories,
                               arrs,
                               self.json_filename + '_plot.png')
        for key in self._logging[phase_name]:
            if key in kwargs:
                self.dict[phase_name].append({'NumIters':
                                              kwargs['iter'] + self.base_iter,
                                              key: kwargs[key]})
        if phase_name == 'train':
            kwargs['iter'] -= kwargs['batch_size']

    def finalize(self, kwargs):  # pylint: disable=W0613
        """Write the json file."""
        with open(self.json_filename, 'w') as outf:
            self.json_package.dump(self.dict, outf)
        if self._create_plot:  # pragma: no cover
            categories = set()
            arrs = dict()
            for phase_name in ['train', 'test']:
                for key in self._logging[phase_name]:
                    categories.add(key[len(phase_name) + 1:])
                    arrs[key] = _sorted_ar_from_dict(self.dict[phase_name],
                                                     key)
            _draw_perfplot(['train', 'test'],
                           categories,
                           arrs,
                           self.json_filename + '_plot.png')
class Checkpointer(Monitor):  # pylint: disable=R0903
    r"""
    Writes the network blobs to disk at certain iteration intervals.
    The logger makes use of the following keyword arguments
    (\* indicates required):
    * ``iter``\*,
    * ``net``\*,
    * ``batch_size``\*.
    :param name_prefix: string or None.
      The first part of the output filenames to generate. The prefix '_iter_,
      the current iteration, as well as '.caffemodel' is added.
      If you are using a caffe version from later than Dec. 2015, caffe's
      internal snapshot method is exposed to Python and also snapshots the
      solver. If it's available, then this method will be used. However,
      in that case, it's not possible to influence the storage location
      from Python. Please use the solver parameter ``snapshot_prefix``
      when constructing the solver instead (this parameter may be None
      and is unused then).
    :param iterations: int > 0.
      Always if the current number of iterations is divisible by iterations,
      the network blobs are written to disk. Hence, this value must be a
      multiple of the batch size!
    :param base_iterations: int.
      Offset added to the current iteration count before deciding whether to
      snapshot and when deriving the filename index, e.g. when resuming a
      training run. Default: 0.
    """

    def __init__(self,
                 name_prefix,
                 iterations,
                 base_iterations=0):
        """See class documentation."""
        assert iterations > 0
        _LOGGER.info('Setting up checkpointing with name prefix %s every ' +
                     '%d iterations.', name_prefix, iterations)
        self.name_prefix = name_prefix
        self.iterations = iterations
        # Iterations at which a checkpoint was already written; used to
        # avoid saving twice (regular interval followed by `finalize`).
        self.created_checkpoints = []
        self._base_iterations = base_iterations

    # pylint: disable=arguments-differ
    def _post_train_batch(self, kwargs, finalize=False):
        """Write a snapshot whenever the checkpoint interval is reached."""
        assert self.iterations % kwargs['batch_size'] == 0, (
            'iterations not multiple of batch_size, {} vs {}'.format(
                self.iterations, kwargs['batch_size']))
        # Prevent double-saving.
        if kwargs['iter'] in self.created_checkpoints:
            return
        if ((kwargs['iter'] + self._base_iterations +
             kwargs['batch_size']) % self.iterations == 0 or
                finalize):
            self.created_checkpoints.append(kwargs['iter'])
            # Fixed: use floor division so the filename index stays an
            # integer on Python 3, where `/` on ints yields a float (the
            # caffe-snapshot branch previously produced names like
            # '..._iter_11.0.caffemodel' and failed the asserts below).
            snapshot_index = ((kwargs['iter'] + self._base_iterations) //
                              kwargs['batch_size'] + 1)
            # pylint: disable=protected-access
            if not hasattr(kwargs['solver']._solver, 'snapshot'):  # pragma: no cover
                # Old caffe: save the net weights ourselves.
                checkpoint_filename = (
                    self.name_prefix + '_iter_' +
                    str(snapshot_index) +
                    '.caffemodel')
                _LOGGER.debug("Writing checkpoint to file '%s'.",
                              checkpoint_filename)
                kwargs['net'].save(checkpoint_filename)
            else:
                # Recent caffe: the exposed solver snapshot stores net AND
                # solver state, but the storage prefix is fixed by the solver.
                # pylint: disable=protected-access
                kwargs['solver']._solver.snapshot()
                caffe_checkpoint_filename = (self.name_prefix +
                                             '_iter_' +
                                             str(snapshot_index) +
                                             '.caffemodel')
                caffe_sstate_filename = (self.name_prefix +
                                         '_iter_' +
                                         str(snapshot_index) +
                                         '.solverstate')
                _LOGGER.debug('Writing checkpoint to file "[solverprefix]%s" ' +
                              'and "[solverprefix]%s".',
                              caffe_checkpoint_filename,
                              caffe_sstate_filename)
                assert _os.path.exists(caffe_checkpoint_filename), (
                    "An error occured checkpointing to {}. File not found. "
                    "Make sure the `base_iterations` and the `name_prefix` "
                    "are correct.").format(caffe_checkpoint_filename)
                assert _os.path.exists(caffe_sstate_filename), (
                    "An error occured checkpointing to {}. File not found. "
                    "Make sure the `base_iterations` and the `name_prefix` "
                    "are correct.").format(caffe_sstate_filename)

    def finalize(self, kwargs):
        """Write a final checkpoint."""
        # Account for the counting on iteration increase for the last batch.
        kwargs['iter'] -= kwargs['batch_size']
        self._post_train_batch(kwargs, finalize=True)
        kwargs['iter'] += kwargs['batch_size']
class GradientMonitor(Monitor):
    """
    Tools to keep an eye on the gradient.
    Create plots of the gradient. Creates histograms of the gradient for all
    ``selected_parameters`` and creates an overview plot with the maximum
    absolute gradient per layer. If ``create_videos`` is set and ffmpeg is
    available, automatically creates videos.
    :param write_every: int.
      Write every x iterations. Since matplotlib takes some time to run, choose
      with care.
    :param output_folder: string.
      Where to store the outputs.
    :param selected_parameters: dict(string, list(int)) or None.
      Which parameters to include in the plots. The string is the name of the
      layer, the list of integers contains the parts to include, e.g., for a
      convolution layer, specify the name of the layer as key and 0 for
      the parameters of the convolution weights, 1 for the biases per channel.
      The order and meaning of parameter blobs is determined by caffe. If
      None, then all parameters are plotted. Default: None.
    :param relative: Bool.
      If set to True, will give the weights relative to the max absolute weight
      in the target parameter blob. Default: False.
    :param iteroffset: int.
      An iteration offset if training is resumed to not overwrite existing
      output. Default: 0.
    :param create_videos: Bool.
      If set to True, try to create a video using ffmpeg. Default: True.
    :param video_frame_rate: int.
      The video frame rate.
    """
    def __init__(self,  # pylint: disable=too-many-arguments
                 write_every,
                 output_folder,
                 selected_parameters=None,
                 relative=False,
                 iteroffset=0,
                 create_videos=True,
                 video_frame_rate=1):
        """See class documentation."""
        assert write_every > 0
        self._write_every = write_every
        self._output_folder = output_folder
        self._selected_parameters = selected_parameters
        self._relative = relative
        # Number of parameter blobs to plot; filled in `_initialize_train`.
        self._n_parameters = None
        self._iteroffset = iteroffset
        self._create_videos = create_videos
        self._video_frame_rate = video_frame_rate
    def _initialize_train(self, kwargs):  # pragma: no cover
        """Validate the parameter selection and count the blobs to plot."""
        assert _PLT_AVAILABLE, (
            "Matplotlib must be available to use the GradientMonitor!")
        assert self._write_every % kwargs['batch_size'] == 0, (
            "`write_every` must be a multiple of the batch size!")
        self._n_parameters = 0
        if self._selected_parameters is not None:
            for name in self._selected_parameters.keys():
                assert name in kwargs['net'].params.keys()
                for p_idx in self._selected_parameters[name]:
                    assert p_idx >= 0
                    assert len(kwargs['net'].params[name]) > p_idx
                    self._n_parameters += 1
        else:
            # Default: monitor every parameter blob of every layer.
            self._selected_parameters = _collections.OrderedDict()
            for name in kwargs['net'].params.keys():
                self._selected_parameters[name] = range(len(
                    kwargs['net'].params[name]))
                self._n_parameters += len(kwargs['net'].params[name])
    # pylint: disable=too-many-locals
    def _post_train_batch(self, kwargs):  # pragma: no cover
        """Write gradient histograms and a magnitude overview image every
        ``write_every`` iterations."""
        if kwargs['iter'] % self._write_every == 0:
            net = kwargs['net']
            maxabsupdates = {}
            maxabsupdates_flat = []
            # Create histograms.
            fig, axes = _plt.subplots(nrows=1,
                                      ncols=self._n_parameters,
                                      figsize=(self._n_parameters * 3, 3))
            ax_idx = 0
            xfmt = _tkr.FormatStrFormatter('%.1e')
            for lname in self._selected_parameters.keys():
                maxabsupdates[lname] = []
                for p_idx in self._selected_parameters[lname]:
                    if self._relative:
                        # Scale by the largest weight value of the blob.
                        lgradient = (net.params[lname][p_idx].diff /
                                     net.params[lname][p_idx].data.max())
                    else:
                        lgradient = net.params[lname][p_idx].diff
                    maxabsupdates[lname].append(_np.max(_np.abs(lgradient)))
                    maxabsupdates_flat.append(_np.max(_np.abs(lgradient)))
                    axes[ax_idx].set_title(lname + ', p%d' % (p_idx))
                    # NOTE(review): `normed` was removed from `Axes.hist` in
                    # matplotlib 3.x; presumably this targets an older
                    # matplotlib — confirm before upgrading the dependency.
                    axes[ax_idx].hist(list(lgradient.flat),
                                      25,
                                      normed=1,
                                      alpha=0.5)
                    axes[ax_idx].set_xticks(_np.linspace(-maxabsupdates_flat[-1],
                                                         maxabsupdates_flat[-1],
                                                         num=3))
                    axes[ax_idx].yaxis.set_visible(False)
                    axes[ax_idx].xaxis.set_major_formatter(xfmt)
                    ax_idx += 1
            _plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            _plt.suptitle("Gradient histograms for iteration %d" % (
                kwargs['iter'] + self._iteroffset))
            if self._relative:
                ghname = self._output_folder + 'gradient_hists_rel_%d.png' % (
                    (self._iteroffset + kwargs['iter']) /
                    self._write_every)
            else:
                ghname = self._output_folder + 'gradient_hists_%d.png' % (
                    (self._iteroffset + kwargs['iter']) /
                    self._write_every)
            _plt.savefig(ghname)
            _plt.close(fig)
            # Create the magnitude overview plot.
            fig = _plt.figure(figsize=(self._n_parameters * 1, 1.5))
            _plt.title("Maximum absolute gradient per layer (iteration %d)" % (
                kwargs['iter'] + self._iteroffset))
            ax = _plt.gca()  # pylint: disable=invalid-name
            # pylint: disable=invalid-name
            im = ax.imshow(_np.atleast_2d(_np.array(maxabsupdates_flat)),
                           interpolation='none')
            ax.yaxis.set_visible(False)
            divider = _make_axes_locatable(ax)
            cax = divider.append_axes("right", size="10%", pad=0.05)
            _plt.colorbar(im, cax=cax, ticks=_np.linspace(_np.min(maxabsupdates_flat),
                                                          _np.max(maxabsupdates_flat),
                                                          5))
            _plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            if self._relative:
                gmname = self._output_folder + 'gradient_magnitude_rel_%d.png' % (
                    (self._iteroffset + kwargs['iter']) /
                    self._write_every)
            else:
                gmname = self._output_folder + 'gradient_magnitude_%d.png' % (
                    (self._iteroffset + kwargs['iter']) /
                    self._write_every)
            _plt.savefig(gmname)
            _plt.close(fig)
    def finalize(self, kwargs):  # pragma: no cover
        """Optionally stitch the written images into videos via ffmpeg."""
        if self._create_videos:
            _LOGGER.debug("Creating gradient videos...")
            try:
                if not _os.path.exists(_os.path.join(self._output_folder,
                                                     'videos')):
                    _os.mkdir(_os.path.join(self._output_folder, 'videos'))
                if self._relative:
                    rel_add = '_rel'
                else:
                    rel_add = ''
                with open(_os.devnull, 'w') as quiet:
                    _subprocess.check_call([
                        'ffmpeg',
                        '-y',
                        '-start_number', str(0),
                        '-r', str(self._video_frame_rate),
                        '-i', _os.path.join(self._output_folder,
                                            'gradient_hists' + rel_add + '_%d.png'),
                        _os.path.join(self._output_folder,
                                      'videos',
                                      'gradient_hists' + rel_add + '.mp4')
                    ], stdout=quiet, stderr=quiet)
                    _subprocess.check_call([
                        'ffmpeg',
                        '-y',
                        '-start_number', str(0),
                        '-r', str(self._video_frame_rate),
                        '-i', _os.path.join(self._output_folder,
                                            'gradient_magnitude' + rel_add + '_%d.png'),
                        _os.path.join(self._output_folder,
                                      'videos',
                                      'gradient_magnitude' + rel_add + '.mp4')
                    ], stdout=quiet, stderr=quiet)
            except Exception as ex:  # pylint: disable=broad-except
                _LOGGER.error(
                    "Could not create videos! Error: %s. Is " +
                    "ffmpeg available on the command line?",
                    str(ex))
            _LOGGER.debug("Done.")
class ActivationMonitor(Monitor):
    """
    Tools to keep an eye on the net activations.
    Create plots of the net activations. If ``create_videos`` is set and
    ffmpeg is available, automatically creates videos.
    :param write_every: int.
      Write every x iterations. Since matplotlib takes some time to run, choose
      with care.
    :param output_folder: string.
      Where to store the outputs.
    :param selected_blobs: list(string) or None.
      Which blobs to include in the plots. If
      None, then all parameters are plotted. Default: None.
    :param iteroffset: int.
      An iteration offset if training is resumed to not overwrite existing
      output. Default: 0.
    :param sample: dict(string, NDarray(3D)).
      A sample to use that will be forward propagated to obtain the activations.
      Must contain one for every input layer of the network. Each sample is not
      preprocessed and must fit the input. If None, use the existing values
      from the blobs.
    :param create_videos: Bool.
      If set to True, try to create a video using ffmpeg. Default: True.
    :param video_frame_rate: int.
      The video frame rate.
    """
    # pylint: disable=too-many-arguments
    def __init__(self,  # pragma: no cover
                 write_every,
                 output_folder,
                 selected_blobs=None,
                 iteroffset=0,
                 sample=None,
                 create_videos=True,
                 video_frame_rate=1):
        """See class documentation."""
        assert write_every > 0
        self._write_every = write_every
        self._output_folder = output_folder
        self._selected_blobs = selected_blobs
        # Number of blobs to plot; filled in `_initialize_train`.
        self._n_parameters = None
        self._iteroffset = iteroffset
        self._create_videos = create_videos
        self._video_frame_rate = video_frame_rate
        self._sample = sample
    def _initialize_train(self, kwargs):  # pragma: no cover
        """Validate the blob selection and the optional fixed input sample."""
        assert _PLT_AVAILABLE, (
            "Matplotlib must be available to use the ActivationMonitor!")
        assert self._write_every % kwargs['batch_size'] == 0, (
            "`write_every` must be a multiple of the batch size!")
        self._n_parameters = 0
        if self._selected_blobs is not None:
            for name in self._selected_blobs:
                assert name in kwargs['net'].blobs.keys(), (
                    "The activation monitor should monitor {}, which is not "
                    "part of the net!").format(name)
                self._n_parameters += 1
        else:
            # Default: monitor every 4D (image-shaped) blob of the net.
            self._selected_blobs = []
            for name in kwargs['net'].blobs.keys():
                bshape = kwargs['net'].blobs[name].data.shape
                if len(bshape) == 4:
                    self._selected_blobs.append(name)
                    self._n_parameters += 1
        if self._sample is not None:
            for inp_name in self._sample.keys():
                assert (kwargs['net'].blobs[inp_name].data.shape[1:] ==
                        self._sample[inp_name].shape), (
                            "All provided inputs as `sample` must have the shape "
                            "of an input blob, starting from its sample "
                            "dimension. Does not match for %s: %s vs. %s." % (
                                inp_name,
                                str(kwargs['net'].blobs[inp_name].data.shape[1:]),
                                str(self._sample[inp_name].shape)))
    # pylint: disable=too-many-locals
    def _post_train_batch(self, kwargs):  # pragma: no cover
        """Plot a channel grid of each selected blob's activations every
        ``write_every`` iterations."""
        if kwargs['iter'] % self._write_every == 0:
            net = kwargs['net']
            if self._sample is not None:
                # Overwrite the batch's last sample with the fixed sample and
                # re-run the forward pass, so the plots always show
                # activations for the same input.
                for bname in self._sample.keys():
                    net.blobs[bname].data[-1, ...] = self._sample[bname]
                net.forward()
            for bname in self._selected_blobs:
                blob = net.blobs[bname].data
                nchannels = blob.shape[1]
                # Smallest square grid that holds all channels.
                gridlen = int(_np.ceil(_np.sqrt(nchannels)))
                fig, axes = _plt.subplots(nrows=gridlen,
                                          ncols=gridlen,
                                          squeeze=False)
                # Shared color scale over all channels of the last sample.
                bmin = blob[-1].min()
                bmax = blob[-1].max()
                for c_idx in range(nchannels):
                    ax = axes.flat[c_idx]  # pylint: disable=invalid-name
                    im = ax.imshow(blob[-1, c_idx],  # pylint: disable=invalid-name
                                   vmin=bmin,
                                   vmax=bmax,
                                   cmap='Greys_r',
                                   interpolation='none')
                    ax.set_title('C%d' % (c_idx))
                    ax.yaxis.set_visible(False)
                    ax.xaxis.set_visible(False)
                # Hide the unused cells of the square grid.
                # pylint: disable=undefined-loop-variable
                for blank_idx in range(c_idx + 1, gridlen * gridlen):
                    ax = axes.flat[blank_idx]  # pylint: disable=invalid-name
                    ax.axis('off')
                _plt.tight_layout(rect=[0, 0.03, 1, 0.95])
                _plt.suptitle("Activations in blob %s (iteration %d)" % (
                    bname, self._iteroffset + kwargs['iter']))
                cbax, cbkw = _colorbar.make_axes([ax for ax in axes.flat])
                fig.colorbar(im, cax=cbax, **cbkw)
                _plt.savefig(self._output_folder +
                             'activations_%s_%d.png' % (
                                 bname,
                                 (self._iteroffset + kwargs['iter']) /
                                 self._write_every))
                _plt.close(fig)
    def finalize(self, kwargs):  # pragma: no cover
        """Optionally stitch the written images into videos via ffmpeg."""
        if self._create_videos:
            _LOGGER.debug("Creating activation videos...")
            try:
                if not _os.path.exists(_os.path.join(self._output_folder,
                                                     'videos')):
                    _os.mkdir(_os.path.join(self._output_folder, 'videos'))
                for bname in self._selected_blobs:
                    with open(_os.devnull, 'w') as quiet:
                        _subprocess.check_call([
                            'ffmpeg',
                            '-y',
                            '-start_number', str(0),
                            '-r', str(self._video_frame_rate),
                            '-i', _os.path.join(self._output_folder,
                                                'activations_' + bname + '_%d.png'),
                            _os.path.join(self._output_folder,
                                          'videos',
                                          'activations_' + bname + '.mp4')
                        ], stdout=quiet, stderr=quiet)
            except Exception as ex:  # pylint: disable=broad-except
                _LOGGER.error(
                    "Could not create videos! Error: %s. Is " +
                    "ffmpeg available on the command line?",
                    str(ex))
            _LOGGER.debug("Done.")
class FilterMonitor(Monitor):
    """
    Tools to keep an eye on the filters.
    Create plots of the network filters. Creates filter plots for all
    ``selected_parameters``. If ``create_videos`` is set and ffmpeg is
    available, automatically creates videos.
    :param write_every: int.
      Write every x iterations. Since matplotlib takes some time to run, choose
      with care.
    :param output_folder: string.
      Where to store the outputs.
    :param selected_parameters: dict(string, list(int)) or None.
      Which parameters to include in the plots. The string is the name of the
      layer, the list of integers contains the parts to include, e.g., for a
      convolution layer, specify the name of the layer as key and 0 for
      the parameters of the convolution weights, 1 for the biases per channel.
      The order and meaning of parameter blobs is determined by caffe. If
      None, then all parameters are plotted. **Only 4D blobs can be plotted!**
      Default: None.
    :param iteroffset: int.
      An iteration offset if training is resumed to not overwrite existing
      output. Default: 0.
    :param create_videos: Bool.
      If set to True, try to create a video using ffmpeg. Default: True.
    :param video_frame_rate: int.
      The video frame rate.
    """
    # pylint: disable=too-many-arguments
    def __init__(self,  # pragma: no cover
                 write_every,
                 output_folder,
                 selected_parameters=None,
                 iteroffset=0,
                 create_videos=True,
                 video_frame_rate=1):
        """See class documentation."""
        assert write_every > 0
        self._write_every = write_every
        self._output_folder = output_folder
        self._selected_parameters = selected_parameters
        # Number of parameter blobs to plot; filled in `_initialize_train`.
        self._n_parameters = None
        self._iteroffset = iteroffset
        self._create_videos = create_videos
        self._video_frame_rate = video_frame_rate
    def _initialize_train(self, kwargs):  # pragma: no cover
        """Validate that only 4D parameter blobs are selected for plotting."""
        assert _PLT_AVAILABLE, (
            "Matplotlib must be available to use the FilterMonitor!")
        assert self._write_every % kwargs['batch_size'] == 0, (
            "`write_every` must be a multiple of the batch size!")
        self._n_parameters = 0
        if self._selected_parameters is not None:
            for name in self._selected_parameters.keys():
                assert name in kwargs['net'].params.keys()
                for p_idx in self._selected_parameters[name]:
                    assert p_idx >= 0
                    # Only 4D blobs (e.g. convolution filters) can be drawn.
                    assert len(kwargs['net'].params[name][p_idx].data.shape) == 4
                    self._n_parameters += 1
        else:
            # Default: select every 4D parameter blob of every layer.
            self._selected_parameters = _collections.OrderedDict()
            for name in kwargs['net'].params.keys():
                self._selected_parameters[name] = []
                for pindex in range(len(kwargs['net'].params[name])):
                    if len(kwargs['net'].params[name][pindex].data.shape) == 4:
                        self._selected_parameters[name].append(pindex)
                        self._n_parameters += 1
    def _post_train_batch(self, kwargs):  # pragma: no cover
        """Render every selected filter bank as one grayscale mosaic every
        ``write_every`` iterations."""
        if kwargs['iter'] % self._write_every == 0:
            net = kwargs['net']
            for pname in self._selected_parameters.keys():
                for pindex in self._selected_parameters[pname]:
                    fig = _plt.figure()
                    param = net.params[pname][pindex].data
                    border = 2
                    # One grid cell per (filter, channel) pair, separated by
                    # a `border` pixel margin.
                    collected_weights = _np.zeros((param.shape[0] *
                                                   (param.shape[2] + border) +
                                                   border,
                                                   param.shape[1] *
                                                   (param.shape[3] + border) +
                                                   border), dtype='float32')
                    pmin = param.min()
                    pmax = param.max()
                    # Build up the plot manually because matplotlib is too slow.
                    for filter_idx in range(param.shape[0]):
                        for layer_idx in range(param.shape[1]):
                            # Normalize each kernel to [0, 1] with the global
                            # min/max and paste it into its grid cell.
                            collected_weights[border + filter_idx * (param.shape[2] + border):
                                              border + filter_idx * (param.shape[2] + border) +
                                              param.shape[2],
                                              border + layer_idx * (param.shape[3] + border):
                                              border + layer_idx * (param.shape[3] + border) +
                                              param.shape[3]] = (
                                                  (param[filter_idx, layer_idx] - pmin)
                                                  / (pmax - pmin))
                    _plt.imshow(collected_weights,
                                cmap='Greys_r',
                                interpolation='none')
                    ax = _plt.gca()  # pylint: disable=invalid-name
                    ax.yaxis.set_visible(False)
                    ax.xaxis.set_visible(False)
                    ax.set_title((
                        "Values of layer %s, param %d\n" +
                        "(iteration %d, min %.1e, max %.1e)") % (
                        pname, pindex, self._iteroffset + kwargs['iter'], pmin, pmax))
                    _plt.savefig(self._output_folder +
                                 'parameters_%s_%d_%d.png' % (
                                     pname,
                                     pindex,
                                     (self._iteroffset + kwargs['iter']) /
                                     self._write_every))
                    _plt.close(fig)
    def finalize(self, kwargs):  # pragma: no cover
        """Optionally stitch the written images into videos via ffmpeg."""
        if self._create_videos:
            _LOGGER.debug("Creating filter videos...")
            try:
                if not _os.path.exists(_os.path.join(self._output_folder,
                                                     'videos')):
                    _os.mkdir(_os.path.join(self._output_folder, 'videos'))
                for pname in self._selected_parameters.keys():
                    for pindex in self._selected_parameters[pname]:
                        with open(_os.devnull, 'w') as quiet:
                            _subprocess.check_call([
                                'ffmpeg',
                                '-y',
                                '-start_number', str(0),
                                '-r', str(self._video_frame_rate),
                                '-i', _os.path.join(self._output_folder,
                                                    'parameters_' +
                                                    pname + '_' +
                                                    str(pindex) + '_' +
                                                    '%d.png'),
                                _os.path.join(self._output_folder,
                                              'videos',
                                              'parameters_' +
                                              pname + '_' +
                                              str(pindex) + '.mp4')
                            ], stdout=quiet, stderr=quiet)
            except Exception as ex:  # pylint: disable=broad-except
                _LOGGER.error(
                    "Could not create videos! Error: %s. Is " +
                    "ffmpeg available on the command line?",
                    str(ex))
            _LOGGER.debug("Done.")
| mit |
kiryx/pagmo | PyGMO/util/_analysis.py | 5 | 169844 | from __future__ import print_function as _dummy
class analysis:
"""
This class contains the tools necessary for exploratory analysis of the search,
fitness and constraint space of a given problem. Several tests can be conducted
on a low-discrepancy sample of the search space or on an existing population.
The aim is to gain insight into the problem properties and to aid algorithm
selection.
"""
    def __init__(self, input_object, npoints=0, method='sobol', first=1, output_to_file=False):
        """
        Constructor of the analysis class from a problem or population object. Also calls
        analysis.sample when npoints>0 or by default when a population object is input.
        **USAGE:**
        analysis(input_object=prob [, npoints=1000, method='sobol', first=1, output_to_file=False])
        * input_object: problem or population object used to initialise the analysis.
        * npoints: number of points of the search space to sample. If a population is input,
          a random subset of its individuals of size npoints will be sampled. Option npoints=='all' will
          sample the whole population. If a problem is input, a set of size npoints will be
          selected using the specified method. If set to zero, no sampling will be conducted.
        * method: method used to sample the normalized search space. Used only when input_object is a problem, otherwise ignored. Options are:
          * 'sobol': sampling based on sobol low-discrepancy sequence. Default option.
          * 'faure': sampling based on faure low-discrepancy sequence. Dim [2,23].
          * 'halton': sampling based on halton low-discrepancy sequence. Dim <10.
          * 'lhs': latin hypersquare sampling.
          * 'montecarlo': Monte Carlo (random) sampling.
        * first: used only when sampling with 'sobol', 'faure' or 'halton'. Index of the first element
          of the sequence that will be included in the sample. Defaults to 1. Set to >1 to skip. If set
          to 0 with 'sobol' method, point (0,0,...,0) will also be sampled.
        * output_to_file: if True, all outputs generated by this class will be written to the file
          log.txt and all plots saved as .png images in the directory ./analysis_X/ which is specified
          in attribute analysis.dir. If False, all of them will be shown on screen.
        """
        # Result containers; filled by the sampling/test routines later on.
        self.npoints = 0
        self.points = []
        self.f = []
        self.f_offset = []
        self.f_span = []
        self.grad_npoints = 0
        self.grad_points = []
        self.grad = []
        self.c = []
        self.c_span = []
        self.local_nclusters = 0
        self.local_initial_npoints = 0
        self.dim, self.cont_dim, self.int_dim, self.c_dim, self.ic_dim, self.f_dim = (
            0, 0, 0, 0, 0, 0)
        self.fignum = 1
        self.lin_conv_npairs = 0
        self.c_lin_npairs = 0
        self.dir = None
        if isinstance(input_object, core._core.population):
            # Population input: sample individuals from it ('pop' method).
            self.prob = input_object.problem
            self.pop = input_object
            method = 'pop'
            if npoints == 'all':
                self.sample(len(self.pop), 'pop')
            elif npoints > 0:
                self.sample(npoints, 'pop')
        elif isinstance(input_object, problem._base):
            # Problem input: sample the search space with the chosen method.
            self.prob = input_object
            self.pop = []
            if npoints > 0:
                self.sample(npoints, method, first)
        else:
            raise ValueError(
                "analysis: input either a problem or a population object to initialise the class")
        if output_to_file:
            import os
            # Find the first unused ./analysis_<i> directory and create it.
            i = 0
            while(True):
                i += 1
                if not os.path.exists('./analysis_' + str(i)):
                    os.makedirs('./analysis_' + str(i))
                    self.dir = './analysis_' + str(i)
                    break
            output = open(self.dir + '/log.txt', 'w+')
            print(
                "===============================================================================", file=output)
            print(
                "                                   ANALYSIS                                    ", file=output)
            print(
                "===============================================================================", file=output)
            print(
                "-------------------------------------------------------------------------------", file=output)
            print("PROBLEM PROPERTIES", file=output)
            print(
                "-------------------------------------------------------------------------------", file=output)
            print(self.prob, file=output)
            if self.npoints > 0:
                print(
                    "-------------------------------------------------------------------------------", file=output)
                print('SAMPLED [' + str(self.npoints) + '] POINTS VIA',
                      method, 'METHOD FOR THE SUBSEQUENT TESTS', file=output)
            output.close()
##########################################################################
# SAMPLING
##########################################################################
def sample(self, npoints, method='sobol', first=1):
"""
Routine used to sample the search space. Samples in x, f and c and scales the datasets.
**USAGE:**
analysis.sample(npoints=1000 [, method='sobol', first=1])
* npoints: number of points of the search space to sample.
* method: method used to sample the normalized search space. Options are:
* 'sobol': sampling based on sobol low-discrepancy sequence. Default option.
* 'faure': sampling based on faure low-discrepancy sequence. Dim [2,23].
* 'halton': sampling based on halton low-discrepancy sequence. Dim <10.
* 'lhs': latin hypersquare sampling.
* 'montecarlo': Monte Carlo (random) sampling.
* 'pop': sampling by selection of random individuals from a population. Can only
be used when a population object has ben input to the constructor.
* first: used only when sampling with 'sobol', 'faure' or 'halton'. Index of the first element
of the sequence that will be included in the sample. Defaults to 1. Set to >1 to skip. If set
to 0 with 'sobol' method, point (0,0,...,0) will also be sampled.
**The following parameters are stored as attributes of the class:**
* analysis.npoints: number of points sampled.
* analysis.points[number of points sampled][search dimension]: chromosome of points sampled.
* analysis.f[number of points sampled][fitness dimension]: fitness vector of points sampled.
* analysis.ub[search dimension]: upper bounds of search space.
* analysis.lb[search dimension]: lower bounds of search space.
* analysis.dim: search dimension, number of variables in search space
* analysis.cont_dim: number of continuous variables in search space
* analysis.int_dim: number of integer variables in search space
* analysis.c_dim: number of constraints
* analysis.ic_dim: number of inequality constraints
* analysis.f_dim: fitness dimension, number of objectives
* analysis.f_offset: minimum values of unscaled fitness functions. Used for scaling.
* analysis.f_span: peak-to-peak values of unscaled fitness functions. Used for scaling.
**NOTE:** when calling sample, all sampling methods can be used and the search space is sampled within its box constraints. If a population has been input to the
constructor, a subset of individuals are selected (randomly).
"""
from PyGMO.util import lhs, sobol, faure, halton
self.points = []
self.f = []
self.npoints = npoints
self.lb = list(self.prob.lb)
self.ub = list(self.prob.ub)
self.dim, self.cont_dim, self.int_dim, self.c_dim, self.ic_dim, self.f_dim = \
self.prob.dimension, self.prob.dimension - \
self.prob.i_dimension, self.prob.i_dimension, self.prob.c_dimension, self.prob. ic_dimension, self.prob.f_dimension
if self.npoints <= 0:
raise ValueError(
"analysis.sample: at least one point needs to be sampled")
if method == 'pop':
poplength = len(self.pop)
if poplength == 0:
raise ValueError(
"analysis.sample: method 'pop' specified but population object inexistant or void")
elif poplength < npoints:
raise ValueError(
"analysis.sample: it is not possible to sample more points than there are in the population via 'pop'")
elif poplength == npoints:
self.points = [list(self.pop[i].cur_x)
for i in range(poplength)]
self.f = [list(self.pop[i].cur_f) for i in range(poplength)]
else:
idx = range(poplength)
try:
from numpy.random import randint
except ImportError:
raise ImportError(
"analysis.sample needs numpy to run when sampling partially a population. Is it installed?")
for i in range(poplength, poplength - npoints, -1):
r = idx.pop(randint(i))
self.points.append(list(self.pop[r].cur_x))
self.f.append(list(self.pop[r].cur_f))
elif method == 'montecarlo':
try:
from numpy.random import random
except ImportError:
raise ImportError(
"analysis.sample needs numpy to run when sampling with montecarlo method. Is it installed?")
for i in range(npoints):
self.points.append([])
for j in range(self.dim):
r = random()
r = (r * self.ub[j] + (1 - r) * self.lb[j])
if j >= self.cont_dim:
r = round(r, 0)
self.points[i].append(r)
self.f.append(list(self.prob.objfun(self.points[i])))
else:
if method == 'sobol':
sampler = sobol(self.dim, first)
elif method == 'lhs':
sampler = lhs(self.dim, npoints)
elif method == 'faure':
sampler = faure(self.dim, first)
elif method == 'halton':
sampler = halton(self.dim, first)
else:
raise ValueError(
"analysis.sample: method specified is not valid. choose 'sobol', 'lhs', 'faure','halton', 'montecarlo' or 'pop'")
for i in range(npoints):
temp = list(sampler.next()) # sample in the unit hypercube
for j in range(self.dim):
temp[j] = temp[j] * self.ub[j] + \
(1 - temp[j]) * self.lb[j] # resize
if j >= self.cont_dim:
temp[j] = round(temp[j], 0) # round if necessary
self.points.append(temp)
self.f.append(list(self.prob.objfun(temp)))
self.f_offset = self._percentile(0)
self.f_span = self._ptp()
for i in range(self.f_dim):
if self.f_span[i] == 0:
raise ValueError(
"analysis.sample: your fitness function number " + str(i + 1) + " appears to be constant. Please remove it.")
self._compute_constraints()
self._scale_sample()
if self.dir is not None:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
print(
"\n-------------------------------------------------------------------------------", file=output)
print('SAMPLED [' + str(npoints) + '] POINTS VIA',
method, 'METHOD FOR THE SUBSEQUENT TESTS', file=output)
output.close()
def _scale_sample(self):
"""
Scales the sample in x and f after sampling, so all values are [0,1]. If constraints
have been computed, it also scales c to [-k,1-k] for k in [0,1].
"""
for i in range(self.npoints):
for j in range(self.dim):
self.points[i][j] -= self.lb[j]
self.points[i][j] /= (self.ub[j] - self.lb[j])
for j in range(self.f_dim):
self.f[i][j] -= self.f_offset[j]
self.f[i][j] /= self.f_span[j]
for j in range(self.c_dim):
self.c[i][j] /= self.c_span[j]
##########################################################################
# FITNESS DISTRIBUTION
##########################################################################
    def f_distribution(self, percentile=[5, 10, 25, 50, 75], plot_f_distribution=True, plot_x_pcp=True, round_to=3):
        """
        Report the f-distribution of the sampled search space, one column per objective.
        Distribution statistics (mean, std, percentiles, skew, kurtosis) are computed on
        the fitness values scaled to [0,1].

        **USAGE:**
                analysis.f_distribution([percentile=[5,10,25,50,75], plot_f_distribution=True, plot_x_pcp=True, round_to=3])
        * percentile: percentiles to show. Number or iterable. Defaults to [5,10,25,50,75].
        * plot_f_distribution: if True, the f-distribution plot will be generated and shown on screen or saved.
        * plot_x_pcp: if True, the x-PCP plot will be generated, using as interval limits the same
          percentiles demanded via argument percentile. Defaults to True.
        * round_to: precision of the results printed. Defaults to 3.

        **Prints to screen or file:** fitness magnitude (min/max/peak-to-peak), distribution
        parameters (mean, std, percentiles, skew, kurtosis) and the number of peaks of the
        f-distribution seen as a probability density function.

        **Shows or saves to file:** the f-distribution plot and the x-PCP plot.

        **NOTE(review):** the mutable default argument `percentile` is safe only because it is
        never mutated in place here (it is rebound when scalar) — keep it that way.
        """
        # Route the report to the log file when one is configured, else stdout
        # (print with file=None writes to sys.stdout).
        if self.dir is None:
            output = None
        else:
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)  # append at the end of the existing log
        print(
            "--------------------------------------------------------------------------------", file=output)
        # end=" " keeps the objective count on the same header line.
        print("F-DISTRIBUTION FEATURES", end=" ", file=output)
        if self.f_dim > 1:
            print("(" + str(self.f_dim) + " OBJECTIVES)", file=output)
        else:
            print("", file=output)
        print(
            "--------------------------------------------------------------------------------", file=output)
        # print("Number of points sampled : ",[self.npoints],file=output)
        # Magnitudes are reported in the ORIGINAL (unscaled) fitness units:
        # f_offset is the per-objective minimum and f_span the peak-to-peak.
        print("Fitness magnitude :", file=output)
        print(" Min : ",
              [round(i, round_to) for i in self.f_offset], file=output)
        print(" Max : ", [
              round(i + j, round_to) for i, j in zip(self.f_span, self.f_offset)], file=output)
        print(" Peak-to-peak (scale factor) : ",
              [round(i, round_to) for i in self.f_span], file=output)
        # The statistics below come from the scaled sample (see _mean, _std, ...).
        print("Fitness distribution :", file=output)
        print(" Mean : ",
              [round(i, round_to) for i in self._mean()], file=output)
        print(" Standard deviation : ",
              [round(i, round_to) for i in self._std()], file=output)
        if percentile != []:
            print(" Percentiles :", file=output)
            # Accept a bare number as a single percentile.
            if isinstance(percentile, (int, float)):
                percentile = [percentile]
            percentile_values = self._percentile(percentile)
            # NOTE(review): the three branches below print identically; the split
            # by percentile magnitude looks like leftover alignment code.
            for (j, k) in zip(percentile, percentile_values):
                if j < 10:
                    print(" ", j, ": ",
                          [round(i, round_to) for i in k], file=output)
                elif j == 100:
                    print(" ", j, ": ",
                          [round(i, round_to) for i in k], file=output)
                else:
                    print(" ", j, ": ",
                          [round(i, round_to) for i in k], file=output)
        print(" Skew : ",
              [round(i, round_to) for i in self._skew()], file=output)
        print(" Kurtosis : ", [
              round(i, round_to) for i in self._kurtosis()], file=output)
        print("Number of peaks of f-distribution : ",
              self._n_peaks_f(), file=output)
        if output is not None:
            output.close()
        if plot_f_distribution:
            self.plot_f_distr()
        # percentile_values only exists when percentile != [], hence the guard.
        if plot_x_pcp and percentile != []:
            self.plot_x_pcp(percentile, percentile_values)
def _skew(self):
"""
**Returns** the skew of the f-distributions in the form of a list [fitness dimension].
**USAGE:**
analysis._skew()
"""
try:
from scipy.stats import skew
except ImportError:
raise ImportError(
"analysis._skew needs scipy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._skew: sampling first is necessary")
return skew(self.f).tolist()
def _kurtosis(self):
"""
**Returns** the kurtosis of the f-distributions in the form of a list [fitness dimension].
**USAGE:**
analysis._kurtosis()
"""
try:
from scipy.stats import kurtosis
except ImportError:
raise ImportError(
"analysis._kurtosis needs scipy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._kurtosis: sampling first is necessary")
return kurtosis(self.f).tolist()
def _mean(self):
"""
**Returns** the mean values of the f-distributions in the form of a list [fitness dimension].
**USAGE:**
analysis._mean()
"""
try:
from numpy import mean
except ImportError:
raise ImportError(
"analysis._mean needs numpy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._mean: sampling first is necessary")
return mean(self.f, 0).tolist()
def _var(self):
"""
**Returns** the variances of the f-distributions in the form of a list [fitness dimension].
**USAGE:**
analysis._var()
**NOTE:** not corrected, (averages with /N and not /(N-1))
"""
try:
from numpy import var
except ImportError:
raise ImportError(
"analysis._var needs numpy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._var: sampling first is necessary")
return var(self.f, 0).tolist()
def _std(self):
"""
**Returns** the standard deviations of the f-distributions in the form of a list
[fitness dimension].
**USAGE:**
analysis._std()
**NOTE:** not corrected (averages with /N and not /(N-1))
"""
try:
from numpy import std
except ImportError:
raise ImportError(
"analysis._std needs numpy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._std: sampling first is necessary")
return std(self.f, 0).tolist()
def _ptp(self):
"""
**Returns** the peak-to-peak range of the f-distributions in the form of a list [fitness dimension].
**USAGE:**
analysis._ptp()
"""
try:
from numpy import ptp
except ImportError:
raise ImportError(
"analysis._ptp needs numpy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._ptp: sampling first is necessary")
return ptp(self.f, 0).tolist()
def _percentile(self, p):
"""
**Returns** the percentile(s) of the f-distributions specified in p inthe form of a list
[length p][fitness dimension].
**USAGE:**
analysis._percentile(p=[0,10,25,50,75,100])
* p: percentile(s) to return. Can be a single int/float or a list.
"""
try:
from numpy import percentile
except ImportError:
raise ImportError(
"analysis._percentile needs numpy to run. Is it installed?")
if self.npoints == 0:
raise ValueError(
"analysis._percentile: sampling first is necessary")
if isinstance(p, (list, tuple)):
return [percentile(self.f, i, 0).tolist() for i in p]
else:
return percentile(self.f, p, 0).tolist()
    def _n_peaks_f(self, nf=0):
        """
        **Returns** the number of peaks of the f-distributions in the form of a list [fitness dimension].

        A Gaussian KDE of each objective's sample is evaluated on a regular grid;
        local minima of the density split it into modes, and a mode counts as a
        peak when it carries more than 1% of the probability mass.

        **USAGE:**
                analysis._n_peaks_f([nf=100])
        * nf: discretisation of the f-distributions used to find their peaks. Defaults to npoints-1.
        """
        try:
            from numpy import array, zeros
            from scipy.stats import gaussian_kde
        except ImportError:
            raise ImportError(
                "analysis._n_peaks_f needs scipy, numpy and matplotlib to run. Are they installed?")
        if self.npoints == 0:
            raise ValueError(
                "analysis._n_peaks_f: sampling first is necessary")
        if nf == 0:
            nf = self.npoints - 1
        elif nf < 3:
            raise ValueError(
                "analysis._n_peaks_f: value of nf too small")
        npeaks = []
        for i in range(self.f_dim):
            npeaks.append(0)
            # Column i of the fitness sample.
            f = [a[i] for a in self.f]
            kde = gaussian_kde(f)
            # Grid of nf+1 equally spaced abscissae spanning the observed range.
            df = self._ptp()[i] / nf
            x = [min(f)]
            for j in range(0, nf):
                x.append(x[j] + df)
            y = kde(x)
            # Collect interior local minima of the density; together with the
            # two endpoints they delimit the modes.
            minidx = [0]
            k = 1
            for (a, b, c) in zip(y[0:nf - 1], y[1:nf], y[2:nf + 1]):
                if a > b < c:
                    minidx.append(k)
                k += 1
            minidx.append(nf + 1)
            # Probability mass of each mode via the KDE's exact 1-D integral.
            mode_mass = [kde.integrate_box_1d(
                x[minidx[j]], x[minidx[j + 1] - 1]) for j in range(len(minidx) - 1)]
            # Ignore modes carrying <=1% of the mass (noise of the estimate).
            for mode in mode_mass:
                if mode > 0.01:
                    npeaks[i] += 1
        return npeaks
##########################################################################
# FITNESS LINEARITY AND CONVEXITY
##########################################################################
def f_linearity_convexity(self, n_pairs=0, tol=10 ** (-8), round_to=3):
"""
This function gives the user information about the probability of linearity and convexity
of the fitness function(s). See analysis._p_lin_conv for a more thorough description of
these tests. All properties are shown per objective.
**USAGE:**
analysis.f_linearity_convexity([n_pairs=1000, tolerance=10**(-8), round_to=4])
* n_pairs: number of pairs of points used in the test. If set to 0, it will use as many pairs of points as points there are in the sample. Defaults to 0.
* tol: tolerance considered to rate the function as linear or convex between two points. Defaults to 10**(-8).
* round_to: precision of the results printed. Defaults to 3.
**Prints to screen or file:**
* Number of pairs of points used in test
* Probability of linearity [0,1].
* Probability of convexity [0,1].
* Mean deviation from linearity, scaled with corresponding fitness scale factor.
**NOTE:** integer variable values are fixed during each of the tests and linearity or convexity
is assessed as regards the continuous part of the chromosome.
"""
if self.dir is None:
output = None
else:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
print(
"-------------------------------------------------------------------------------", file=output)
print("PROBABILITY OF LINEARITY AND CONVEXITY", file=output)
print(
"-------------------------------------------------------------------------------", file=output)
p = self._p_lin_conv(n_pairs, tol)
print("Number of pairs of points used : ",
[self.lin_conv_npairs], file=output)
print("Probability of linearity : ",
[round(i, round_to) for i in p[0]], file=output)
print("Probability of convexity : ",
[round(i, round_to) for i in p[1]], file=output)
print("Mean deviation from linearity : ",
[round(i, round_to) for i in p[2]], file=output)
if output is not None:
output.close()
    def _p_lin_conv(self, n_pairs=0, threshold=10 ** (-10)):
        """
        Tests the probability of linearity and convexity and the mean deviation from linearity of
        the f-distributions obtained. A pair of points (X1,F1),(X2,F2) from the sample is selected
        per test and a random convex combination of them is taken (Xconv,Fconv). For each objective,
        if F(Xconv)=Fconv within tolerance, the function is considered linear there. Otherwise, if
        F(Xconv)<Fconv, the function is considered convex. abs(F(Xconv)-Fconv) is the linear deviation.
        The average of all tests performed gives the overall result.

        **USAGE:**
                analysis._p_lin_conv([n_pairs=100, threshold=10**(-10)])
        * n_pairs: number of pairs of points used in the test. If set to 0, it will use as many pairs
          of points as points there are in the sample. Defaults to 0.
        * threshold: tolerance considered to rate the function as linear or convex between two points.
          Defaults to 10**(-10).

        **Returns** a tuple of length 3 containing:
        * p_lin[fitness dimension]: probability of linearity [0,1].
        * p_conv[fitness dimension]: probability of convexity [0,1].
        * mean_dev[fitness dimension]: mean deviation from linearity as defined above (scaled with
          corresponding fitness scaling factor).

        **NOTE:** integer variables values are fixed during each of the tests and linearity or convexity
        is evaluated as regards the continuous part of the chromosome.
        """
        if self.npoints == 0:
            raise ValueError(
                "analysis._p_lin_conv: sampling first is necessary")
        if self.cont_dim == 0:
            raise ValueError(
                "analysis._p_lin_conv: this test makes no sense for purely integer problems")
        if n_pairs == 0:
            n_pairs = self.npoints
        try:
            from numpy.random import random, randint
            from numpy import array, multiply, divide, zeros
        except ImportError:
            raise ImportError(
                "analysis._p_lin_conv needs numpy to run. Is it installed?")
        # Counters accumulated over all pairs, one slot per objective.
        p_lin = zeros(self.f_dim)
        p_conv = zeros(self.f_dim)
        mean_dev = zeros(self.f_dim)
        for i in range(n_pairs):
            # Pick two distinct sample indices.
            i1 = randint(self.npoints)
            i2 = randint(self.npoints)
            while (i2 == i1):
                i2 = randint(self.npoints)
            r = random()
            # Convex combination in the SCALED [0,1] space.
            x = r * array(self.points[i1]) + (1 - r) * array(self.points[i2])
            if self.cont_dim != self.dim:
                # Freeze the integer tail at point i1's values, then rescale x
                # back to the original box bounds for objfun evaluation.
                x[self.cont_dim:] = self.points[i1][self.cont_dim:]
                x = multiply(
                    array(x), array(self.ub) - array(self.lb)) + array(self.lb)
                # Re-evaluate point i2 with i1's integer tail so the linear
                # interpolant compares like with like; rescale f back to [0,1].
                x2 = multiply(array(self.points[i2][:self.cont_dim] + self.points[i1][
                    self.cont_dim:]), array(self.ub) - array(self.lb)) + array(self.lb)
                f2 = divide(
                    array(self.prob.objfun(x2)) - array(self.f_offset), self.f_span)
                f_lin = r * array(self.f[i1]) + (1 - r) * array(f2)
            else:
                # Purely continuous: rescale x and interpolate stored fitnesses.
                x = multiply(
                    array(x), array(self.ub) - array(self.lb)) + array(self.lb)
                f_lin = r * array(self.f[i1]) + (1 - r) * array(self.f[i2])
            # True fitness at the combination point, scaled to [0,1].
            f_real = divide(
                array(self.prob.objfun(x)) - array(self.f_offset), self.f_span)
            delta = f_lin - f_real
            mean_dev += abs(delta)
            for j in range(self.f_dim):
                if abs(delta[j]) < threshold:
                    p_lin[j] += 1
                elif delta[j] > 0:
                    # Interpolant above the function: locally convex.
                    p_conv[j] += 1
        # Turn counters into frequencies / averages.
        p_lin /= n_pairs
        p_conv /= n_pairs
        mean_dev /= n_pairs
        self.lin_conv_npairs = n_pairs
        return (list(p_lin), list(p_conv), list(mean_dev))
##########################################################################
# FITNESS REGRESSION
##########################################################################
def f_regression(self, degree=[], interaction=False, pred=True, tol=10 ** (-8), round_to=3):
"""
This function performs polynomial regressions on each objective function and measures the
precision of these regressions.
**USAGE:**
analysis.f_regression(degree=[1,1,2] [, interaction= [False,True,False], pred=True, tol=10**(-8),round_to=4])
* degree: integer (or list of integers) specifying the degree of the regression(s) to perform.
* interaction: bool (or list of bools of same length as degree). If True, interaction products of
first order will be added. These are all terms of order regression_degree+1 that involve at least 2
variables. If a single boolean is input, this will be applied to all regressions performed. Defaults
to False.
* pred: bool (or list of bools of same length as degree). If True, prediction propperties will also
be evaluated (their process of evaluation involves performing one regression per point in the sample).
These are the last 2 columns of the output table. If a single boolean is input, this will be applied
to all regressions performed. Defaults to True.
* tol: tolerance to consider a coefficient of the regression model as zero. Defaults to 10**(-8).
* round_to: precision of the results printed. Defaults to 3.
**Prints to screen or file:**
* Degree: Degree of the regression. (i) indicates the addition of interaction products.
* F: F-statistic value of the regression.
* R2: R-square coefficient.
* R2adj: adjusted R-square coefficient.
* RMSE: Root Mean Square Eror.
* R2pred: prediction R-square coefficient.
* PRESS-RMSE: prediction RMSE.
**REF:** http://www.cavs.msstate.edu/publications/docs/2005/01/741A%20comparative%20study.pdf
"""
if self.dir is None:
output = None
else:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
if isinstance(degree, int):
degree = [degree]
if len(degree) > 0:
if isinstance(interaction, bool):
interaction = [interaction] * len(degree)
if isinstance(pred, bool):
pred = [pred] * len(degree)
if len(degree) != len(interaction) or len(degree) != len(pred):
raise ValueError(
"analysis.f_regression: format of arguments is incorrect")
print(
"-------------------------------------------------------------------------------", file=output)
print("F-REGRESSION", file=output)
print(
"-------------------------------------------------------------------------------", file=output)
properties = []
for deg, inter, predi in zip(degree, interaction, pred):
properties.append(self._regression_properties(
degree=deg, interaction=interaction, mode='f', pred=predi, tol=tol, w=None))
for f in range(self.f_dim):
if self.f_dim > 1:
print("OBJECTIVE " + str(f + 1) + " :", file=output)
spaces = [7, 17, 9, 9, 11, 9, 11]
print("DEGREE".center(spaces[0]), "F".center(spaces[1]), "R2".center(spaces[2]), "R2adj".center(spaces[
3]), "RMSE".center(spaces[4]), "R2pred".center(spaces[5]), "PRESS-RMSE".center(spaces[6]), file=output)
for deg, inter, prop in zip(degree, interaction, properties):
if inter:
print(
(str(deg) + '(i)').center(spaces[0]), end=' ', file=output)
else:
print(str(deg).center(spaces[0]), end=' ', file=output)
for i, s in zip(prop[f], spaces[1:]):
if i is None:
print("X".center(s), end=' ', file=output)
else:
string = str(i).split('e')
if len(string) > 1:
print(
(str(round(float(string[0]), round_to)) + 'e' + string[1]).center(s), end=' ', file=output)
else:
print(
str(round(i, round_to)).center(s), end=' ', file=output)
print(file=output)
if output is not None:
output.close()
def _regression_coefficients(self, degree, interaction=False, mode='f', A=None):
"""
Performs a polynomial regression on the sampled dataset and **Returns** the
coefficients of the polynomial model.
**USAGE:**
analysis._regression_coefficients(degree=2 [, interaction=True, mode='f'])
* regression_degree: degree of polynomial regression.
* interaction: if True, interaction products of first order will be added. These
are all terms of order regression_degree+1 that involve at least 2 variables.
Defaults to False.
* mode: 'f' to perform the regression on the fitness values dataset, 'c' to
perform it on the constraint function values dataset.
* A: matrix of polynomial terms as returned by _build_polynomial. This argument is
added for reusability, if set to None, it will be calculated. Defaults to None.
**Returns**:
* w[fitness/constraint dimension][number of coefficients]: coefficients of the
regression model, ordered as follows: highest order first, lexicographical.
"""
if self.npoints == 0:
raise ValueError(
"analysis._regression_coefficients: sampling first is necessary")
if degree < 1 or degree > 10:
raise ValueError(
"analysis._regression_coefficients: regression_degree needs to be [1,10]")
if mode == 'c':
if self.c_dim == 0:
raise ValueError(
"analysis._regression_coefficients: selected mode 'c' for unconstrained problem")
elif self.c == []:
raise ValueError(
"analysis._regression_coefficients: computing constraints first is necessary")
else:
if mode != 'f':
raise ValueError(
"analysis._regression_coefficients: mode needs to be 'f' or 'c' ")
try:
from numpy.linalg import lstsq
from numpy import array
except ImportError:
raise ImportError(
"analysis._regression_coefficients needs numpy to run. Is it installed?")
if A is None:
A = self._build_polynomial(self.points, degree, interaction)
if mode == 'f':
l = self.f_dim
else:
l = self.c_dim
w = []
for i in range(l):
if mode == 'f':
b = [self.f[j][i] for j in range(self.npoints)]
else:
b = [self.c[j][i] for j in range(self.npoints)]
b = array(b)
temp = lstsq(A, b)[0]
w.append(list(temp))
return w
    def _regression_properties(self, degree, interaction=False, mode='f', pred=True, tol=10 ** (-8), w=None):
        """
        Tests the precision and extracts the properties of a regression model fitting the dataset.

        **USAGE:**
                analysis._regression_properties(degree=1 [, interaction=False, mode='f', pred=False, tol=10**(-8), w=None])
        * degree: degree of regression.
        * interaction: if True, interaction products of first order will be added. These are all terms
          of order regression_degree+1 that involve at least 2 variables. Defaults to False.
        * mode: 'f' for a regression model of the fitness values dataset, 'c' for the constraint function
          values dataset. Defaults to 'f'.
        * pred: if True, prediction properties will also be evaluated by calling _regression_press.
          Evaluation of these properties involves fitting of as many regressions as points in the dataset.
          Defaults to True.
        * tol: tolerance to consider a coefficient of the model as zero. Defaults to 10**(-8).
        * w: coefficients of the regression model whose properties one wants to assess. This argument
          is added for reusability, if set to None they will be calculated. Defaults to None.

        **Returns** list of size [fitness/constraint dimension][6] containing, per function:
        F-statistic, R2, adjusted R2, RMSE, prediction R2, PRESS-RMSE (the last two are None
        when pred is False).

        **REF:** http://www.cavs.msstate.edu/publications/docs/2005/01/741A%20comparative%20study.pdf
        """
        try:
            from numpy import var, array, zeros
        except ImportError:
            raise ImportError(
                "analysis._regression_properties needs numpy to run. Is it installed?")
        if mode == 'f':
            y = self.f
            y_dim = self.f_dim
        elif mode == 'c':
            y = self.c
            y_dim = self.c_dim
        if w is None:
            w = self._regression_coefficients(degree, interaction, mode)
        # Total sum of squares per column: N * population variance.
        sst = self.npoints * var(y, 0)
        # Residual sum of squares per column; builtin sum over the first axis
        # of the (npoints, y_dim) array of squared residuals yields a vector.
        sse = sum((array(self._regression_predict(
            w, self.points, degree, interaction)) - array(y)) ** 2, 0)
        output = []
        if pred:
            # PRESS requires one leave-one-out refit per sample point.
            press = self._regression_press(degree, interaction, mode)
        for i in range(y_dim):
            # p counts the effective (non-zero) coefficients, excluding the
            # intercept (last entry of w[i]).
            p = 0
            tmp = []
            for j in w[i][:-1]:
                if abs(j) > tol:
                    p += 1
            tmp.append(
                ((sst[i] - sse[i]) / sse[i]) * (self.npoints - p - 1) / p)  # F
            tmp.append(1 - sse[i] / sst[i])  # R2
            # R2adj
            tmp.append(
                1 - (1 - tmp[1]) * (self.npoints - 1) / (self.npoints - p - 1))
            tmp.append((sse[i] / (self.npoints - p - 1)) ** 0.5)  # RMSE
            if pred:
                tmp.append(1 - press[i] / sst[i])  # R2pred
                tmp.append((press[i] / (self.npoints)) ** 0.5)  # PRESS-RMSE
            else:
                # Prediction columns not computed: placeholders for the report.
                tmp.extend([None, None])
            output.append(tmp)
        return output
    def _regression_press(self, degree, interaction=False, mode='f'):
        """
        Calculates the PRESS (leave-one-out prediction error sum of squares) of a
        regression model on the dataset. This involves fitting as many models as
        points there are in the dataset.

        **USAGE:**
                analysis._regression_press(degree=1 [, interaction=False, mode='c'])
        * degree: of the regression.
        * interaction: if True, interaction products of first order will be added. These are all terms
          of order regression_degree+1 that involve at least 2 variables. Defaults to False.
        * mode: 'f' for a regression model of the fitness values dataset, 'c' for the constraint function
          values dataset. Defaults to 'f'.

        **Returns** PRESS [fitness/constraint dimension].

        **NOTE(review):** this routine temporarily mutates self.points, self.npoints and the
        target dataset (pop from the front, append at the back) to realise the leave-one-out
        rotation; everything is restored by the time it returns, but it is NOT safe to call
        concurrently with anything else reading those attributes.
        """
        try:
            from numpy import array, zeros
        except ImportError:
            raise ImportError(
                "analysis._regression_press needs numpy to run. Is it installed?")
        if mode == 'f':
            y_dim = self.f_dim
            y = self.f
        elif mode == 'c':
            y_dim = self.c_dim
            y = self.c
        press = zeros(y_dim)
        A = self._build_polynomial(self.points, degree, interaction)
        # Shrink npoints by one so each refit (inside _regression_coefficients)
        # uses exactly the npoints-1 retained rows.
        self.npoints -= 1
        for i in range(self.npoints + 1):
            # Rotate the held-out point: remove it from the front of every
            # parallel list, fit without it, predict it, then re-append it.
            x = self.points.pop(0)
            a = A.pop(0)
            yreal = y.pop(0)
            w = self._regression_coefficients(degree, interaction, mode, A)
            ypred = array(
                self._regression_predict(w, [x], degree, interaction))[0]
            press = press + (ypred - yreal) ** 2
            A.append(a)
            self.points.append(x)
            y.append(yreal)
        # Restore the true sample size.
        self.npoints += 1
        return press.tolist()
def _build_polynomial(self, x, degree, interaction=False):
"""
Builds the polynomial base necessary to fit or evaluate a regression model.
**USAGE:**
analysis._build_polynomial(x=analysis.points, degree=2 [,interaction=True])
* x [number of points][dimension]: chromosome (or list of chromosomes) of the point (or points)
whose polynomial is built.
* degree: degree of the polynomial.
* interaction: if True, interaction products of first order will be added. These are all terms
of order regression_degree+1 that involve at least 2 variables. Defaults to False.
**Returns**:
* A[number of points][number of terms in the polynomial].
"""
from itertools import combinations_with_replacement
if interaction:
coef = list(
combinations_with_replacement(range(self.dim), degree + 1))
for i in range(self.dim):
coef.remove((i,) * (degree + 1))
else:
coef = []
for i in range(degree, 1, -1):
coef = coef + \
list(combinations_with_replacement(range(self.dim), i))
n_coef = len(coef)
A = []
for i in range(len(x)):
c = []
for j in range(n_coef):
prod = 1
for k in range(len(coef[j])):
prod *= x[i][coef[j][k]]
c.append(prod)
A.append(c + x[i] + [1])
return A
def _regression_predict(self, coefficients, x, degree, interaction=False):
"""
Routine that, given the coefficients of a regression model and a point, calculates the
predicted value for that point.
**USAGE:**
analysis._regression_predict(coefficients=[1,1,1,1], x=[[0,0,0],[1,1,1]], degree=1 [,interaction=False])
* coefficients[fitness/constraint dimension][number of coefficients]: of the regression model
* x[number of points][dimension]: chromosome of point to evaluate.
* degree: of the regression model.
* interaction: if True, interaction products of first order will be added. These are all terms
of order regression_degree+1 that involve at least 2 variables. Defaults to False.
**Returns**:
* prediction[number of points][fitness/constraint dimension].
"""
try:
from numpy import array, dot, transpose
except ImportError:
raise ImportError(
"analysis._regression_predict needs numpy to run. Is it installed?")
polynomial = array(self._build_polynomial(x, degree, interaction))
prediction = dot(polynomial, transpose(coefficients))
return prediction.tolist()
##########################################################################
# OBJECTIVES CORRELATION
##########################################################################
def f_correlation(self, tc=0.95, tabs=0.1, round_to=3):
"""
This function performs first dimensionality reduction via PCA on the fitness sample of
multi-objective problems following the algorithm proposed in the reference. It also gives the user other informations about objective
function correlation for a possible fitness dimensionality reduction.
**REF:** Deb K. and Saxena D.K, On Finding Pareto-Optimal Solutions Through Dimensionality
Reduction for Certain Large-Dimensional Multi-Objective Optimization Problems, KanGAL
Report No. 2005011, IIT Kanpur, 2005.
**USAGE:**
analysis.f_correlation([tc=0.95, tabs=0.1, round_to=4])
* tc: threshold cut. When the cumulative contribution of the eigenvalues absolute value
equals this fraction of its maximum value, the reduction algorithm stops. A higher
threshold cut means less reduction (see reference). Defaults to 0.95.
* tabs: absolute tolerance. A Principal Component is treated differently if the absolute
value of its corresponding eigenvalue is lower than this value (see reference). Defaults
to 0.1.
**Prints to screen or file:**
* Critical objectives from first PCA: objectives not to be eliminated of the problem.
* Eigenvalues, relative contribution, eigenvectors (of the objective correlation matrix).
* Objective correlation matrix.
"""
from numpy import asarray, ones
if self.dir is None:
output = None
else:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
print(
"--------------------------------------------------------------------------------", file=output)
print("OBJECTIVES CORRELATION ", file=output)
print(
"--------------------------------------------------------------------------------", file=output)
if self.f_dim == 1:
print("This is a single-objective problem.", file=output)
else:
obj_corr = self._f_correlation()
critical_obj = self._perform_f_pca(obj_corr, tc=tc, tabs=tabs)
print("Critical objectives from first PCA : ",
[int(i + 1) for i in critical_obj], file=output)
print("Eigenvalues".center(12), "Relative contribution".center(
23), "Eigenvectors".center(45), file=output)
total_ev = sum(obj_corr[1])
for i in range(self.f_dim):
print(str(round(obj_corr[1][i], round_to)).center(12), (str(round(100 * abs(obj_corr[1][i]) / sum([abs(e) for e in obj_corr[
1]]), round_to)) + '%').center(23), str([round(val, round_to) for val in obj_corr[2][i]]).center(45), file=output)
print("Objective correlation matrix : ", file=output)
for i in range(self.f_dim):
print(" [", end='', file=output)
for j in obj_corr[0][i]:
print(
str(round(j, round_to)).center(8), end='', file=output)
print("]", file=output)
if output is not None:
output.close()
def _f_correlation(self):
"""
Calculates the objective correlation matrix and its eigenvalues and eigenvectors. Only for
multi-objective problems.
**USAGE:**
analysis._f_correlation()
**Returns** tuple of 3 containing:
* M[search dimension][search dimension]: correlation matrix.
* eval[search dimension]: its eigenvalues.
* evect[search dimension][search dimension]: its eigenvectors.
"""
if self.npoints == 0:
raise ValueError(
"analysis._f_correlation: sampling first is necessary")
if self.f_dim < 2:
raise ValueError(
"analysis._f_correlation: this test makes no sense for single-objective optimisation")
try:
from numpy import corrcoef, transpose, dot
from numpy.linalg import eigh
except ImportError:
raise ImportError(
"analysis._f_correlation needs numpy to run. Is it installed?")
M = corrcoef(self.f, rowvar=0)
e = eigh(M)
return (M.tolist(), e[0].tolist(), transpose(e[1]).tolist())
def _perform_f_pca(self, obj_corr=None, tc=0.95, tabs=0.1):
"""
Performs first Objective Reduction using Principal Component Analysis on the objective
correlation matrix as defined in the reference and **Returns** a list of the relevant objectives
according to this procedure. Only for multi-objective problems.
**USAGE:** analysis._perform_f_pca([obj_corr=None, tc=0.95, tabs=0.1])
* obj_corr: objective correlation matrix, its eigenvalues and eigenvectors, in the form of the output of analysis._f_correlation. This parameter is added for reusability (if None, these will be calculated). Defaults to None.
* tc: threshold cut. When the cumulative contribution of the eigenvalues absolute value equals this fraction of its maximum value, the reduction algorithm stops. A higher threshold cut means less reduction (see reference). Defaults to 0.95.
* tabs: absolute tolerance. A Principal Component is treated differently if the absolute value of its corresponding eigenvalue is lower than this value (see reference). Defaults to 0.1.
**Returns**:
* Keep: list of critical objectives or objectives to keep (zero-based).
**REF:** Deb K. and Saxena D.K, On Finding Pareto-Optimal Solutions Through Dimensionality
Reduction for Certain Large-Dimensional Multi-Objective Optimization Problems, KanGAL Report
No. 2005011, IIT Kanpur, 2005.
"""
try:
from numpy import asarray, corrcoef, transpose, dot, argmax, argmin
from numpy.linalg import eigh
from itertools import combinations
except ImportError:
raise ImportError(
"analysis._perform_f_pca needs numpy to run. Is it installed?")
if obj_corr is None:
obj_corr = self._f_correlation()
M = obj_corr[0]
eigenvals = asarray(obj_corr[1])
eigenvects = asarray(obj_corr[2])
# eigenvalue elimination of redundant objectives
contributions = (
asarray(abs(eigenvals)) / sum(abs(eigenvals))).tolist()
l = len(eigenvals)
eig_order = [
y for (x, y) in sorted(zip(contributions, range(l)), reverse=True)]
cumulative_contribution = 0
keep = []
first = True
for i in eig_order:
index_p, index_n = argmax(eigenvects[i]), argmin(eigenvects[i])
p, n = eigenvects[i][index_p], eigenvects[i][index_n]
if first:
first = False
if p > 0:
if all([k != index_p for k in keep]):
keep.append(index_p)
if n < 0:
if all([k != index_n for k in keep]):
keep.append(index_n)
else:
keep = range(l)
break
elif abs(eigenvals[i]) < tabs:
if abs(p) > abs(n):
if all([k != index_p for k in keep]):
keep.append(index_p)
else:
if all([k != index_n for k in keep]):
keep.append(index_n)
else:
if n >= 0:
if all([k != index_p for k in keep]):
keep.append(index_p)
elif p <= 0:
keep = range(l)
break
else:
if abs(n) >= p >= 0.9 * abs(n):
if all([k != index_p for k in keep]):
keep.append(index_p)
if all([k != index_n for k in keep]):
keep.append(index_n)
elif p < 0.9 * abs(n):
if all([k != index_n for k in keep]):
keep.append(index_n)
else:
if abs(n) >= 0.8 * p:
if all([k != index_p for k in keep]):
keep.append(index_p)
if all([k != index_n for k in keep]):
keep.append(index_n)
else:
if all([k != index_p for k in keep]):
keep.append(index_p)
cumulative_contribution += contributions[i]
if cumulative_contribution >= tc or len(keep) == l:
break
# correlation elimination of redundant objectives
if len(keep) > 2:
c = list(combinations(keep, 2))
for i in range(len(c)):
if all([x * y > 0 for x, y in zip(M[c[i][0]], M[c[i][1]])]) and any([k == c[i][1] for k in keep]) and any([k == c[i][0] for k in keep]):
if keep.index(c[i][0]) < keep.index(c[i][1]):
keep.remove(c[i][1])
else:
keep.remove(c[i][0])
return sorted(keep)
##########################################################################
# FITNESS SENSITIVITY
##########################################################################
def f_sensitivity(self, hessian=True, plot_gradient_sparsity=True, plot_pcp=True, plot_inverted_pcp=True, sample_size=0, h=0.01, conv_tol=10 ** (-6), zero_tol=10 ** (-8), tmax=15, round_to=3):
"""
This function evaluates the jacobian matrix and hessian tensor in a subset of the sample
in order to extract information about sensitivity of the fitness function(s) with respect
to the search variables. All results are presented per objective and scaled with the
corresponding scale factors.
**USAGE:**
analysis.f_sensitivity([hessian=True, plot_gradient_sparsity=True, plot_pcp=True, plot_inverted_pcp=True, sample_size=0, h=0.01,conv_tol=10**(-6), zero_tol=10**(-8), tmax=15, round_to=3])
* hessian: if True, the hessian tensor and its properties will also be evaluated. Defaults to True.
* plot_gradient_sparsity: if True, the Jacobian matrix sparsity plot will be generated.
* plot_pcp: if True, the gradient PCP (with chromosome in X-axis) will be generated. Defaults to True.
* plot_inverted_pcp: if True, the gradient PCP (with F in X-axis) will be generated. Defaults to True.
* sample_size: number of points to calculate the gradient or hessian at. If set to 0, all the sample will be picked. Defaults to 0.
* h: initial fraction of the search space span used as dx for evaluation of derivatives.
* conv_tol: convergence parameter for Richardson extrapolation method. Defaults to 10**(-6).
* zero_tol: tolerance for considering a component nule during the sparsity test. Defaults to 10**(-8).
* tmax: maximum of iterations for Richardson extrapolation. Defaults to 15.
* round_to: precision of the results printed. Defaults to 3.
**Prints to screen or file:**
* Number of points used.
* Percentiles 0, 25, 50, 75 and 100 of the distribution of:
* Gradient norm.
* abs(dFx)_max/abs(dFx)_min: ratio of maximum to minimum absolute value of partial derivatives of the fitness function gradient.
* Hessian conditioning: ratio of maximum to minimum absolute value of eigenvalues of the fitness function hessian matrix.
* Gradient sparsity: fraction of components of the gradient that are zero at every point.
* Fraction of points with Positive Definite hessian.
* Fraction of points with Positive Semi-Definite (and not Positive-Definite) hessian.
**Shows or saves to file:**
* Gradient/Jacobian sparsity plot.
* Gradient/Jacobian PCP with chromosome in X-axis.
* Gradient/Jacobian PCP with fitness in X-axis.
**NOTE:** this function calls analysis._get_gradient and analysis._get_hessian. Both these
functions store a great number of properties as class attributes. See their respective
entries for more information about these attributes.
"""
if self.dir is None:
output = None
else:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
print(
"-------------------------------------------------------------------------------", file=output)
print("F-SENSITIVITY ", file=output)
print(
"-------------------------------------------------------------------------------", file=output)
self._get_gradient(sample_size=sample_size, h=h,
grad_tol=conv_tol, zero_tol=zero_tol, tmax=tmax, mode='f')
g = self._grad_properties(tol=zero_tol, mode='f')
self._get_hessian(
sample_size=sample_size, h=h, hess_tol=conv_tol, tmax=tmax)
h = self._hess_properties(tol=zero_tol)
print("Number of points used : ", [self.grad_npoints], file=output)
for f in range(self.f_dim):
if self.f_dim > 1:
print("OBJECTIVE " + str(f + 1) + " :", file=output)
print(" Percentiles : ".ljust(28), "0".center(9), "25".center(9), "50".center(
9), "75".center(9), "100".center(9), "", sep="|", file=output)
print(" Gradient norm :".ljust(28), str(round(g[0][f][0], round_to)).center(9), str(round(g[0][f][1], round_to)).center(9), str(round(
g[0][f][2], round_to)).center(9), str(round(g[0][f][3], round_to)).center(9), str(round(g[0][f][4], round_to)).center(9), "", sep="|", file=output)
print(" |dFx|_max/|dFx|_min :".ljust(28), str(round(g[1][f][0], round_to)).center(9), str(round(g[1][f][1], round_to)).center(9), str(round(
g[1][f][2], round_to)).center(9), str(round(g[1][f][3], round_to)).center(9), str(round(g[1][f][4], round_to)).center(9), "", sep="|", file=output)
if hessian:
print(" Hessian conditioning :".ljust(28), str(round(h[0][f][0], round_to)).center(9), str(round(h[0][f][1], round_to)).center(9), str(round(
h[0][f][2], round_to)).center(9), str(round(h[0][f][3], round_to)).center(9), str(round(h[0][f][4], round_to)).center(9), "", sep="|", file=output)
print(" Gradient sparsity : ",
"[" + str(round(self.grad_sparsity, round_to)) + "]", file=output)
print(" Fraction of points with PD hessian : ",
"[" + str(round(h[1][f], round_to)) + "]", file=output)
print(" Fraction of points with PSD (not PD) hessian : ",
"[" + str(round(h[2][f], round_to)) + "]", file=output)
else:
print(" Gradient sparsity : ",
"[" + str(round(self.grad_sparsity, round_to)) + "]", file=output)
if output is not None:
output.close()
if plot_gradient_sparsity:
self.plot_gradient_sparsity(zero_tol=zero_tol, mode='f')
if plot_pcp and self.cont_dim > 1:
self.plot_gradient_pcp(mode='f', invert=False)
if plot_inverted_pcp and self.f_dim > 1:
self.plot_gradient_pcp(mode='f', invert=True)
def _get_gradient(self, sample_size=0, h=0.01, grad_tol=0.000001, zero_tol=0.000001, tmax=15, mode='f'):
"""
Routine that selects points from the sample and calculates the Jacobian matrix in them by
calling richardson_gradient. Also computes its sparsity.
**USAGE:**
analysis._get_gradient([sample_size=100, h=0.01, grad_tol=0.000001, zero_tol=0.000001])
* sample_size: number of points from sample to calculate gradient at. If set to 0, all points
will be used. Defaults to 0.
* zero_tol: sparsity tolerance. For a position of the jacobian matrix to be considered a zero,
its mean absolute value has to be <=zero_tol.
The rest of parameters are passed to _richardson_gradient.
**The following parameters are stored as attributes:**
* analysis.grad_npoints: number of points where jacobian is computed.
* analysis.grad_points[grad_npoints]: indexes of these points in sample list.
* analysis.grad[grad_npoints][fitness dimension][continuous search dimension]:
jacobian matrixes computed.
* analysis.average_abs_gradient[fitness dimension][continuous search dimension]: mean absolute
value of the terms of each jacobian matrix computed.
* analysis.grad_sparsity: fraction of zeros in jacobian matrix (zero for all points).
**NOTE:** all integer variables are ignored for this test.
"""
if self.npoints == 0:
raise ValueError(
"analysis._get_gradient: sampling first is necessary")
if mode == 'f':
dim = self.f_dim
elif mode == 'c':
if self.c_dim == 0:
raise ValueError(
"analysis._get_gradient: mode 'c' selected for unconstrained problem")
else:
dim = self.c_dim
else:
raise ValueError(
"analysis._get_gradient: select a valid mode 'f' or 'c'")
try:
from numpy.random import randint
from numpy import nanmean, asarray
except ImportError:
raise ImportError(
"analysis._get_gradient needs numpy to run. Is it installed?")
if sample_size <= 0 or sample_size >= self.npoints:
grad_points = range(self.npoints)
grad_npoints = self.npoints
else:
grad_npoints = sample_size
grad_points = [randint(self.npoints)
for i in range(sample_size)] # avoid repetition?
grad = []
grad_sparsity = 0
for i in grad_points:
grad.append(self._richardson_gradient(
x=self.points[i], h=h, grad_tol=grad_tol, tmax=tmax, mode=mode))
average_abs_gradient = nanmean(abs(asarray(grad)), 0)
for i in range(dim):
for j in range(self.cont_dim):
if abs(average_abs_gradient[i][j]) <= zero_tol:
grad_sparsity += 1.
grad_sparsity /= (self.cont_dim * dim)
if mode == 'f':
self.grad_npoints = grad_npoints
self.grad_points = grad_points
self.grad = grad
self.average_abs_gradient = average_abs_gradient
self.grad_sparsity = grad_sparsity
else:
self.c_grad_npoints = grad_npoints
self.c_grad_points = grad_points
self.c_grad = grad
self.average_abs_c_gradient = average_abs_gradient
self.c_grad_sparsity = grad_sparsity
    def _richardson_gradient(self, x, h, grad_tol, tmax=15, mode='f'):
        """
        Evaluates jacobian matrix in point x of the search space by means of Richardson
        Extrapolation of central finite differences.

        **USAGE:**
        analysis._richardson_gradient(x=(a point's chromosome), h=0.01, grad_tol=0.000001 [, tmax=15])

        * x: list or tuple containing the chromosome of a point in the search space, where the
        Jacobian Matrix will be evaluated (scaled to the unit hypercube — see the descaling below).
        * h: initial dx taken for evaluation of derivatives, as a fraction of each variable's range.
        * grad_tol: tolerance for convergence of the extrapolation.
        * tmax: maximum of iterations. Defaults to 15.
        * mode: 'f' differentiates the objective function, 'c' the constraint function.

        **Returns** jacobian matrix at point x as a list [fitness dimension][continuous search dimension].

        **NOTE:** all integer variables are ignored for this test.
        """
        from numpy import array, zeros, amax
        if mode == 'f':
            function = self.prob.objfun
            span = self.f_span
            dim = self.f_dim
        elif mode == 'c':
            function = self.prob.compute_constraints
            span = self.c_span
            dim = self.c_dim
        # NOTE(review): any other mode leaves function/span/dim unbound and
        # raises NameError below — callers are expected to pass 'f' or 'c'.
        # d holds two alternating rows of the Richardson tableau: d[t % 2] is
        # the row being built at iteration t, d[(t + 1) % 2] the previous one.
        d = [[zeros([dim, self.cont_dim])], []]
        hh = 2 * h
        err = 1
        t = 0
        # descale
        x = array(x) * (array(self.ub) - array(self.lb)) + array(self.lb)
        while (err > grad_tol and t < tmax):
            # halve the step at every iteration
            hh /= 2
            for i in range(self.cont_dim):
                xu = x.tolist()
                xd = x.tolist()
                xu[i] += hh * (self.ub[i] - self.lb[i])
                xd[i] -= hh * (self.ub[i] - self.lb[i])
                if xu[i] > self.ub[i] or xd[i] < self.lb[i]:
                    # probe point would leave the bounds: record zeros instead
                    tmp = zeros(dim)
                else:
                    # rescale
                    tmp = (array(function(xu)) - array(function(xd))) / \
                        (2 * hh * array(span))
                for j in range(dim):
                    d[t % 2][0][j][i] = tmp[j]
            # fill the rest of this tableau row by extrapolating against the
            # previous row (classic Richardson 4**k weights)
            for k in range(1, t + 1):
                d[t % 2][k] = d[t % 2][k - 1] + \
                    (d[t % 2][k - 1] - d[(t + 1) % 2][k - 1]) / (4 ** k - 1)
            if t > 0:
                # convergence measure: largest change between the diagonal
                # entries of consecutive tableau rows
                err = amax(abs(d[t % 2][t] - d[(t + 1) % 2][t - 1]))
            d[(t + 1) %
              2].extend([zeros([dim, self.cont_dim]), zeros([dim, self.cont_dim])])
            t += 1
        return d[(t + 1) % 2][t - 1].tolist()
def _get_hessian(self, sample_size=0, h=0.01, hess_tol=0.000001, tmax=15):
"""
Routine that selects points from the sample and calculates the Hessian 3rd-order tensor in
them by calling richardson_hessian.
**USAGE:**
analysis._get_hessian([sample_size=100, h=0.01, hess_tol=0.000001])
* sample_size: number of points from sample to calculate hessian at. If set to 0, all points
will be used. Defaults to 0.
* The rest of parameters are passed to _richardson_hessian.
**The following parameters are stored as attributes:**
* analysis.hess_npoints: number of points where hessian is computed.
* analysis.hess_points[hess_npoints]: indexes of these points in sample list.
* analysis.hess[hess_npoints][fitness dimension][continuous search dimension][continuous
search dimension]: hessian 3rd-order tensors computed.
**NOTE:** all integer variables are ignored for this test.
"""
if self.npoints == 0:
raise ValueError(
"analysis._get_hessian: sampling first is necessary")
try:
from numpy.random import randint
except ImportError:
raise ImportError(
"analysis._get_hessian needs numpy to run. Is it installed?")
if sample_size <= 0 or sample_size >= self.npoints:
self.hess_points = range(self.npoints)
self.hess_npoints = self.npoints
else:
self.hess_npoints = sample_size
# avoid repetition?
self.hess_points = [randint(self.npoints)
for i in range(sample_size)]
self.hess = []
for i in self.hess_points:
self.hess.append(
self._richardson_hessian(x=self.points[i], h=h, hess_tol=hess_tol, tmax=tmax))
    def _richardson_hessian(self, x, h, hess_tol, tmax=15):
        """
        Evaluates hessian 3rd-order tensor in point x of the search space by means of Richardson
        Extrapolation of central finite differences.

        **USAGE:**
        analysis._richardson_hessian(x=(a point's chromosome), h=0.01, hess_tol=0.000001 [, tmax=15])

        * x: list or tuple containing the chromosome of a point in the search space, where the
        Hessian 3rd-order tensor will be evaluated (scaled to the unit hypercube — see the
        descaling below).
        * h: initial dx taken for evaluation of derivatives, as a fraction of each variable's range.
        * hess_tol: tolerance for convergence of the extrapolation.
        * tmax: maximum of iterations. Defaults to 15.

        **Returns** hessian tensor at point x as a list [fitness dimension][continuous search dimension]
        [continuous search dimension].

        **NOTE:** all integer variables are ignored for this test.
        """
        from numpy import array, zeros, amax
        from itertools import combinations_with_replacement
        # each pair (i, j) with i <= j indexes one distinct second derivative
        ind = list(combinations_with_replacement(range(self.cont_dim), 2))
        n_ind = len(ind)
        # two alternating rows of the Richardson tableau (see _richardson_gradient)
        d = [[zeros([self.f_dim, n_ind])], []]
        hh = 2 * h
        err = 1
        t = 0
        # descale: map x from the unit hypercube back to [lb, ub]
        x = array(x) * (array(self.ub) - array(self.lb)) + array(self.lb)
        while (err > hess_tol and t < tmax):
            hh /= 2
            for i in range(n_ind):
                xu = x.tolist()
                xd = x.tolist()
                xuu = x.tolist()
                xdd = x.tolist()
                xud = x.tolist()
                xdu = x.tolist()
                if ind[i][0] == ind[i][1]:
                    # diagonal term: 3-point central formula for d2f/dxi2
                    xu[ind[i][0]] = xu[ind[i][0]] + hh * \
                        (self.ub[ind[i][0]] - self.lb[ind[i][0]])
                    xd[ind[i][0]] -= hh * \
                        (self.ub[ind[i][0]] - self.lb[ind[i][0]])
                    if xu[ind[i][0]] > self.ub[ind[i][0]] or xd[ind[i][0]] < self.lb[ind[i][0]]:
                        # probe point would leave the bounds: record zeros
                        tmp = zeros(self.f_dim)
                    else:
                        # rescale
                        tmp = (array(self.prob.objfun(xu)) - 2 * array(self.prob.objfun(x)) +
                               array(self.prob.objfun(xd))) / ((hh ** 2) * array(self.f_span))
                else:
                    # off-diagonal term: 4-point cross formula for d2f/dxidxj
                    xuu[ind[i][0]] += hh * \
                        (self.ub[ind[i][0]] - self.lb[ind[i][0]])
                    xuu[ind[i][1]] += hh * \
                        (self.ub[ind[i][1]] - self.lb[ind[i][1]])
                    xdd[ind[i][0]] -= hh * \
                        (self.ub[ind[i][0]] - self.lb[ind[i][0]])
                    xdd[ind[i][1]] -= hh * \
                        (self.ub[ind[i][1]] - self.lb[ind[i][1]])
                    xud[ind[i][0]] += hh * \
                        (self.ub[ind[i][0]] - self.lb[ind[i][0]])
                    xud[ind[i][1]] -= hh * \
                        (self.ub[ind[i][1]] - self.lb[ind[i][1]])
                    xdu[ind[i][0]] -= hh * \
                        (self.ub[ind[i][0]] - self.lb[ind[i][0]])
                    xdu[ind[i][1]] += hh * \
                        (self.ub[ind[i][1]] - self.lb[ind[i][1]])
                    limit_test = [xuu[ind[i][0]] > self.ub[ind[i][0]], xuu[ind[i][1]] > self.ub[ind[i][1]], xdd[ind[i][0]] < self.lb[ind[i][0]], xdd[ind[i][1]] < self.lb[ind[i][1]],
                                  xud[ind[i][0]] > self.ub[ind[i][0]], xud[ind[i][1]] < self.lb[ind[i][1]], xdu[ind[i][0]] < self.lb[ind[i][0]], xdu[ind[i][1]] > self.ub[ind[i][1]]]
                    if any(limit_test):
                        # some probe point fell out of bounds: record zeros
                        tmp = zeros(self.f_dim)
                    else:
                        # rescale
                        tmp = (array(self.prob.objfun(xuu)) - array(self.prob.objfun(xud)) - array(
                            self.prob.objfun(xdu)) + array(self.prob.objfun(xdd))) / (4 * hh * hh * array(self.f_span))
                for j in range(self.f_dim):
                    d[t % 2][0][j][i] = tmp[j]
            # extrapolate this tableau row against the previous one
            for k in range(1, t + 1):
                d[t % 2][k] = d[t % 2][k - 1] + \
                    (d[t % 2][k - 1] - d[(t + 1) % 2][k - 1]) / (4 ** k - 1)
            if t > 0:
                # convergence: change between diagonal entries of consecutive rows
                err = amax(abs(d[t % 2][t] - d[(t + 1) % 2][t - 1]))
            d[(t + 1) %
              2].extend([zeros([self.f_dim, n_ind]), zeros([self.f_dim, n_ind])])
            t += 1
        # unpack the flat (i, j) results into symmetric matrices, one per objective
        hessian = []
        for i in range(self.f_dim):
            mat = zeros([self.cont_dim, self.cont_dim])
            for j in range(n_ind):
                mat[ind[j][0]][ind[j][1]] = d[(t + 1) % 2][t - 1][i][j]
                mat[ind[j][1]][ind[j][0]] = d[(t + 1) % 2][t - 1][i][j]
            hessian.append(mat.tolist())
        return hessian
def _grad_properties(self, tol=10 ** (-8), mode='f'):
"""
Computes some properties of the gradient once it is stored as an attribute.
**USAGE:**
analysis._grad_properties([tol=10**(-8), mode='f'])
* tol: tolerance to consider a partial derivative value as zero. Defaults to 10**(-8).
* mode: 'f'/'c' to act on fitness function/constraint function jacobian matrix.
**Returns** tuple of 2 containing:
* norm_quartiles[fitness/constraint dimension][5]: percentiles 0,25,50,75,100 of
gradient norm (per fitness/constraint function)
* cond_quartiles[fitness/constraint dimension][5]: percentiles 0,25,50,75,100 of
ratio of maximum to minimum absolute value of partial derivatives in that gradient
(per fitness/constraint function).
"""
if mode == 'f':
if self.grad_npoints == 0:
raise ValueError(
"analysis._grad_properties: sampling and getting gradient first is necessary")
dim = self.f_dim
grad = self.grad
npoints = self.grad_npoints
elif mode == 'c':
if self.c_dim == 0:
raise ValueError(
"analysis._grad_properties: mode 'c' selected for unconstrained problem")
if self.c_grad_npoints == 0:
raise ValueError(
"analysis._grad_properties: sampling and getting c_gradient first is necessary")
else:
dim = self.c_dim
grad = self.c_grad
npoints = self.c_grad_npoints
else:
raise ValueError(
"analysis._grad_properties: select a valid mode 'f' or 'c'")
try:
from numpy import percentile
from numpy.linalg import norm
except ImportError:
raise ImportError(
"analysis._grad_properties needs numpy to run. Is it installed?")
cond_quartiles = []
norm_quartiles = []
for f in range(dim):
cond = []
norms = []
for p in range(npoints):
tmp = [abs(i) for i in grad[p][f]]
norms.append(norm(grad[p][f]))
if min(tmp) > tol:
cond.append(max(tmp) / min(tmp))
else:
cond.append(float('inf'))
cond_quartiles.append([])
norm_quartiles.append([])
for i in range(0, 101, 25):
cond_quartiles[f].append(percentile(cond, i).tolist())
norm_quartiles[f].append(percentile(norms, i).tolist())
return (norm_quartiles, cond_quartiles)
def _hess_properties(self, tol=10 ** (-8)):
"""
Computes some properties of the hessian once it is stored as an attribute.
**USAGE:**
analysis._hess_properties([tol=10**(-8)])
* tol: tolerance to consider an eigenvalue as zero. Defaults to 10**(-8).
**Returns** tuple of 3:
* cond_quartiles[fitness dimension][5]: percentiles 0,25,50,75,100 of ratio of maximum to
minimum absolute value of eigenvalues in that hessian matrix (per fitness function).
* pd[fitness dimension]: fraction of points of sample with positive-definite hessian.
* psd[fitness dimension]: fraction of points of sample with positive-semidefinite
(and not positive-definite) hessian matrix.
"""
if self.hess_npoints == 0:
raise ValueError(
"analysis._hess_properties: sampling and getting gradient first is necessary")
if self.dim == 1:
raise ValueError(
"analysis._hess_properties: test not applicable to univariate problems")
try:
from numpy import percentile
from numpy.linalg import eigh
except ImportError:
raise ImportError(
"analysis._hess_properties needs numpy to run. Is it installed?")
pd = [0] * self.f_dim
psd = [0] * self.f_dim
cond_quartiles = []
for f in range(self.f_dim):
cond = []
for p in range(self.hess_npoints):
e = eigh(self.hess[p][f])[0]
eu = max(e)
ed = min(e)
eabs = [abs(i) for i in e]
if min(eabs) > tol:
cond.append(max(eabs) / min(eabs))
else:
cond.append(float('inf'))
if ed >= tol:
pd[f] += 1.
if abs(ed) < tol:
psd[f] += 1.
pd[f] /= self.hess_npoints
psd[f] /= self.hess_npoints
cond_quartiles.append([])
for i in range(0, 101, 25):
cond_quartiles[f].append(percentile(cond, i).tolist())
return (cond_quartiles, pd, psd)
##########################################################################
# CONSTRAINTS FEASIBILITY
##########################################################################
    def c_feasibility(self, tol=10 ** (-8), round_to=3):
        """
        This function gives the user information about the effectivity and possible redundancy of
        the constraints of the problem.

        **USAGE:**
        analysis.c_feasibility([tol=10**(-8), round_to=3])

        * tol: tolerance considered in the assessment of equality. Defaults to 10**(-8).
        * round_to: precision of the results printed. Defaults to 3.

        **Prints to screen or file**, for each of the constraints:
        * Constraint. g indicates inequality constraint of type <=, h indicates equality constraint.
        * Equality constraints:
            * Effectiveness >=0: fraction of the sampled points that satisfy this constraint or
            violate it superiorly.
            * Effectiveness <=0: fraction of the sampled points that satisfy this constraint or
            violate it inferiorly.
            * Number of feasible points found.
        * Inequality constraints:
            * Effectiveness >0: fraction of the sampled points that violate this constraint.
            * Redundancy wrt all other ic: if there is more than one inequality constraint, fraction
            of the points violating this inequality constraint that also violate any of the other.
            * Number of feasible points found.
        * Pairwise redundancy of inequality constraints: table where R_ij is the redundancy of
        constraint g_i (row) with respect to g_j (column), this is the fraction of the points
        violating g_i that also violate g_j.
        """
        if self.dir is None:
            output = None
        else:
            # append results to the session log file
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)
        print(
            "-------------------------------------------------------------------------------", file=output)
        print("C-FEASIBILITY", file=output)
        print(
            "-------------------------------------------------------------------------------", file=output)
        if self.c_dim == 0:
            print("This is an unconstrained problem.", file=output)
        else:
            results = self._c_effectiveness(tol)
            redundancy = self._ic_redundancy(tol)
            # equality constraints h_1..h_{c_dim - ic_dim} come first in results
            for c in range(self.c_dim - self.ic_dim):
                print("Constraint h_" + str(c + 1) + " :", file=output)
                print(" Effectiveness >=0 : ", [
                      round(1 - results[c][0] + results[c][1], round_to)], file=output)
                print(" Effectiveness <=0 : ",
                      [round(results[c][0], round_to)], file=output)
                print(" Number of feasible points found : ", [
                      int(round(results[c][1] * self.npoints, 0))], file=output)
            # inequality constraints g_1..g_{ic_dim}, indexed from the end of results
            for c in range(-self.ic_dim, 0):
                print(
                    "Constraint g_" + str(c + self.ic_dim + 1) + " : ", file=output)
                print(" Effectiveness >0 : ",
                      [round(1 - results[c][0], round_to)], file=output)
                if self.ic_dim > 1:
                    print(" Redundancy wrt. all other ic : ",
                          [round(redundancy[0][c], round_to)], file=output)
                print(" Number of feasible points found : ", [
                      int(round(results[c][0] * self.npoints, 0))], file=output)
            if self.ic_dim > 1:
                # pairwise redundancy table, one row/column per inequality constraint
                print("Pairwise redundancy (ic) :", file=output)
                print("_____|", end='', file=output)
                for i in range(self.ic_dim - 1):
                    print(("g" + str(i + 1)).center(8), end='|', file=output)
                print(("g" + str(self.ic_dim)).center(8), end='|', file=output)
                for i in range(self.ic_dim):
                    print(file=output)
                    print((" g" + str(i + 1)).ljust(5), end='|', file=output)
                    for j in range(self.ic_dim):
                        print(
                            str(round(redundancy[1][i][j], round_to)).center(8), end='|', file=output)
                print(file=output)
        if output is not None:
            output.close()
##########################################################################
# CONSTRAINTS LINEARITY
##########################################################################
def c_linearity(self, npairs=0, tol=10 ** (-8), round_to=3):
"""
This function gives the user information about the probability of linearity of the constraint
function(s). See analysis._c_lin for a more thorough description of this test.
**USAGE:**
analysis.c_linearity([n_pairs=1000, tolerance=10**(-8), round_to=4])
* n_pairs: number of pairs of points used in the test. If set to 0, it will use as many pairs
of points as points there are in the sample. Defaults to 0.
* tol: tolerance considered to rate the function as linear between two points. Defaults to 10**(-8).
* round_to: precision of the results printed. Defaults to 3.
**Prints to screen or file:**
* Number of pairs of points used in test.
* Probability of linearity [0,1] of each constraint.
**NOTE:** integer variable values are fixed during each of the tests and linearity or convexity
is assessed as regards the continuous part of the chromosome.
"""
if self.dir is None:
output = None
else:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
print(
"-------------------------------------------------------------------------------", file=output)
print("C-LINEARITY", file=output)
print(
"-------------------------------------------------------------------------------", file=output)
if self.c_dim == 0:
print("This is an unconstrained problem.", file=output)
else:
results = self._c_lin(npairs, tol)
print("Number of pairs of points used : ",
[self.c_lin_npairs], file=output)
print(" " * 5, "CONSTRAINT".center(25),
"PROBABILITY OF LINEARITY".center(25), file=output)
for c in range(self.c_dim - self.ic_dim):
print(" " * 5, ("h_" + str(c + 1)).center(25),
str([round(results[c], round_to)]).center(25), file=output)
for c in range(-self.ic_dim, 0):
print(" " * 5, ("g_" + str(self.c_dim - self.ic_dim + c)).center(25),
str([round(results[c], round_to)]).center(25), file=output)
if output is not None:
output.close()
##########################################################################
# CONSTRAINTS REGRESSION
##########################################################################
def c_regression(self, degree=[], interaction=False, pred=True, tol=10 ** (-8), round_to=3):
"""
This function performs polynomial regressions on each constraint function and measures the
precision of these regressions.
**USAGE:**
analysis.c_regression(degree=[1,1,2] [, interaction=[False,True,False], pred=True, tol=10**(-8),round_to=4])
* degree: integer (or list of integers) specifying the degree of the regression(s) to perform.
* interaction: bool (or list of bools of same length as degree). If True, interaction products of
first order will be added. These are all terms of order regression_degree+1 that involve at least 2
variables. If a single boolean is input, this will be applied to all regressions performed. Defaults
to False.
* pred: bool (or list of bools of same length as degree). If True, prediction propperties will also
be evaluated (their process of evaluation involves performing one regression per point in the sample).
These are the last 2 columns of the output table. If a single boolean is input, this will be applied
to all regressions performed. Defaults to True.
* tol: tolerance to consider a coefficient of the regression model as zero. Defaults to 10**(-8).
* round_to: precision of the results printed. Defaults to 3.
**Prints to screen or file:**
* Degree: Degree of the regression. (i) indicates the addition of interaction products.
* F: F-statistic value of the regression.
* R2: R-square coefficient.
* R2adj: adjusted R-square coefficient.
* RMSE: Root Mean Square Eror.
* R2pred: prediction R-square coefficient.
* PRESS-RMSE: prediction RMSE.
**REF:** http://www.cavs.msstate.edu/publications/docs/2005/01/741A%20comparative%20study.pdf
"""
if self.dir is None:
output = None
else:
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
if isinstance(degree, int):
degree = [degree]
if len(degree) > 0:
if isinstance(interaction, bool):
interaction = [interaction] * len(degree)
if isinstance(pred, bool):
pred = [pred] * len(degree)
if len(degree) != len(interaction) or len(degree) != len(pred):
raise ValueError(
"analysis.c_regression: format of arguments is incorrect")
print(
"-------------------------------------------------------------------------------", file=output)
print("C-REGRESSION", file=output)
print(
"-------------------------------------------------------------------------------", file=output)
if self.c_dim == 0:
print("This is an unconstrained problem.", file=output)
else:
properties = []
for deg, inter, predi in zip(degree, interaction, pred):
properties.append(self._regression_properties(
degree=deg, interaction=interaction, mode='c', pred=predi, tol=tol, w=None))
for c in range(self.c_dim):
if c < self.c_dim - self.ic_dim:
print("CONSTRAINT h_" + str(c + 1) + " :", file=output)
else:
print(
"CONSTRAINT g_" + str(c - self.c_dim + self.ic_dim + 1) + " :", file=output)
spaces = [7, 17, 9, 9, 11, 9, 11]
print("DEGREE".center(spaces[0]), "F*".center(spaces[1]), "R2".center(spaces[2]), "R2adj".center(spaces[
3]), "RMSE".center(spaces[4]), "R2pred".center(spaces[5]), "PRESS-RMSE".center(spaces[6]), file=output)
for deg, inter, prop in zip(degree, interaction, properties):
if inter:
print(
(str(deg) + '(i)').center(spaces[0]), end=' ', file=output)
else:
print(
str(deg).center(spaces[0]), end=' ', file=output)
for i, s in zip(prop[c], spaces[1:]):
if i is None:
print("X".center(s), end=' ', file=output)
else:
string = str(i).split('e')
if len(string) > 1:
print(
(str(round(float(string[0]), round_to)) + 'e' + string[1]).center(s), end=' ', file=output)
else:
print(
str(round(i, round_to)).center(s), end=' ', file=output)
print(file=output)
if output is not None:
output.close()
##########################################################################
# CONSTRAINTS SENSITIVITY
##########################################################################
    def c_sensitivity(self, plot_gradient_sparsity=True, plot_pcp=True, plot_inverted_pcp=True, sample_size=0, h=0.01, conv_tol=10 ** (-6), zero_tol=10 ** (-8), tmax=15, round_to=3):
        """
        This function evaluates the jacobian matrix of the constraint functions in a subset of the
        sample in order to extract information about sensitivity of the constraints with respect
        to the search variables. All results are presented per constraint.

        **USAGE:**
        analysis.c_sensitivity([plot_gradient_sparsity=True, plot_pcp=True, plot_inverted_pcp=True, sample_size=0, h=0.01, conv_tol=10**(-6), zero_tol=10**(-8), tmax=15,round_to=3])

        * plot_gradient_sparsity: if True, the Jacobian matrix sparsity plot will be generated.
        * plot_pcp: if True, the c-gradient PCP (with chromosome in X-axis) will be generated. Defaults to True.
        * plot_inverted_pcp: if True, the c-gradient PCP (with F in X-axis) will be generated. Defaults to True.
        * sample_size: number of points to calculate the c-gradient at. If set to 0, all the sample will be picked. Defaults to 0.
        * h: initial fraction of the search space span used as dx for evaluation of derivatives.
        * conv_tol: convergence parameter for Richardson extrapolation method. Defaults to 10**(-6).
        * zero_tol: tolerance for considering a component as zero during the sparsity test. Defaults to 10**(-8).
        * tmax: maximum of iterations for Richardson extrapolation. Defaults to 15.
        * round_to: precision of the results printed. Defaults to 3.

        **Prints to screen or file:**

        * Number of points used.
        * Percentiles 0, 25, 50, 75 and 100 of the distribution of:
            * C-Gradient norm.
            * abs(dFx)_max/abs(dFx)_min: ratio of maximum to minimum absolute value of partial derivatives in that constraint function gradient.
            * C-Gradient sparsity: fraction of components of the c-gradient that are zero at every point.

        **Shows or saves to file:**

        * C-Gradient/Jacobian sparsity plot.
        * C-Gradient/Jacobian PCP with chromosome in X-axis.
        * C-Gradient/Jacobian PCP with fitness in X-axis.

        **NOTE:** this function calls analysis._get_gradient, which stores a great number of properties
        as class attributes. See its entry for more information about these attributes.
        """
        # Print to screen (output=None) or append to <dir>/log.txt.
        if self.dir is None:
            output = None
        else:
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)  # move to end of file so prints append
        print(
            "-------------------------------------------------------------------------------", file=output)
        print("C-SENSITIVITY ", file=output)
        print(
            "-------------------------------------------------------------------------------", file=output)
        if self.c_dim == 0:
            print("This is an unconstrained problem.", file=output)
        else:
            # Evaluate the constraint Jacobian over the (sub)sample; results are
            # stored as attributes by _get_gradient (mode='c' => constraints).
            self._get_gradient(
                sample_size=sample_size, h=h, grad_tol=conv_tol, zero_tol=zero_tol, tmax=tmax, mode='c')
            g = self._grad_properties(tol=zero_tol, mode='c')
            # NOTE(review): the loop runs over ic_dim (inequality constraints
            # only) while the heading prints "g_" labels; whether equality
            # constraints are intentionally skipped here depends on the shape
            # of _grad_properties' return — confirm against that method.
            for f in range(self.ic_dim):
                if self.ic_dim > 1:
                    print("CONSTRAINT g_" + str(f + 1) + " :", file=output)
                # g[0]: gradient norm percentiles, g[1]: max/min |dFx| ratio
                # percentiles, each indexed [constraint][percentile 0..4].
                print(" Percentiles : ".ljust(28), "0".center(9), "25".center(9), "50".center(
                    9), "75".center(9), "100".center(9), "", sep="|", file=output)
                print(" Gradient norm :".ljust(28), str(round(g[0][f][0], round_to)).center(9), str(round(g[0][f][1], round_to)).center(9), str(round(
                    g[0][f][2], round_to)).center(9), str(round(g[0][f][3], round_to)).center(9), str(round(g[0][f][4], round_to)).center(9), "", sep="|", file=output)
                print(" |dFx|_max/|dFx|_min :".ljust(28), str(round(g[1][f][0], round_to)).center(9), str(round(g[1][f][1], round_to)).center(9), str(round(
                    g[1][f][2], round_to)).center(9), str(round(g[1][f][3], round_to)).center(9), str(round(g[1][f][4], round_to)).center(9), "", sep="|", file=output)
            print(" Gradient sparsity : ",
                  "[" + str(round(self.c_grad_sparsity, round_to)) + "]", file=output)
        if output is not None:
            output.close()
        # Optional plots; PCPs only make sense with more than one dimension.
        if plot_gradient_sparsity:
            self.plot_gradient_sparsity(zero_tol=zero_tol, mode='c')
        if plot_pcp and self.cont_dim > 1:
            self.plot_gradient_pcp(mode='c', invert=False)
        if plot_inverted_pcp and self.c_dim > 1:
            self.plot_gradient_pcp(mode='c', invert=True)
# CONSTRAINTS
    def _c_lin(self, n_pairs=0, threshold=10 ** (-10)):
        """
        Tests the probability of linearity of the constraint violation distributions obtained. A
        pair of points (X1,C1),(X2,C2) from the sample is selected per test and a random convex
        combination of them is taken (Xconv,Fconv). For each constraint, if C(Xconv)=Cconv within
        tolerance, the constraint is considered linear there. The average of all tests performed
        gives the overall result.

        **USAGE:**
        analysis._c_lin([n_pairs=0, threshold=10**(-10)])

        * n_pairs: number of pairs of points used in the test. If set to 0, it will use as many pairs
          of points as points there are in the sample. Defaults to 0.
        * threshold: tolerance considered to rate the constraint as linear between two points.
          Defaults to 10**(-10).

        **Returns**:

        * p_lin[constraint dimension]: probability of linearity [0,1] per constraint.

        **NOTE:** integer variable values are fixed during each of the tests and linearity
        is evaluated as regards the continuous part of the chromosome.\n
        """
        # Preconditions: a sample, constraints, computed constraint values and
        # at least one continuous variable are all required.
        if self.npoints == 0:
            raise ValueError(
                "analysis._c_lin: sampling first is necessary")
        if self.c_dim == 0:
            raise ValueError(
                "analysis._c_lin: this test makes no sense for unconstrained optimisation")
        if len(self.c) == 0:
            raise ValueError(
                "analysis._c_lin: computing constraints first is necessary")
        if self.cont_dim == 0:
            raise ValueError(
                "analysis._c_lin: this test makes no sense for purely integer problems")
        if n_pairs == 0:
            n_pairs = self.npoints
        try:
            from numpy.random import random, randint
            from numpy import array, multiply, zeros, divide
        except ImportError:
            raise ImportError(
                "analysis._c_lin needs numpy to run. Is it installed?")
        # Per-constraint count of "linear" verdicts; averaged at the end.
        p_lin = zeros(self.c_dim)
        for i in range(n_pairs):
            # Pick two distinct sample points at random.
            i1 = randint(self.npoints)
            i2 = randint(self.npoints)
            while (i2 == i1):
                i2 = randint(self.npoints)
            r = random()
            # Random convex combination of the two (unit-hypercube) points.
            x = r * array(self.points[i1]) + (1 - r) * array(self.points[i2])
            if self.cont_dim != self.dim:
                # Mixed-integer problem: freeze the integer tail to point i1's
                # values so only the continuous part is interpolated.
                x[self.cont_dim:] = self.points[i1][self.cont_dim:]
                # Rescale from the unit hypercube back to the original bounds.
                x = multiply(
                    array(x), array(self.ub) - array(self.lb)) + array(self.lb)
                # Second endpoint rebuilt with the same frozen integer tail;
                # its constraint values must be recomputed for consistency.
                x2 = multiply(array(self.points[i2][:self.cont_dim] + self.points[i1][
                    self.cont_dim:]), array(self.ub) - array(self.lb)) + array(self.lb)
                c2 = divide(self.prob.compute_constraints(x2), self.c_span)
                c_lin = r * array(self.c[i1]) + (1 - r) * array(c2)
            else:
                x = multiply(
                    array(x), array(self.ub) - array(self.lb)) + array(self.lb)
                # Interpolated (i.e. assuming linearity) constraint values.
                c_lin = r * array(self.c[i1]) + (1 - r) * array(self.c[i2])
            # True constraint values at the interpolated point, scaled like c.
            c_real = divide(self.prob.compute_constraints(x), self.c_span)
            delta = c_lin - c_real
            for j in range(self.c_dim):
                if abs(delta[j]) < threshold:
                    p_lin[j] += 1
        p_lin /= n_pairs
        self.c_lin_npairs = n_pairs  # recorded for reporting by c_linearity
        return list(p_lin)
# NEVER CALL AFTER SCALING!!! (sample calls it default)
def _compute_constraints(self):
"""
Computes the constraint function values of the points in the sample.
**USAGE:**
analysis._compute_constraints()
Stores as attribute:
* analysis.c: unscaled constraint value distribution.
* analysis.c_span: scale factors for constraint function values.
**NOTE:** Never call this function after having scaled the dataset. _sample function calls it
automatically if the problem is a constrained one, and then calls _scale_sample.\n
"""
if self.npoints == 0:
raise ValueError(
"analysis._compute_constraints: sampling first is necessary")
try:
from numpy.random import random, randint
from numpy import array, multiply, ptp, amax, amin
except ImportError:
raise ImportError(
"analysis._compute_constraints needs numpy to run. Is it installed?")
self.c = []
self.c_span = []
if self.c_dim != 0:
for i in range(self.npoints):
self.c.append(
list(self.prob.compute_constraints(self.points[i])))
temp0 = ptp(self.c, 0).tolist()
temp1 = amax(self.c, 0).tolist()
temp2 = abs(amin(self.c, 0)).tolist()
self.c_span = [max(j, k, l)
for j, k, l in zip(temp0, temp1, temp2)]
def _c_effectiveness(self, tol=10 ** (-8)):
"""
Evaluates constraint effectiveness for a constraint problem.
**USAGE:**
analysis._c_effectiveness([tol=10**(-10)])
* tol: tolerance for assessment of equality.
**Returns**:
* c[constraint dimension][2]:
* c[i][0] is the <= effectiveness of the constraint i (fraction of sample <=).
* c[i][1] is the == effectiveness of the constraint i (fraction of sample ==).
"""
if self.npoints == 0:
raise ValueError(
"analysis._c_effectiveness: sampling first is necessary")
c = []
if self.c_dim != 0:
if len(self.c) == 0:
raise ValueError(
"analysis._c_effectiveness: compute constraints first")
dp = 1. / self.npoints
for i in range(self.c_dim):
c.append([0, 0])
for j in range(self.npoints):
if self.c[j][i] <= tol:
c[i][0] += dp
if abs(self.c[j][i]) < tol:
c[i][1] += dp
return c
def _ic_redundancy(self, tol=10 ** (-8)):
"""
Evaluates redundancy of inequality constraints, both of each constraint wrt all the rest and
pairwise.
**USAGE:**
analysis._ic_redundancy([tol=10**(-10)])
* tol: tolerance for assessment of equality.\n
**Returns** tuple of 2:
* redundancy[inequality constraint dimension]: redundancy of each inequality constraint with respect to all other inequality constraints. redundancy[i] is the fraction of points violating constraint g_i that also violate any other inequality constraint.
* m[inequality constraint dimension][inequality constraint dimension]: pairwise redundancy. m[i][j] is the fraction of points violating constraint g_i that also violate constraint g_j.
"""
if self.npoints == 0:
raise ValueError(
"analysis._constraint_feasibility: sampling first is necessary")
ec_f = []
if self.ic_dim != 0:
if len(self.c) == 0:
raise ValueError(
"analysis._constraint_feasibility: compute constraints first")
try:
from numpy import dot, transpose, array
except ImportError:
raise ImportError(
"analysis._c_lin needs numpy to run. Is it installed?")
redundancy = [0 for i in range(self.ic_dim)]
violation = []
for i in range(self.npoints):
violation.append([0] * self.ic_dim)
count = 0
for j in range(-self.ic_dim, 0):
if self.c[i][j] > tol:
violation[i][j] = 1.
if count == 0:
first_index = j
elif count == 1:
redundancy[first_index] += 1.
redundancy[j] += 1.
else:
redundancy[j] += 1.
count += 1
m = dot(transpose(violation), violation)
for i in range(self.ic_dim):
d = float(m[i][i])
if d > 0:
redundancy[i] = redundancy[i] / d
for j in range(self.ic_dim):
m[i][j] = m[i][j] / d
else:
redundancy[i] = 1
m[i] = [1] * self.ic_dim
return (redundancy, m.tolist())
##########################################################################
# LOCAL SEARCH
##########################################################################
    def local_search(self, clusters_to_show=10, plot_global_pcp=True, plot_separate_pcp=True, scatter_plot_dimensions=[],
                     sample_size=0, algo=None, decomposition_method='tchebycheff', weights='uniform', z=[], con2mo='obj_cstrsvio',
                     variance_ratio=0.9, k=0, single_cluster_tolerance=0.0001, kmax=0, round_to=3):
        """
        This function selects points from the sample and launches local searches using them as initial
        points. Then it clusters the results and orders the clusters ascendently as regards fitness
        value of its centroid (after transformation for constraint problems and fitness decomposition
        for multi-objective problems). The clustering is conducted by means of the k-Means algorithm
        in the search-fitness space. Some parameters are also computed after the clustering to allow
        landscape analysis and provide insight into the basins of attraction that affect the algorithm
        deployed.

        **USAGE:**
        analysis.local_search([clusters_to_show=10, plot_global_pcp=True, plot_separate_pcp=True, scatter_plot_dimensions=[], sample_size=0, algo=algorithm.(), decomposition_method='tchebycheff', weights='uniform', z=[],con2mo='obj_cstrsvio', variance_ratio=0.9, k=0,single_cluster_tolerance=0.001, kmax=0, round_to=3])

        * clusters_to_show: number of clusters whose parameters will be displayed. Option 'all' will
          display all clusters obtained. Clusters will be ordered ascendently as regards mean fitness
          value (after applying problem.con2mo in the case of constrained problems and problem.decompose
          for multi-objective problems), and the best ones will be shown. This parameter also affects
          the plots.
        * plot_global_pcp: if True, the local search cluster PCP will be generated, representing all
          clusters to show in the same graph. See plot_local_cluster_pcp for more information on this
          plot. Defaults to True.
        * plot_separate_pcp: if True, as many PCPs as clusters_to_show will be generated, representing
          a cluster per graph. See plot_local_cluster_pcp for more information on this plot. Defaults to
          True.
        * scatter_plot_dimensions: integer or list of up to 3 integers specifying the dimensions to
          consider for the local search cluster scatter plot. Option 'all' will pick all dimensions.
          Option [] will not generate the scatter plot. Defaults to [].
        * sample_size: number of initial points to launch local searches from. If set to 0, all
          points in sample are used, otherwise they are selected randomly in the initial set. Defaults to 0.
        * algo: algorithm object used in searches. For purposes, it should be a local optimisation
          algorithm. Defaults to algorithm.cs().
        * decomposition_method: method used by problem.decompose in the case of multi-objective
          problems. Options are: 'tchebycheff', 'weighted', 'bi' (boundary intersection).
          Defaults to 'tchebycheff'.
        * weights: weight vector used by problem.decompose in the case of multi-objective
          problems. Options are: 'uniform', 'random' or any vector of length [fitness dimension]
          whose components sum to one with precision of 10**(-8). Defaults to 'uniform'.
        * z: ideal reference point used by 'tchebycheff' and 'bi' methods. If set to [] (empty
          vector), point [0,0,...,0] is used. Defaults to [].
        * con2mo: way in which constraint problems will be transformed into multi-objective problems before decomposition. Defaults to 'obj_cstrsvio'. Options are:
            * 'obj_cstrs': f1=original objective, f2=number of violated constraints.
            * 'obj_cstrsvio': f1=original objective, f2=norm of total constraint violation.
            * 'obj_eqvio_ineqvio': f1=original objective, f2= norm of equality constraint violation, f3= norm of inequality constraint violation.
            * None: in this case the function won't try to transform the constraint problem via meta-problem con2mo. Set to None when a local search algorithm that supports constraint optimization is input.
        * variance_ratio: target fraction of variance explained by the cluster centroids, when not
          clustering to a fixed number of clusters. Defaults to 0.9.
        * k: number of clusters when clustering to fixed number of clusters. If k=0, the clustering
          will be performed for increasing value of k until the explained variance ratio is achieved.
          Defaults to 0.
        * single_cluster_tolerance: if the radius of a single cluster is lower than this value
          times (search space dimension+fitness space dimension), k will be set to 1 when not clustering
          to a fixed number of clusters. Defaults to 0.0001.
        * kmax: maximum number of clusters admissible. If set to 0, the limit is the number of local
          searches performed. Defaults to 0.
        * round_to: precision of the results printed. Defaults to 3.\n

        **Prints to screen or file:**

        * Number of local searches performed.
        * Quartiles of CPU time per search: percentiles 0, 25, 50, 75 and 100 of the time elapsed per
          single local search.
        * Cluster properties: the following parameters will be shown for the number of clusters specified via argument clusters_to_show:
            * Size: size of the cluster, in number of points and as a percentage of the sample size.
            * Cluster X_center: projection of the cluster centroid in the search space.
            * Mean objective value: projection of the cluster centroid in the fitness space.
            * F(X_center): fitness value of the X_center. If it differs abruptly from the cluster mean objective value, the odds are that the cluster spans through more than one mode of the fitness function.
            * C(X_center): constraint function values of the X_center. Only for constrained problems.
            * Cluster span in F: peak-to-peak values of the fitness values of the local search final points in the cluster.
            * Cluster radius in X: euclidian distance from the furthest final local search point in the cluster to the cluster X-center.
            * Radius of attraction: euclidian distance from the furthest initial local search point in the cluster to the cluster X-center.

        **Shows or saves to file:**

        * Global cluster PCP: PCP of the clusters of the local search results, all clusters to show on
          the same graph. See analysis.plot_local_cluster_pcp for more information on the plot.
        * Separate cluster PCP: PCP of the clusters of the local search results, one graph per cluster.
          See analysis.plot_local_cluster_pcp for more information on the plot.
        * Cluster scatter plot: scatter plot of the clusters of the local search results. See
          analysis.plot_local_cluster_scatter for more information on the plot.

        **NOTE:** this function calls analysis._get_local_extrema and analysis._cluster_local_extrema. Both
        these functions store a great number of properties as class attributes. See their respective
        entries for more information about these attributes.
        NOTE(review): an earlier docstring mentioned a 'par' flag that is not in
        this signature; parallelisation is fixed to True in the
        _get_local_extrema call below — confirm that is intended.
        """
        from numpy import percentile
        from PyGMO import algorithm
        # Default local optimiser: compass search.
        if algo is None:
            algo = algorithm.cs()
        # Print to screen (output=None) or append to <dir>/log.txt.
        if self.dir is None:
            output = None
        else:
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)
        print(
            "--------------------------------------------------------------------------------", file=output)
        print("LOCAL SEARCH", file=output)
        print(
            "--------------------------------------------------------------------------------", file=output)
        # Close the log while the (possibly long) searches run; _get_local_extrema
        # writes its own warnings to the same file.
        if output is not None:
            output.close()
        self._get_local_extrema(
            sample_size, algo, True, decomposition_method, weights, z, con2mo, True)
        if self.dir is None:
            output = None
        else:
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)
        # Cluster the local-search endpoints (results stored as attributes).
        self._cluster_local_extrema(
            variance_ratio, k, single_cluster_tolerance, kmax)
        if clusters_to_show == 'all' or clusters_to_show > self.local_nclusters:
            clusters_to_show = self.local_nclusters
        print("Local searches performed : ",
              self.local_initial_npoints, file=output)
        print("Quartiles of CPU time per search [ms]: ", round(percentile(self.local_search_time, 0), round_to), "/", round(percentile(self.local_search_time, 25), round_to), "/", round(
            percentile(self.local_search_time, 50), round_to), "/", round(percentile(self.local_search_time, 75), round_to), "/", round(percentile(self.local_search_time, 100), round_to), file=output)
        if clusters_to_show > 0:
            print("Number of clusters identified : ",
                  self.local_nclusters, file=output)
            print("Cluster properties (max. best " +
                  str(clusters_to_show) + " clusters) :", file=output)
        # Report the best clusters (already ordered by _cluster_local_extrema).
        for i in range(min((self.local_nclusters, clusters_to_show))):
            print(" Cluster n. " + str(i + 1) + ' :', file=output)
            print(" Size: ", self.local_cluster_size[
                i], ", ", 100 * round(self.local_cluster_size[i] / self.local_initial_npoints, 4), "%", file=output)
            print(" Cluster X_center : ", [
                round(x, round_to) for x in self.local_cluster_x_centers[i]], file=output)
            print(" Mean objective value : ", [
                round(f, round_to) for f in self.local_cluster_f_centers[i]], file=output)
            print(" F(X_center) : ", [
                round(f, round_to) for f in self.local_cluster_f[i]], file=output)
            if self.c_dim > 0:
                print(" C(X_center) : ", [
                    round(c, round_to) for c in self.local_cluster_c[i]], file=output)
            print(" Cluster span in F : ", [
                round(s, round_to) for s in self.local_cluster_f_span[i]], file=output)
            print(" Cluster radius in X : ", round(
                self.local_cluster_rx[i], round_to), file=output)
            print(" Radius of attraction : ", round(
                self.local_cluster_rx0[i], round_to), file=output)
        if output is not None:
            output.close()
        # Optional plots of the clustered results.
        if plot_global_pcp:
            self.plot_local_cluster_pcp(
                together=True, clusters_to_plot=clusters_to_show)
        if plot_separate_pcp:
            self.plot_local_cluster_pcp(
                together=False, clusters_to_plot=clusters_to_show)
        if isinstance(scatter_plot_dimensions, int):
            scatter_plot_dimensions = [scatter_plot_dimensions]
        if isinstance(scatter_plot_dimensions, (list, tuple)):
            # user-facing dimensions are 1-based; plots are 0-based
            scatter_plot_dimensions = [i - 1 for i in scatter_plot_dimensions]
        if len(scatter_plot_dimensions) > 0:
            self.plot_local_cluster_scatter(
                dimensions=scatter_plot_dimensions, clusters_to_plot=clusters_to_show)
# LOCAL SEARCH
    def _get_local_extrema(self, sample_size=0, algo=None, par=True, decomposition_method='tchebycheff', weights='uniform', z=[], con2mo='obj_cstrsvio', warning=True):
        """
        Selects points from the sample and launches local searches using them as initial points.

        **USAGE:**
        analysis._get_local_extrema([sample_size=0, algo=algorithm.cs(), par=True, decomposition_method='tchebycheff', weights='uniform', z=[], con2mo='obj_cstrsvio', warning=True])

        * sample_size: number of initial points to launch local searches from. If set to 0, all
          points in sample are used. Defaults to 0.
        * algo: algorithm object used in searches. For purposes, it should be a local
          optimisation algorithm. Defaults to algorithm.cs().
        * par: if True, an unconnected archipelago will be used for possible parallelization.
        * decomposition_method: method used by problem.decompose in the case of multi-objective
          problems. Options are: 'tchebycheff', 'weighted', 'bi' (boundary intersection).
          Defaults to 'tchebycheff'.
        * weights: weight vector used by problem.decompose in the case of multi-objective
          problems. Options are: 'uniform', 'random' or any vector of length [fitness dimension]
          whose components sum to one with precision of 10**(-8). Defaults to 'uniform'.
        * z: ideal reference point used by 'tchebycheff' and 'bi' methods. If set to [] (empty
          vector), point [0,0,...,0] is used. Defaults to [].
        * con2mo: way in which constraint problems will be transformed into multi-objective problems before decomposition. Options are:
            * 'obj_cstrs': f1=original objective, f2=number of violated constraints.
            * 'obj_cstrsvio': f1=original objective, f2=norm of total constraint violation.
            * 'obj_eqvio_ineqvio': f1=original objective, f2= norm of equality constraint violation, f3= norm of inequality constraint violation.
            * None: in this case the function won't try to transform the constraint problem via meta-problem con2mo. Set to None when using a local search algorithm that supports constraint optimization.
        * warning: if True, a warning showing transformation method will be shown when applying con2mo
          meta-problem, and another warning with the decomposition method and parameters will be shown
          when applying decompose meta-problem to a multi-objective problem.

        **The following parameters are stored as attributes:**

        * analysis.local_initial_npoints: number of initial points used for local searches (number
          of searches performed).
        * analysis.local_initial_points[number of searches]: index of each initial point in the
          list of sampled points. If the whole sample is used, the list is sorted.
        * analysis.local_search_time[number of searches]: time elapsed in each local search
          (miliseconds).
        * analysis.local_extrema [number of searches][search space dimension]: resulting point of
          each of the local searches.
        * analysis.local_f [number of searches][fitness dimension]: real fitness value of each
          of the resulting points
        * analysis.local_f_dec[number of searches]: fitness value of points after con2mo in constraint
          and decompose in multi-objective problems. Used to rate and order clusters.\n
        """
        from PyGMO import archipelago, island, population, algorithm
        if algo is None:
            algo = algorithm.cs()
        if self.npoints == 0:
            raise ValueError(
                "analysis._get_local_extrema: sampling first is necessary")
        if not isinstance(algo, algorithm._algorithm._base):
            raise ValueError(
                "analysis._get_local_extrema: input a valid pygmo algorithm")
        try:
            from numpy.random import randint
            from numpy import array, ptp
        except ImportError:
            raise ImportError(
                "analysis._get_local_extrema needs numpy to run. Is it installed?")
        from time import time
        # Choose the initial points: either the whole sample or a random
        # selection (possibly with repetitions).
        if sample_size <= 0 or sample_size >= self.npoints:
            self.local_initial_points = range(self.npoints)
            self.local_initial_npoints = self.npoints
        else:
            self.local_initial_npoints = sample_size
            self.local_initial_points = [
                randint(self.npoints) for i in range(sample_size)]  # avoid repetition?
        self.local_extrema = []
        # self.local_neval=[]// pygmo doesn't return it
        self.local_search_time = []
        self.local_f = []
        self.local_f_dec = []
        # Transform constrained problems into multi-objective ones so a plain
        # local optimiser can handle them (unless the caller opted out).
        # NOTE(review): 'problem' is not imported in this function; presumably
        # it is available from a module-level PyGMO import -- confirm.
        if con2mo is None:
            prob = self.prob
        elif self.c_dim > 0:
            prob = problem.con2mo(self.prob, method=con2mo)
            if warning:
                if self.dir is None:
                    output = None
                else:
                    output = open(self.dir + '/log.txt', 'r+')
                    output.seek(0, 2)
                print(
                    'WARNING: get_local_extrema is being transformed to MO by means of ' + con2mo, file=output)
                if self.dir is not None:
                    output.close()
        else:
            prob = self.prob
        # Multi-objective problems are scalarised via problem.decompose.
        if prob.f_dimension == 1:
            decomposition = prob
        else:
            if weights == 'uniform':
                weightvector = [
                    1. / prob.f_dimension for i in range(prob.f_dimension)]
            elif weights == 'random':
                # NOTE(review): an empty vector is passed for 'random' weights;
                # presumably problem.decompose generates them -- confirm.
                weightvector = []
            else:
                weightvector = weights
            decomposition = problem.decompose(
                prob, method=decomposition_method, weights=weightvector, z=z)
            if warning:
                if decomposition_method == 'tchebycheff' or decomposition_method == 'bi':
                    if z == []:
                        z = [0 for i in range(self.f_dim)]
                    additional_message = ' and ' + \
                        str(z) + ' ideal reference point!'
                else:
                    additional_message = '!'
                if self.dir is None:
                    output = None
                else:
                    output = open(self.dir + '/log.txt', 'r+')
                    output.seek(0, 2)
                string = 'WARNING: get_local_extrema is decomposing multi-objective problem by means of ' + \
                    str(decomposition_method) + ' method, with ' + \
                    str(weights) + ' weight vector' + additional_message
                # Manual word-wrap of the warning at ~80 columns.
                length = 0
                for word in string.split(' '):
                    length += len(word) + 1
                    if length > 80:
                        length = len(word) + 1
                        print('\n' + word, end=' ', file=output)
                    else:
                        print(word, end=' ', file=output)
                print(file=output)
                if self.dir is not None:
                    output.close()
        if par:
            archi = archipelago()
        # One single-individual island per initial point; sequential runs
        # collect their results immediately, parallel runs after evolve().
        for i in range(self.local_initial_npoints):
            pop = population(decomposition)
            x0 = []
            for j in range(self.dim):
                # rescale the normalised sample point to the original bounds
                x0.append(self.points[self.local_initial_points[i]][
                    j] * (self.ub[j] - self.lb[j]) + self.lb[j])
            pop.push_back(x0)
            isl = island(algo, pop)
            if par:
                archi.push_back(isl)
            else:
                isl.evolve(1)
                self.local_search_time.append(isl.get_evolution_time())
                # store the champion normalised back to the unit hypercube
                self.local_extrema.append(
                    [(c - l) / (u - l) for c, u, l in zip(list(isl.population.champion.x), self.ub, self.lb)])
                self.local_f_dec.append(isl.population.champion.f[0])
                self.local_f.append(((array(self.prob.objfun(
                    isl.population.champion.x)) - array(self.f_offset)) / array(self.f_span)).tolist())
        if par:
            start = time()
            archi.evolve(1)
            archi.join()
            finish = time()
            for i in archi:
                self.local_search_time.append(i.get_evolution_time())
                self.local_extrema.append(
                    [(c - l) / (u - l) for c, u, l in zip(list(i.population.champion.x), self.ub, self.lb)])
                self.local_f_dec.append(i.population.champion.f[0])
                self.local_f.append(((array(self.prob.objfun(
                    i.population.champion.x)) - array(self.f_offset)) / array(self.f_span)).tolist())
        # Normalise the decomposed fitness values to [0, 1].
        # NOTE(review): if every search converged to the same value,
        # f_dec_span is 0 and this division raises -- confirm acceptable.
        f_dec_offset = min(self.local_f_dec)
        f_dec_span = ptp(self.local_f_dec)
        self.local_f_dec = [
            (i - f_dec_offset) / f_dec_span for i in self.local_f_dec[:]]
def _cluster_local_extrema(self, variance_ratio=0.95, k=0, single_cluster_tolerance=0.0001, kmax=0):
"""
Clusters the results of a set of local searches and orders the clusters ascendently as
regards fitness value of its centroid (after transformation for constraint problems and
fitness decomposition for multi-objective problems). The clustering is conducted by means of
the k-Means algorithm in the search-fitness space. Some parameters are also computed after
the clustering to allow landscape analysis and provide insight into the basins of attraction
that affect the algorithm deployed.
**USAGE:**
analysis._cluster_local_extrema([variance_ratio=0.95, k=0, single_cluster_tolerance=0.0001, kmax=0])
* variance_ratio: target fraction of variance explained by the cluster centroids, when not
clustering to a fixed number of clusters.
* k: number of clusters when clustering to fixed number of clusters. If k=0, the clustering
will be performed for increasing value of k until the explained variance ratio is achieved.
Defaults to 0.
* single_cluster_tolerance: if the radius of a single cluster is lower than this value
times (search space dimension+fitness space dimension), k will be set to 1 when not clustering
to a fixed number of clusters. Defaults to 0.0001.
* kmax: maximum number of clusters admissible. If set to 0, the limit is the number of local
searches performed. Defaults to 0.
**The following parameters are stored as attributes:**
* analysis.local_nclusters: number of clusters obtained.
* analysis.local_cluster[number of searches]: cluster to which each point belongs.
* analysis.local_cluster_size[number of clusters]: size of each cluster.
* analysis.local_cluster_x_centers[number of clusters][search dimension]: projection of the cluster
centroid on the search space.
* analysis.local_cluster_f_centers[number of clusters][fitness dimension]: projection of the cluster
centroid on the fitness space, or mean fitness value in the cluster.
* analysis.local_cluster_f[number of clusters][fitness dimension]: fitness value of the cluster x-center.
* analysis.local_cluster_c[number of clusters][constraint dimension]: constraint function value
of the cluster x-center.
* analysis.local_cluster_f_span[number of clusters][fitness dimension]: peak-to-peak value of
each of the fitness functions inside the cluster.
* analysis.local_cluster_rx[number of clusters]: radius of each cluster in the search space,
or euclidian distance from the furthest final local search point in the cluster to the cluster
X-center.
* analysis.local_cluster_rx0[number of clusters]: radius of attraction, or euclidian distance
from the furthest initial local search point in the cluster to the cluster X-center.
"""
if self.npoints == 0:
raise ValueError(
"analysis._cluster_local_extrema: sampling first is necessary")
if self.local_initial_npoints == 0:
raise ValueError(
"analysis._cluster_local_extrema: getting local extrema first is necessary")
try:
from numpy import array, zeros, ptp
from numpy.linalg import norm
from sklearn.cluster import KMeans
except ImportError:
raise ImportError(
"analysis._cluster_local_extrema needs numpy and sklearn to run. Are they installed?")
dataset = []
if kmax == 0:
kmax = self.local_initial_npoints
for i in range(self.local_initial_npoints):
dataset.append(self.local_extrema[i][:])
dataset[i].extend(self.local_f[i])
if k != 0: # cluster to given number of clusters
clust = KMeans(k)
# storage of output
local_cluster = list(clust.fit_predict(dataset))
self.local_nclusters = k
cluster_size = zeros(k)
for i in range(self.local_initial_npoints):
cluster_size[local_cluster[i]] += 1
cluster_size = list(cluster_size)
else: # find out number of clusters
clust = KMeans(1)
total_distances = clust.fit_transform(dataset)
total_center = clust.cluster_centers_[0]
total_radius = max(total_distances)[0]
# single cluster scenario
if total_radius < single_cluster_tolerance * (self.dim + self.f_dim):
# storage of output
local_cluster = list(clust.predict(dataset))
self.local_nclusters = 1
cluster_size = [0]
for i in range(self.local_initial_npoints):
cluster_size[local_cluster[i]] += 1
cluster_size = list(cluster_size)
else:
k = 2 # multiple cluster scenario
var_tot = sum([x ** 2 for x in total_distances])
var_ratio = 0
while var_ratio <= variance_ratio and k <= kmax:
clust = KMeans(k)
y = clust.fit_predict(dataset)
cluster_size = zeros(k)
var_exp = 0
for i in range(self.local_initial_npoints):
cluster_size[y[i]] += 1
for i in range(k):
distance = norm(
clust.cluster_centers_[i] - total_center)
var_exp += cluster_size[i] * distance ** 2
var_ratio = var_exp / var_tot
k += 1
# storage of output
local_cluster = list(y)
self.local_nclusters = k - 1
# more storage and reordering so clusters are ordered best to worst
cluster_value = [clust.cluster_centers_[i][self.dim]
for i in range(self.local_nclusters)]
cluster_value = [0] * self.local_nclusters
for i in range(self.local_initial_npoints):
cluster_value[
local_cluster[i]] += self.local_f_dec[i] / cluster_size[local_cluster[i]]
order = [x for (y, x) in sorted(
zip(cluster_value, range(self.local_nclusters)))]
self.local_cluster_x_centers = []
self.local_cluster_f_centers = []
self.local_cluster_c = []
self.local_cluster_f = []
self.local_cluster = []
self.local_cluster_size = []
for i in range(self.local_nclusters):
self.local_cluster_size.append(cluster_size[order[i]])
self.local_cluster_x_centers.append(
clust.cluster_centers_[order[i]][:self.dim])
self.local_cluster_f_centers.append(
clust.cluster_centers_[order[i]][self.dim:])
self.local_cluster_f.append(((array(self.prob.objfun(array(self.local_cluster_x_centers[
i]) * (array(self.ub) - array(self.lb)) + array(self.lb))) - array(self.f_offset)) / array(self.f_span)).tolist())
if self.c_dim > 0:
self.local_cluster_c.append(((array(self.prob.compute_constraints(array(self.local_cluster_x_centers[
i]) * (array(self.ub) - array(self.lb)) + array(self.lb)))) / array(self.c_span)).tolist())
for i in range(self.local_initial_npoints):
for j in range(self.local_nclusters):
if local_cluster[i] == order[j]:
self.local_cluster.append(j)
break
# calculate cluster radius and center
self.local_cluster_rx = [0] * self.local_nclusters
self.local_cluster_rx0 = [0] * self.local_nclusters
f = [[] for i in range(self.local_nclusters)]
for i in range(self.local_initial_npoints):
c = self.local_cluster[i]
if self.local_cluster_size[c] == 1:
f[c].append([0] * self.f_dim)
else:
rx = norm(
array(self.local_extrema[i]) - array(self.local_cluster_x_centers[c]))
rx0 = norm(array(self.points[
self.local_initial_points[i]]) - array(self.local_cluster_x_centers[c]))
f[c].append(self.local_f[i])
if rx > self.local_cluster_rx[c]:
self.local_cluster_rx[c] = rx
if rx0 > self.local_cluster_rx0[c]:
self.local_cluster_rx0[c] = rx0
self.local_cluster_f_span = [
ptp(f[t], 0).tolist() for t in range(self.local_nclusters)]
##########################################################################
# LEVEL SET
##########################################################################
    def levelset(self, threshold=50, k_tune=3, k_test=10, linear=True, quadratic=True, nonlinear=True, round_to=3):
        """
        This function performs binary classifications of the sample via SVM and assesses its precision.
        The classes are defined by a percentile threshold on a fitness function. Linear, quadratic and
        nonlinear (rbf) kernels can be used, and their misclassification errors as well as p-values of
        pairwise comparison can be evaluated as indicators for multi-modality. All results are presented
        per objective.
        **USAGE:**
        analysis.levelset([threshold=[25,50], k_test=10,k_tune=3, linear=True, quadratic=False, nonlinear=True, round_to=3])
        * threshold: percentile or list of percentiles that will serve as threshold for binary
        classification of the sample. Defaults to 50.
        * k_tune: k used in k-fold crossvalidation to tune the model hyperparameters. Defaults to 3.
        * k_test: k used in k-fold crossvalidation to assess the model properties. Defaults to 10.
        * linear, quadratic, nonlinear: boolean values. If True, the corresponding test will be performed. All default to true.
        * round_to: precision of the results printed. Defaults to 3.
        **Prints to screen or file:**
        * K tune
        * K test
        * Percentile used as threshold.
        * Mean misclassification error of each method used (linear, quadratic, nonlinear).
        * One-sided p-values of each pairwise comparison (l/q, l/nl, q/nl)
        """
        # Accept a single percentile as well as a list of them.
        if isinstance(threshold, (int, float)):
            threshold = [threshold]
        # Nothing to do unless at least one kernel and one threshold were requested.
        if any([linear, quadratic, nonlinear]) and len(threshold) > 0:
            # file=None routes print() to stdout; otherwise append to the log file.
            if self.dir is None:
                output = None
            else:
                output = open(self.dir + '/log.txt', 'r+')
                output.seek(0, 2)
            print(
                "-------------------------------------------------------------------------------", file=output)
            print("LEVELSET FEATURES ", file=output)
            print(
                "-------------------------------------------------------------------------------", file=output)
            print(" K tune : ",
                  [k_tune], "", file=output)
            print(" K test : ",
                  [k_test], "", file=output)
            # One full SVM study (all requested kernels) per threshold percentile;
            # svm_results is (mmce_l, mmce_q, mmce_nl, p_lq, p_lnl, p_qnl).
            for i in threshold:
                svm_results = self._svm_p_values(
                    threshold=i, k_test=k_test, k_tune=k_tune)
                print("Percentile", i, " :", file=output)
                print(" Mean Misclassification Errors ", file=output)
                if linear:
                    print(" Linear Kernel : ", [
                          round(r, round_to) for r in svm_results[0]], "", file=output)
                if quadratic:
                    print(" Quadratic Kernel : ", [
                          round(r, round_to) for r in svm_results[1]], "", file=output)
                if nonlinear:
                    print(" Non-Linear Kernel (RBF): ",
                          [round(r, round_to) for r in svm_results[2]], "", file=output)
                # P-values only make sense when at least two kernels were run.
                if any([linear and quadratic, linear and nonlinear, quadratic and nonlinear]):
                    print(" P-Values :", file=output)
                    if linear and quadratic:
                        print(" Linear/Quadratic : ",
                              [round(r, round_to) for r in svm_results[3]], "", file=output)
                    if linear and nonlinear:
                        print(" Linear/Nonlinear : ",
                              [round(r, round_to) for r in svm_results[4]], "", file=output)
                    if quadratic and nonlinear:
                        print(" Quadratic/Nonlinear : ",
                              [round(r, round_to) for r in svm_results[5]], "", file=output)
            if output is not None:
                output.close()
    def _svm(self, threshold=50, kernel='rbf', k_tune=3, k_test=10):
        """
        This function performs binary classifications of the sample via SVM and assesses its precision.
        The classes are defined by a percentile threshold on a fitness function. The method is tuned by
        a grid search with ranges 2**[-5,16] for C and 2**[-15,4] for gamma and cross-validation for every
        combination, and the set of hyperparameters that leads to minimum mean misclassification error will
        be employed. Linear, quadratic and nonlinear (rbf) kernels can be used, and their misclassification
        errors can be evaluated by crossvalidation and returned as a distribution.
        **USAGE:**
        analysis._svm([threshold=25, kernel='rbf', k_tune=3, k_test=10])
        * threshold: percentile of the fitness function that will serve as threshold for binary
        classification of the sample. Defaults to 50.
        * kernel: options are 'linear','quadratic' and 'rbf'. Defaults to 'rbf'.
        * k_tune: k used in k-fold crossvalidation to tune the model hyperparameters. Defaults to 3.
        * k_test: k used in k-fold crossvalidation to assess the model misclassification error. Defaults
        to 10.
        **Returns**:
        * mce[fitness dimension][k_test]: misclassification errors obtained for each of the fitness functions.
        """
        if self.npoints == 0:
            raise ValueError(
                "analysis._svm: sampling first is necessary")
        if kernel != 'linear' and kernel != 'quadratic' and kernel != 'rbf':
            raise ValueError(
                "analysis._svm: choose a proper value for kernel ('linear','quadratic','rbf')")
        if threshold <= 0 or threshold >= 100:
            raise ValueError(
                "analysis._svm: threshold needs to be a value ]0,100[")
        try:
            from numpy import arange, zeros, ones
            # NOTE(review): sklearn.cross_validation / sklearn.grid_search are
            # pre-0.18 scikit-learn module paths (later moved to
            # sklearn.model_selection) -- confirm the sklearn version pinned
            # by this project.
            from sklearn.cross_validation import StratifiedKFold, cross_val_score
            from sklearn.svm import SVC
            from sklearn.grid_search import GridSearchCV
            from sklearn.preprocessing import StandardScaler
        except ImportError:
            raise ImportError(
                "analysis._svm needs numpy and scikit-learn to run. Are they installed?")
        # 'quadratic' is implemented as sklearn's degree-2 polynomial kernel.
        if kernel == 'quadratic':
            kernel = 'poly'
        # Hyperparameter grids: odd powers of two, C in 2**[-5,15].
        c_range = 2. ** arange(-5, 16, 2)
        if kernel == 'linear':
            param_grid = dict(C=c_range)
        else:
            # gamma in 2**[-15,3], only meaningful for poly/rbf kernels.
            g_range = 2. ** arange(-15, 4, 2)
            param_grid = dict(gamma=g_range, C=c_range)
        # Class boundary: the requested percentile of each objective.
        per = self._percentile(threshold)
        dataset = self.points[:]
        mce = []
        for obj in range(self.f_dim):
            y = zeros(self.npoints)  # classification of data: 1 above percentile
            for i in range(self.npoints):
                if self.f[i][obj] > per[obj]:
                    y[i] = 1
            # Tune C (and gamma) by k_tune-fold stratified cross-validation...
            grid = GridSearchCV(estimator=SVC(
                kernel=kernel, degree=2), param_grid=param_grid, cv=StratifiedKFold(y, k_tune))
            grid.fit(dataset, y)
            # ...then estimate generalisation error with k_test-fold CV on the
            # best estimator found.
            test_score = cross_val_score(
                estimator=grid.best_estimator_, X=dataset, y=y, scoring=None, cv=StratifiedKFold(y, k_test))
            mce.append((ones(k_test) - test_score).tolist())
        return mce  # mce[n_obj][k_test]
def _svm_p_values(self, threshold=50, k_tune=3, k_test=10, l=True, q=True, n=True):
"""
This function calls analysis._svm several times with identical parameters (threshold, k_tune
and k_test) but different kernels, and **Returns** the mean misclassification errors of each
method deployed as well as the p-values of their pairwise comparison.
**USAGE:**
analysis._svm_p_values([threshold=25, k_tune=3, k_test=10, l=True, q=False, nl=True])
* threshold: percentile of the fitness function that will serve as threshold for binary classification
of the sample. Defaults to 50.
* k_tune: k used in k-fold crossvalidation to tune the model hyperparameters. Defaults to 3.
* k_test: k used in k-fold crossvalidation to assess the model misclassification error. Defaults
to 10.
* l: if True, the linear kernel model will be included.
* q: if True, the quadratic kernel model will be included.
* n: if True, the non-linear (rbf) kernel model will be included.
**Returns** a tuple of length 6 containing:
* mmce_linear[fitness dimension]: mean misclassification error of linear kernel model.
* mmce_quadratic[fitness dimension]: mean misclassification error of quadratic kernel model.
* mmce_nonlinear[fitness dimension]: mean misclassification error of nonlinear (rbf) kernel model.
* l_q[fitness dimension]: p-value of the comparison between distributions of mce for linear and
quadratic kernels.
* l_n[fitness dimension]: p-value of the comparison between distributions of mce for linear and
nonlinear (rbf) kernels.
* q_n[fitness dimension]: p-value of the comparison between distributions of mce for quadratic
and nonlinear (rbf) kernels.
**NOTE:** if any of the booleans (l,q,n) is set to False and the corresponding model is not fit, the
function will return -1 for all the associated results.
"""
if any([l, q, n]):
if self.npoints == 0:
raise ValueError(
"analysis._svm_p_values: sampling first is necessary")
try:
from scipy.stats import mannwhitneyu
from numpy import mean
except ImportError:
raise ImportError(
"analysis._svm_p_values needs scipy and numpy to run. Is it installed?")
if l:
linear = self._svm(
threshold=threshold, kernel='linear', k_tune=k_tune, k_test=k_test)
else:
linear = [[-1] * self.f_dim]
if q:
quadratic = self._svm(
threshold=threshold, kernel='quadratic', k_tune=k_tune, k_test=k_test)
else:
quadratic = [[-1] * self.f_dim]
if n:
nonlinear = self._svm(
threshold=threshold, kernel='rbf', k_tune=k_tune, k_test=k_test)
else:
nonlinear = [[-1] * self.f_dim]
l_q = []
q_n = []
l_n = []
for i in range(self.f_dim):
if l and q:
try:
l_q.append(mannwhitneyu(linear[i], quadratic[i])[1])
except ValueError:
l_q.append(0.5)
else:
l_q.append(-1)
if l and n:
try:
l_n.append(mannwhitneyu(linear[i], nonlinear[i])[1])
except ValueError:
l_n.append(0.5)
else:
l_n.append(-1)
if n and q:
try:
q_n.append(mannwhitneyu(quadratic[i], nonlinear[i])[1])
except ValueError:
q_n.append(0.5)
else:
q_n.append(-1)
return (list(mean(linear, 1)), list(mean(quadratic, 1)), list(mean(nonlinear, 1)), l_q, l_n, q_n)
##########################################################################
# PLOT FUNCTIONS
##########################################################################
def plot_f_distr(self):
"""
Routine that plots the f-distributions in terms of density of probability of a fitness value
in the sample considered.
**USAGE:**
analysis.plot_f_distr()
**NOTE:** the plot will be shown on screen or saved to file depending on the option that was
selected when instantiating the analysis class.
"""
try:
from scipy.stats import gaussian_kde
from matplotlib.pyplot import plot, draw, title, show, legend, axes, cla, clf, xlabel, ylabel
except ImportError:
raise ImportError(
"analysis.plot_f_distr needs scipy and matplotlib to run. Are they installed?")
if self.npoints == 0:
raise ValueError(
"analysis.plot_f_distr: sampling first is necessary")
ax = axes()
for i in range(self.f_dim):
tmp = []
for j in range(self.npoints):
tmp.append(self.f[j][i])
x = sorted(tmp)
kde = gaussian_kde(x)
y = kde(x)
ax.plot(x, y, label='objective ' + str(i + 1))
title('F-Distribution(s)')
xlabel('F')
ylabel('dP/dF')
legend()
f = ax.get_figure()
if self.dir is None:
show(f)
cla()
clf()
else:
f.savefig(self.dir + '/figure_' + str(self.fignum) + '.png')
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
print('*F-distribution plot : <figure_' +
str(self.fignum) + '.png>', file=output)
self.fignum += 1
cla()
clf()
output.close()
    def plot_x_pcp(self, percentile=[], percentile_values=[]):
        """
        Routine that creates parallel coordinate plots of the chromosome of all points in the sample
        classified in ranges defined by the list of percentiles input. A plot per objective will be
        generated.
        **USAGE:**
        analysis.plot_x_pcp(percentile=[5,10,25,50,75] [, percentile_values=[0.06,0.08,0.3,0.52,0.8]])
        * percentile: the percentile or list of percentiles that will serve as limits
        to the intervals in which the f-values are classified.
        * percentile_values: the f-values corresponding to the aforementioned percentiles.
        This argument is added for reusability, if set to [], they will be calculated.
        Defaults to [].
        **NOTE:** the plot will be shown on screen or saved to file depending on the
        option that was selected when instantiating the analysis class.
        """
        try:
            # NOTE(review): pandas.tools.plotting is a legacy import path
            # (later pandas moved it to pandas.plotting) -- confirm the pandas
            # version this project pins.
            from pandas.tools.plotting import parallel_coordinates as pc
            from pandas import DataFrame as df
            from matplotlib.pyplot import show, title, grid, ylabel, xlabel, legend, cla, clf
            from numpy import asarray, transpose
        except ImportError:
            raise ImportError(
                "analysis.plot_x_pcp needs pandas, numpy and matplotlib to run. Are they installed?")
        # Accept a scalar percentile as well as a list.
        if isinstance(percentile, (int, float)):
            percentile = [percentile]
        if len(percentile) == 0:
            raise ValueError(
                "analysis.plot_x_pcp: introduce at least one percentile to group data")
        else:
            if isinstance(percentile_values, (int, float)):
                percentile_values = [percentile_values]
            # Recompute the per-objective f-values unless a matching list was supplied.
            if len(percentile_values) == 0 or len(percentile_values) != len(percentile):
                percentile_values = self._percentile(percentile)
            percentile = sorted(percentile)
            # NOTE(review): percentile_values holds one list per percentile, so
            # this sorts lists lexicographically -- assumed consistent with the
            # sorted percentiles; verify for multi-objective problems.
            percentile_values = sorted(percentile_values)
            # Drop degenerate limits at or outside ]0,100[.
            while percentile[0] <= 0:
                percentile = percentile[1:]
                percentile_values = percentile_values[1:]
            while percentile[-1] >= 100:
                percentile = percentile[:-1]
                percentile_values = percentile_values[:-1]
            # Interval labels of the form "lo-hi" covering [0,100].
            labels = [str(a) + "-" + str(b)
                      for a, b in zip([0] + percentile, percentile + [100])]
            for obj in range(self.f_dim):
                # Each row: [interval label, chromosome of the point].
                dataset = []
                for i in range(self.npoints):
                    dataset.append([])
                    if self.f[i][obj] >= percentile_values[-1][obj]:
                        dataset[i].append(labels[-1])
                    else:
                        # First percentile bound exceeding the f-value decides the bucket.
                        for j in range(len(percentile)):
                            if self.f[i][obj] < percentile_values[j][obj]:
                                dataset[i].append(labels[j])
                                break
                    dataset[i].extend(self.points[i])
                # Sort rows by descending lower bound so better buckets draw last.
                dataset = df(
                    sorted(dataset, key=lambda row: -float(row[0].split('-')[0])))
                # dataset=df(dataset)
                title('X-PCP : objective ' + str(obj + 1) + '\n')
                grid(True)
                xlabel('Dimension')
                ylabel('Chromosome (scaled)')
                plot = 0
                plot = pc(dataset, 0)
                f = plot.get_figure()
                if self.dir is None:
                    # Interactive mode: show on screen, then reset state.
                    show(f)
                    cla()
                    clf()
                else:
                    # Batch mode: save a numbered PNG and log its name.
                    f.savefig(
                        self.dir + '/figure_' + str(self.fignum) + '.png')
                    output = open(self.dir + '/log.txt', 'r+')
                    output.seek(0, 2)
                    print('*X-PCP plot obj.' + str(obj + 1) +
                          ' : <figure_' + str(self.fignum) + '.png>', file=output)
                    self.fignum += 1
                    cla()
                    clf()
                    output.close()
    def plot_gradient_sparsity(self, zero_tol=10 ** (-8), mode='f'):
        """
        Plots sparsity of jacobian matrix. A position is considered a zero if its mean
        absolute value is lower than tolerance.
        **USAGE:**
        analysis.plot_gradient_sparsity([zero_tol=10**(-10), mode='c'])
        * zero_tol: tolerance to consider a term as zero.
        * mode: 'f'/'c' to act on the fitness/constraint function jacobian matrix.
        **NOTE:** the plot will be shown on screen or saved to file depending on the option that was
        selected when instantiating the analysis class.
        """
        # Validate the mode and pick the jacobian row count
        # (objectives for 'f', constraints for 'c').
        if mode == 'f':
            if self.grad_npoints == 0:
                raise ValueError(
                    "analysis.plot_gradient_sparsity: sampling and getting gradient first is necessary")
            dim = self.f_dim
        elif mode == 'c':
            if self.c_dim == 0:
                raise ValueError(
                    "analysis.plot_gradient_sparsity: mode 'c' selected for unconstrained problem")
            if self.c_grad_npoints == 0:
                raise ValueError(
                    "analysis.plot_gradient_sparsity: sampling and getting c_gradient first is necessary")
            else:
                dim = self.c_dim
        else:
            raise ValueError(
                "analysis.plot_gradient_sparsity: select a valid mode 'f' or 'c'")
        try:
            from matplotlib.pylab import spy, show, title, grid, xlabel, ylabel, xticks, yticks, draw, cla, clf
            from numpy import nanmean, asarray
        except ImportError:
            raise ImportError(
                "analysis.plot_gradient_sparsity needs matplotlib and numpy to run. Are they installed?")
        # Pick title, y-label and the matrix of mean absolute derivatives.
        if mode == 'f':
            title('Gradient/Jacobian Sparsity (' +
                  str(100 * round(self.grad_sparsity, 4)) + '% sparse)\n')
            ylabel('Objective')
            matrix = self.average_abs_gradient
        else:
            title('Constraint Gradient/Jacobian Sparsity (' +
                  str(100 * round(self.c_grad_sparsity, 4)) + '% sparse)\n')
            ylabel('Constraint')
            matrix = self.average_abs_c_gradient
        grid(True)
        xlabel('Dimension')
        # spy() marks entries whose magnitude exceeds zero_tol.
        plot = spy(matrix, precision=zero_tol, markersize=20)
        # Relabel ticks 1-based; skip silently when matplotlib rejects them.
        try:
            xlocs = range(self.cont_dim)
            ylocs = range(dim)
            xlabels = [str(i) for i in range(1, self.cont_dim + 1)]
            ylabels = [str(i) for i in range(1, dim + 1)]
            xticks(xlocs, [x.format(xlocs[i]) for i, x in enumerate(xlabels)])
            yticks(ylocs, [y.format(ylocs[i]) for i, y in enumerate(ylabels)])
        except (IndexError, ValueError):
            pass
        f = plot.get_figure()
        if self.dir is None:
            # Interactive mode: show on screen, then reset state.
            show(f)
            cla()
            clf()
        else:
            # Batch mode: save a numbered PNG and log its name.
            f.savefig(self.dir + '/figure_' + str(self.fignum) + '.png')
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)
            if mode == 'f':
                print('*Gradient/Jacobian sparsity plot : <figure_' +
                      str(self.fignum) + '.png>', file=output)
            else:
                print('*Constraints Gradient/Jacobian sparsity plot : <figure_' +
                      str(self.fignum) + '.png>', file=output)
            self.fignum += 1
            cla()
            clf()
            output.close()
    def plot_gradient_pcp(self, mode='f', invert=False):
        """
        Generates Parallel Coordinate Plot of Gradient: magnitude of (scaled) partial
        derivative dFi/dXj vs. X and F.
        **USAGE:**
        analysis.plot_gradient_pcp([mode='c', invert=True])
        * mode: 'f'/'c' to use fitness/constraint jacobian matrix.
        * invert: if True, parallel axes are objectives, colors are search variables (not suitable
        for single-objective problems). If False, parallel axes are search variables, colors are
        objectives (not suitable for univariate problems).
        **NOTE:** the plot will be shown on screen or saved to file depending on the option that was
        selected when instantiating the analysis class.
        """
        # Select the jacobian to plot and validate that it is available.
        if mode == 'f':
            if self.grad_npoints == 0:
                raise ValueError(
                    "analysis.plot_gradient_pcp: sampling and getting gradient first is necessary")
            else:
                dim = self.f_dim
                grad = self.grad
                npoints = self.grad_npoints
                string = 'Objective '
        elif mode == 'c':
            if self.c_dim == 0:
                raise ValueError(
                    "analysis.plot_gradient_pcp: mode 'c' selected for unconstrained problem")
            if self.c_grad_npoints == 0:
                raise ValueError(
                    "analysis.plot_gradient_pcp: sampling and getting c_gradient first is necessary")
            else:
                dim = self.c_dim
                grad = self.c_grad
                npoints = self.c_grad_npoints
                string = 'Constraint '
        else:
            raise ValueError(
                "analysis.plot_gradient_pcp: choose a valid mode 'f' or 'c'")
        # A parallel coordinate plot needs at least two parallel axes.
        if invert is False and self.cont_dim == 1:
            raise ValueError(
                "analysis.plot_gradient_pcp: this plot makes no sense for univariate problems")
        if invert is True and dim == 1:
            raise ValueError(
                "analysis.plot_gradient_pcp: this plot makes no sense for single-" + string.lower() + "problems")
        try:
            # NOTE(review): pandas.tools.plotting is a legacy import path
            # (later pandas moved it to pandas.plotting) -- confirm the pandas
            # version this project pins.
            from pandas.tools.plotting import parallel_coordinates as pc
            from pandas import DataFrame as df
            from matplotlib.pyplot import show, title, grid, ylabel, xlabel, cla, clf, xticks
            from numpy import asarray, transpose
        except ImportError:
            raise ImportError(
                "analysis.plot_gradient_pcp needs pandas, numpy and matplotlib to run. Are they installed?")
        gradient = []
        # aux accounts for the label row prepended in the inverted layout.
        if invert:
            aux = 1
            file_string = ' (inverted)'
        else:
            aux = 0
            file_string = ''
        # Build one labelled row per (sample point, colored series).
        for i in range(npoints):
            if invert:
                # Inverted: first row carries the variable labels x1..xN.
                tmp = [['x' + str(x + 1) for x in range(self.cont_dim)]]
            else:
                tmp = []
            for j in range(dim):
                if invert:
                    tmp.append([])
                else:
                    if mode == 'c':
                        # Equality constraints are labelled h*, inequalities g*.
                        if j < self.c_dim - self.ic_dim:
                            tmp.append(['Constraint h' + str(j + 1)])
                        else:
                            tmp.append(
                                ['Constraint g' + str(j - self.c_dim + self.ic_dim + 1)])
                    else:
                        tmp.append(['Objective ' + str(j + 1)])
                tmp[j + aux].extend(grad[i][j])
            if invert:
                # Transpose so parallel axes become objectives/constraints.
                tmp2 = []
                for ii in range(self.cont_dim):
                    tmp2.append([])
                    for jj in range(dim + 1):
                        tmp2[ii].append(tmp[jj][ii])
                gradient.extend(tmp2)
            else:
                gradient.extend(tmp)
        gradient = df(gradient)
        title(string + 'Gradient/Jacobian PCP \n')
        grid(True)
        ylabel('Derivative value (scaled)')
        if invert:
            xlabel(string)
        else:
            xlabel('Dimension')
        plot = pc(gradient, 0)
        # Relabel ticks h1..hN/g1..gM for inverted constraint plots.
        if mode == 'c' and invert:
            try:
                xlocs = range(self.c_dim)
                xlabels = ['h' + str(i + 1) for i in range(self.c_dim - self.ic_dim)] + [
                    'g' + str(i + 1) for i in range(self.ic_dim)]
                xticks(xlocs, [x.format(xlocs[i])
                               for i, x in enumerate(xlabels)])
            except (IndexError, ValueError):
                pass
        f = plot.get_figure()
        if self.dir is None:
            # Interactive mode: show on screen, then reset state.
            show(f)
            cla()
            clf()
        else:
            # Batch mode: save a numbered PNG and log its name.
            f.savefig(self.dir + '/figure_' + str(self.fignum) + '.png')
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)
            print('*' + string + 'Gradient/Jacobian PCP plot' + file_string +
                  ' : <figure_' + str(self.fignum) + '.png>', file=output)
            self.fignum += 1
            cla()
            clf()
            output.close()
def plot_local_cluster_pcp(self, together=True, clusters_to_plot=10):
"""
Generates a Parallel Coordinate Plot of the clusters obtained on the local search results.
The parallel axes represent the chromosome of the initial points of each local search and
the colors are the clusters to which its local search resulting points belong.
**USAGE:**
analysis.plot_local_cluster_pcp([together=True, clusters_to_plot=5])
* together: if True, a single plot will be generated. If False, each cluster will be presented
in a separate plot. Defaults to True.
* clusters_to_plot: number of clusters to show. Option 'all' will plot all the clusters obtained.
Otherwise the best clusters will be shown. Clusters are rated by mean decomposed fitness value.
Defaults to 10.
**NOTE:** the plot will be shown on screen or saved to file depending on the option that was
selected when instantiating the analysis class.
"""
if self.local_nclusters == 0:
raise ValueError(
"analysis.plot_local_cluster_pcp: sampling, getting local extrema and clustering them first is necessary")
if self.dim == 1:
raise ValueError(
"analysis.plot_local_cluster_pcp: this makes no sense for univariate problems")
try:
from pandas.tools.plotting import parallel_coordinates as pc
from pandas import DataFrame as df
from matplotlib.pyplot import show, title, grid, ylabel, xlabel, legend, plot, subplot, cla, clf
from numpy import asarray, transpose
except ImportError:
raise ImportError(
"analysis.plot_gradient_pcp needs pandas, numpy and matplotlib to run. Are they installed?")
if clusters_to_plot == 'all':
clusters_to_plot = self.local_nclusters
if together:
n = 1
dataset = [[]]
for i in range(self.local_initial_npoints):
if self.local_cluster[i] < clusters_to_plot:
dataset[0].append(
[self.local_cluster[i] + 1] + self.points[self.local_initial_points[i]])
dataset[0].sort(key=lambda row: -row[0])
separatelabel = ['' for i in range(self.local_nclusters)]
else:
n = min([clusters_to_plot, self.local_nclusters])
dataset = [[] for i in range(clusters_to_plot)]
for i in range(self.local_initial_npoints):
if self.local_cluster[i] < clusters_to_plot:
dataset[self.local_cluster[i]].append(
[self.local_cluster[i] + 1] + self.points[self.local_initial_points[i]])
separatelabel = [
': cluster ' + str(i + 1) for i in range(self.local_nclusters)]
flist = []
for i in range(n):
dataframe = df(dataset[i])
title('Local extrema clusters PCP' + separatelabel[i] + ' \n')
grid(True)
xlabel('Dimension')
ylabel('Chromosome (scaled)')
plot = pc(dataframe, 0)
f = plot.get_figure()
if self.dir is None:
show(f)
cla()
clf()
else:
f.savefig(self.dir + '/figure_' + str(self.fignum) + '.png')
output = open(self.dir + '/log.txt', 'r+')
output.seek(0, 2)
if together:
aux = '(global)'
else:
aux = '(cluster n.' + str(i + 1) + ')'
print('*Cluster PCP plot ' + aux + ' : <figure_' +
str(self.fignum) + '.png>', file=output)
self.fignum += 1
cla()
clf()
output.close()
    def plot_local_cluster_scatter(self, dimensions='all', clusters_to_plot=10):
        """
        Generates a Scatter Plot of the clusters obtained for the local search results in the
        dimensions specified (up to 3). Points on the plot are local search initial points and
        colors are the cluster to which their corresponding final points belong. Cluster X-centers
        are also shown. These are computed as specified in analysis._cluster_local_extrema.
        **USAGE:**
        analysis.plot_local_cluster_scatter([dimensions=[1,2], save_fig=False])
        * dimensions: list of up to 3 dimensions in the search space that will be shown in the scatter
        plot (zero based). If set to 'all', the whole search space will be taken. An error will be
        raised when trying to plot more than 3 dimensions. Defaults to 'all'.
        * clusters_to_plot: number of clusters to show. The best clusters will be shown. Clusters
        are rated by mean decomposed fitness value. Defaults to 10.
        **NOTE:** the plot will be shown on screen or saved to file depending on the option that was
        selected when instantiating the analysis class.
        """
        if self.local_nclusters == 0:
            raise ValueError(
                "analysis.plot_local_cluster_scatter: sampling, getting local extrema and clustering them first is necessary")
        # Normalise the dimensions argument to a list of axis indices.
        if dimensions == 'all':
            dimensions = range(self.dim)
        if isinstance(dimensions, int):
            dimensions = [dimensions]
        if len(dimensions) > 3 or len(dimensions) == 0:
            raise ValueError(
                "analysis.plot_local_cluster_scatter: choose 1, 2 or 3 dimensions to plot")
        try:
            from matplotlib.pyplot import show, title, grid, legend, axes, figure, cla, clf
            from mpl_toolkits.mplot3d import Axes3D
            from matplotlib.cm import Set1
            from numpy import asarray, linspace
        except ImportError:
            raise ImportError(
                "analysis.plot_local_cluster_scatter needs numpy and matplotlib to run. Are they installed?")
        dataset = []
        if clusters_to_plot == 'all':
            clusters_to_plot = self.local_nclusters
        else:
            clusters_to_plot = min(clusters_to_plot, self.local_nclusters)
        npoints = 0
        c = []
        # One distinct colormap color per plotted cluster.
        colors = Set1(linspace(0, 1, clusters_to_plot))
        # Collect the initial points of searches whose cluster is being shown,
        # projected on the requested dimensions, and their cluster color.
        for i in range(self.local_initial_npoints):
            if self.local_cluster[i] < clusters_to_plot:
                dataset.append(
                    [self.points[self.local_initial_points[i]][j] for j in dimensions])
                c.append(colors[self.local_cluster[i]])
                npoints += 1
        dataset = asarray(dataset)
        centers = self.local_cluster_x_centers[:clusters_to_plot]
        # 1-D, 2-D and 3-D variants of the same scatter + centroid markers
        # (triangles) and per-cluster text labels.
        if len(dimensions) == 1:
            ax = axes()
            # Points projected on a single axis, drawn on the y=0 line.
            ax.scatter(dataset, [0 for i in range(npoints)], c=c)
            ax.set_xlim(0, 1)
            ax.set_ylim(-0.1, 0.1)
            ax.set_yticklabels([])
            ax.set_xlabel('x' + str(dimensions[0] + 1))
            grid(True)
            for i in range(clusters_to_plot):
                ax.scatter(
                    centers[i][0], 0.005, marker='^', color=colors[i], s=100)
                ax.text(centers[i][0], 0.01, 'cluster ' + str(i + 1), horizontalalignment='center',
                        verticalalignment='bottom', color=colors[i], rotation='vertical', size=12, backgroundcolor='w')
        elif len(dimensions) == 2:
            ax = axes()
            ax.scatter(dataset[:, 0], dataset[:, 1], c=c)
            ax.set_xlim(0, 1)
            ax.set_ylim(0, 1)
            ax.set_xlabel('x' + str(dimensions[0] + 1))
            ax.set_ylabel('x' + str(dimensions[1] + 1))
            grid(True)
            for i in range(clusters_to_plot):
                ax.scatter(
                    centers[i][0], centers[i][1], marker='^', color=colors[i], s=100)
                ax.text(centers[i][0] + .02, centers[i][1], 'cluster ' + str(i + 1), horizontalalignment='left',
                        verticalalignment='center', color=colors[i], size=12, backgroundcolor='w')
        else:
            ax = figure().add_subplot(111, projection='3d')
            ax.scatter(dataset[:, 0], dataset[:, 1], dataset[:, 2], c=c)
            ax.set_xlim(0, 1)
            ax.set_ylim(0, 1)
            ax.set_zlim(0, 1)
            ax.set_xlabel('x' + str(dimensions[0] + 1))
            ax.set_ylabel('x' + str(dimensions[1] + 1))
            ax.set_zlabel('x' + str(dimensions[2] + 1))
            for i in range(clusters_to_plot):
                ax.scatter(centers[i][0], centers[i][1], centers[i][
                           2], marker='^', color=colors[i], s=100)
                ax.text(centers[i][0], centers[i][1] + 0.02, centers[i][2], 'cluster ' + str(i + 1),
                        horizontalalignment='left', verticalalignment='center', color=colors[i], size=12, backgroundcolor='w')
        title('Local extrema clusters scatter plot')
        f = ax.get_figure()
        if self.dir is None:
            # Interactive mode: show on screen, then reset state.
            show(f)
            cla()
            clf()
        else:
            # Batch mode: save a numbered PNG and log its name.
            f.savefig(self.dir + '/figure_' + str(self.fignum) + '.png')
            output = open(self.dir + '/log.txt', 'r+')
            output.seek(0, 2)
            print('*Cluster scatter plot (dimensions ' +
                  str([i + 1 for i in dimensions]) + ') : <figure_' + str(self.fignum) + '.png>', file=output)
            self.fignum += 1
            cla()
            clf()
            output.close()
| gpl-3.0 |
napjon/moocs_solution | ml-udacity/k_means/k_means_cluster.py | 6 | 2570 | #!/usr/bin/python
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
    """Scatter-plot 2-D feature points colored by cluster assignment.

    Parameters
    ----------
    pred : sequence of int
        Cluster label for each point; selects the plot color.
    features : sequence of (x, y) pairs
        Point coordinates (only the first two columns are used).
    poi : sequence of bool
        Person-of-interest flags; consulted only when ``mark_poi`` is True.
    mark_poi : bool
        If True, overlay a red star on each POI point.
    name : str
        Filename the figure is saved to before being shown.
    f1_name, f2_name : str
        Axis labels.
    """
    ### plot each cluster with a different color; cycle with modulo so that
    ### more than five clusters no longer raises IndexError (previously
    ### colors[pred[ii]] overflowed the list)
    colors = ["b", "c", "k", "m", "g"]
    for ii, label in enumerate(pred):
        plt.scatter(features[ii][0], features[ii][1],
                    color=colors[label % len(colors)])

    ### if you like, place red stars over points that are POIs (just for funsies)
    if mark_poi:
        for ii, label in enumerate(pred):
            if poi[ii]:
                plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
    plt.xlabel(f1_name)
    plt.ylabel(f2_name)
    plt.savefig(name)
    plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
# Context manager closes the handle (the original leaked an open file), and
# binary mode is required by pickle on Python 3 (harmless on Python 2).
with open("../final_project/final_project_dataset.pkl", "rb") as data_file:
    data_dict = pickle.load(data_file)
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)

### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list)
poi, finance_features = targetFeatureSplit(data)

### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, line below assumes 2 features)
for f1, f2 in finance_features:
    plt.scatter(f1, f2)
plt.show()

from sklearn.cluster import KMeans

features_list = ["poi", feature_1, feature_2]
data2 = featureFormat(data_dict, features_list)
poi, finance_features = targetFeatureSplit(data2)
clf = KMeans(n_clusters=2)
pred = clf.fit_predict(finance_features)
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf",
     f1_name=feature_1, f2_name=feature_2)

### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
try:
    Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf",
         f1_name=feature_1, f2_name=feature_2)
except NameError:
    # print(...) parses as a parenthesized statement on Python 2 and as a
    # function call on Python 3; the original bare print statement was a
    # SyntaxError under Python 3.
    print("no predictions object named pred found, no clusters to plot")
| mit |
zhyuey/maps | usa_map_general/usa_map.py | 1 | 2854 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
import sys, os
import shapefile
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.collections import LineCollection
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties
# --- figure and style setup ------------------------------------------------
curdir = sys.path[0] + '/'
mpl.rcParams['font.family'] = 'sans-serif'
thisblue = '#23238e'  # fill color for ocean and lakes

fig = plt.figure(figsize=(11.7, 8.3))  # A3 landscape, inches
plt.subplots_adjust(
    left=0.05, right=0.95, top=0.95, bottom=0.05, wspace=0.15, hspace=0.05)
ax = plt.subplot(111)

# Map extent: contiguous United States (lon/lat corner coordinates).
x1 = -128.
x2 = -63.5
y1 = 24
y2 = 51
m = Basemap(resolution='i', projection='merc', llcrnrlat=y1,
            urcrnrlat=y2, llcrnrlon=x1, urcrnrlon=x2)
m.fillcontinents(color='0.8')
m.drawmapboundary(fill_color=thisblue)
m.drawparallels(np.arange(y1, y2, 5.), labels=[
                1, 0, 0, 0], color='black', labelstyle='+/-', linewidth=0.2)  # draw parallels
m.drawmeridians(np.arange(x1, x2, 5.), labels=[
                0, 0, 0, 1], color='black', labelstyle='+/-', linewidth=0.2)  # draw meridians

# --- state boundaries from the USA_adm1 shapefile --------------------------
r = shapefile.Reader(curdir + "USA_adm1")
shapes = r.shapes()
records = r.records()

for cnt, (record, shape) in enumerate(zip(records, shapes)):
    print(cnt)  # progress indicator; rendering each state is slow
    lons, lats = zip(*shape.points)
    data = np.array(m(lons, lats)).T
    if len(shape.parts) == 1:
        segs = [data, ]
    else:
        # Multi-part shape (e.g. islands): split the point array at each
        # part offset so every ring becomes its own segment.
        segs = []
        for i in range(1, len(shape.parts)):
            index = shape.parts[i - 1]
            index2 = shape.parts[i]
            segs.append(data[index:index2])
        segs.append(data[index2:])
    lines = LineCollection(segs, antialiaseds=(1,))
    # np.random.rand(3) yields a flat RGB triple in [0.5, 1.0); the original
    # rand(3, 1) column vector is rejected as a color spec by modern
    # matplotlib (it draws the same three values from the random stream).
    lines.set_facecolors(np.random.rand(3) * 0.5 + 0.5)
    lines.set_edgecolors('k')
    lines.set_linewidth(0.1)
    ax.add_collection(lines)

# --- lakes drawn above the state fills -------------------------------------
for lakepoly in m.lakepolygons:
    lp = Polygon(lakepoly.boundary, zorder=3)
    lp.set_facecolor(thisblue)
    lp.set_linewidth(0.1)
    ax.add_patch(lp)

# --- state name labels -----------------------------------------------------
# Each CSV row: bounding-box corners (cols 0-3), label text (4), a manual
# lon/lat nudge (5-6) and a font size (7). Labels sit at the nudged box
# center. The with-block also closes the file (the original leaked it).
with open(curdir + 'state_info_revised.csv', 'r') as infile:
    csvfile = csv.reader(infile)
    for line in csvfile:
        lon = (float(line[0]) + float(line[2])) / 2 + float(line[5])
        lat = (float(line[1]) + float(line[3])) / 2 + float(line[6])
        x, y = m(lon, lat)
        name = line[4].replace('\\n', '\n')
        plt.text(x, y, name, horizontalalignment='center',
                 verticalalignment='center', fontsize=int(line[7]))

xx, yy = m(-72.0, 26.0)
plt.text(xx, yy, u'Made by zhyuey', color='yellow')
plt.title('Map of contiguous United States', fontsize=24)

# plt.savefig('usa_state_75.png', dpi=75)
plt.savefig('usa_state_300.png', dpi=300)
# plt.savefig('usa_state_600.png', dpi=600)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.